<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-akka</artifactId>
<features.file>features.xml</features.file>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.1.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
- <karaf.version>3.0.1</karaf.version>
- <feature.test.version>0.7.0-SNAPSHOT</feature.test.version>
- <karaf.empty.version>1.5.0-SNAPSHOT</karaf.empty.version>
+ <branding.version>1.2.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.6.0-SNAPSHOT</karaf.resources.version>
+ <karaf.version>3.0.3</karaf.version>
+ <feature.test.version>0.8.0-SNAPSHOT</feature.test.version>
+ <karaf.empty.version>1.6.0-SNAPSHOT</karaf.empty.version>
<surefire.version>2.16</surefire.version>
</properties>
<dependencies>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.8.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.3.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.2.0-SNAPSHOT/xml/features</repository>
-->
<feature name='odl-akka-all' version='${project.version}' description='OpenDaylight :: Akka :: All'>
<!--
* Basic MD-SAL Provider
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-controller-model</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.2.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
<artifactId>features-config-netty</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
<artifactId>features-config-persister</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
<artifactId>features-config</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-extras</artifactId>
<features.file>features.xml</features.file>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.1.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
- <karaf.version>3.0.1</karaf.version>
- <karaf.empty.version>1.5.0-SNAPSHOT</karaf.empty.version>
+ <branding.version>1.2.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.6.0-SNAPSHOT</karaf.resources.version>
+ <karaf.version>3.0.3</karaf.version>
+ <karaf.empty.version>1.6.0-SNAPSHOT</karaf.empty.version>
<surefire.version>2.16</surefire.version>
</properties>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
<artifactId>features-mdsal</artifactId>
<repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
<feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
<feature version='${project.version}'>odl-mdsal-broker</feature>
- <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${project.version}'>odl-mdsal-xsql</feature>
<feature version='${project.version}'>odl-toaster</feature>
</feature>
</feature>
<!-- TODO move to netconf features, however there are some weird dependencies on features-config-persister all over that cause cyclic dependencies-->
- <!-- TODO when installing this in pure karaf distro, many optimistic lock exceptions are thrown from config manager -->
<feature name='odl-netconf-mdsal' version='${project.version}' description="OpenDaylight :: Netconf :: Mdsal">
<feature version='${config.version}'>odl-config-all</feature>
<feature version='${netconf.version}'>odl-netconf-all</feature>
+ <feature version='${config.version}'>odl-config-netty</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-tcp/${netconf.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-ssh/${netconf.version}</bundle>
<feature version='${mdsal.version}'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/mdsal-netconf-connector/${netconf.version}</bundle>
<configfile finalname='${config.configfile.directory}/${config.netconf.mdsal.configfile}'>mvn:org.opendaylight.controller/netconf-mdsal-config/${netconf.version}/xml/config</configfile>
</feature>
- <feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
+ <feature name='odl-mdsal-broker-local' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
<feature version='${yangtools.version}'>odl-yangtools-models</feature>
<configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
</feature>
<feature name ='odl-mdsal-clustering-commons' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${akka.version}'>odl-akka-system</feature>
<feature version='${akka.version}'>odl-akka-persistence</feature>
<bundle>mvn:org.opendaylight.controller/sal-clustering-commons/${project.version}</bundle>
<bundle>mvn:com.codahale.metrics/metrics-core/3.0.1</bundle>
</feature>
<feature name ='odl-mdsal-distributed-datastore' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
<feature version='${akka.version}'>odl-akka-clustering</feature>
<bundle>mvn:org.opendaylight.controller/sal-distributed-datastore/${project.version}</bundle>
</feature>
<feature name ='odl-mdsal-remoterpc-connector' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
<feature version='${akka.version}'>odl-akka-clustering</feature>
<feature version='0.7'>odl-akka-leveldb</feature>
<bundle>mvn:org.opendaylight.controller/sal-remoterpc-connector/${project.version}</bundle>
</feature>
- <feature name ='odl-mdsal-clustering' version='${project.version}'>
+ <feature name ='odl-mdsal-broker' version='${project.version}'>
<feature version='${project.version}'>odl-mdsal-remoterpc-connector</feature>
<feature version='${project.version}'>odl-mdsal-distributed-datastore</feature>
<configfile finalname="${config.configfile.directory}/${config.clustering.configfile}">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/config</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
<configfile finalname="etc/org.opendaylight.controller.cluster.datastore.cfg">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore</configfile>
</feature>
-
+ <feature name ='odl-mdsal-clustering' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ </feature>
<feature name='odl-clustering-test-app' version='${project.version}'>
<feature version='${yangtools.version}'>odl-yangtools-models</feature>
- <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-broker-local</feature>
<bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
<!--
-->
<artifactId>features-netconf-connector</artifactId>
<!-- Optional TODO: Uncomment version if you are not using a parent pom.xml
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
-->
<packaging>jar</packaging>
<properties>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<classifier>features</classifier>
<type>xml</type>
</dependency>
- <dependency>
+ <!-- dependency>
<groupId>org.opendaylight.aaa</groupId>
<artifactId>features-aaa</artifactId>
<version>${aaa.version}</version>
<classifier>features</classifier>
<type>xml</type>
- </dependency>
+ </dependency -->
<!--
Necessary TODO: Put dependencies for bundles directly referenced
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.8.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.3.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.2.0-SNAPSHOT/xml/features</repository>
-->
<repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
<!-- FIXME: This introduces cycle between projects, which makes version updates
harder. Should be moved to different.
-->
- <repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository>
+ <!-- repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository -->
<feature name='odl-netconf-connector-all' version='${project.version}' description='OpenDaylight :: Netconf Connector :: All'>
<!--
Necessary TODO:
* Basic MD-SAL Provider
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-controller-model</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.2.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
</feature>
<!-- FIXME: This introduces cycle between projects, which makes version updates
harder. Should be moved to different.
-->
- <feature version='${aaa.version}'>odl-aaa-netconf-plugin</feature>
+ <!-- feature version='${aaa.version}'>odl-aaa-netconf-plugin</feature -->
<bundle>mvn:org.opendaylight.controller/netconf-ssh/${netconf.version}</bundle>
</feature>
- <feature name='odl-netconf-tcp' version='${netconf.version}' description="OpenDaylight :: Netconf Connector :: TCP">
+ <feature name='odl-netconf-tcp' version='${netconf.version}' description="OpenDaylight :: Netconf Connector :: TCP">
<feature version='${netconf.version}'>odl-netconf-impl</feature>
+ <feature version='${config.version}'>odl-config-netty</feature>
<bundle>mvn:org.opendaylight.controller/netconf-tcp/${netconf.version}</bundle>
- </feature>
+ </feature>
<feature name='odl-message-bus' version='${project.version}'>
<feature version='${project.version}'>odl-netconf-connector</feature>
<feature version='${project.version}'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/messagebus-api/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/messagebus-spi/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/messagebus-impl/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/05-message-bus.xml">mvn:org.opendaylight.controller/messagebus-config/${project.version}/xml/config</configfile>
</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../../opendaylight/netconf</relativePath>
</parent>
<artifactId>features-netconf</artifactId>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring-extension/${project.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-inet-types/${ietf-inet-types.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types/${ietf-yang-types.version}</bundle>
- <bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types-20130715/2013.07.15.7-SNAPSHOT</bundle>
+ <bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types-20130715/2013.07.15.8-SNAPSHOT</bundle>
</feature>
<feature name='odl-netconf-mapping-api' version='${project.version}' description="OpenDaylight :: Netconf :: Mapping API">
<feature version='${project.version}'>odl-netconf-api</feature>
<feature name='odl-netconf-util' version='${project.version}'>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<bundle>mvn:org.opendaylight.yangtools/yang-model-api/${yangtools.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools/yang-data-api/${yangtools.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-util/${project.version}</bundle>
</feature>
<feature name='odl-netconf-impl' version='${project.version}' description="OpenDaylight :: Netconf :: Impl">
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-controller</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-protocol-framework</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
<!--
<artifactId>features-restconf</artifactId>
<groupId>org.opendaylight.controller</groupId>
<!-- Optional TODO: Uncomment version if you are not using a parent pom.xml
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
-->
<packaging>jar</packaging>
<properties>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<classifier>features</classifier>
<type>xml</type>
</dependency>
- <dependency>
+ <!-- dependency>
<groupId>org.opendaylight.aaa</groupId>
<artifactId>features-aaa</artifactId>
<version>${aaa.version}</version>
<classifier>features</classifier>
<type>xml</type>
- </dependency>
+ </dependency -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.8.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.3.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.2.0-SNAPSHOT/xml/features</repository>
-->
<repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
- <repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository>
+ <!-- repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository -->
<feature name='odl-restconf-all' version='${project.version}' description='OpenDaylight :: Restconf :: All'>
<!--
Necessary TODO:
* Basic MD-SAL Provider
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-controller-model</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.2.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
</feature>
-->
<feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
- <feature version='${aaa.version}'>odl-aaa-authn</feature>
+ <!-- feature version='${aaa.version}'>odl-aaa-authn</feature -->
<feature version='${mdsal.version}'>odl-restconf-noauth</feature>
</feature>
<feature name='odl-restconf-noauth' version='${project.version}' description="OpenDaylight :: Restconf">
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>itests-controller</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>base-features-it</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>itests-controller</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>releasepom</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
</parent>
<artifactId>karaf.branding</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<name>OpenDaylight :: Karaf :: Branding</name>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath/>
</parent>
<modelVersion>4.0.0</modelVersion>
<packaging>pom</packaging>
<properties>
- <branding.version>1.1.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
+ <branding.version>1.2.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.6.0-SNAPSHOT</karaf.resources.version>
+ <karaf.localFeature>standard</karaf.localFeature>
+ <!-- Karaf Maven plugin 3.023 reinstalls already installed
+ boot features to container, which could be observed
+ in huge logs and seems like loop in build, even if it
+ is not.
+ -->
+ <karaf.maven.version>3.0.1</karaf.maven.version>
</properties>
<dependencyManagement>
<dependencies>
</dependencies>
</dependencyManagement>
<dependencies>
+ <!-- karaf standard features -->
+ <dependency>
+ <groupId>org.apache.karaf.features</groupId>
+ <artifactId>standard</artifactId>
+ <version>${karaf.version}</version>
+ <type>xml</type>
+ <classifier>features</classifier>
+ <scope>runtime</scope>
+ </dependency>
<!-- ODL Branding -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<plugin>
<groupId>org.apache.karaf.tooling</groupId>
<artifactId>karaf-maven-plugin</artifactId>
- <version>${karaf.version}</version>
+ <version>${karaf.maven.version}</version>
<extensions>true</extensions>
<configuration>
<!-- no startupFeatures -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>karaf-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
- <relativePath/>
+ <version>1.6.0-SNAPSHOT</version>
+ <relativePath>../karaf-parent</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<name>${project.artifactId}</name>
<prerequisites>
<maven>3.1.1</maven>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>opendaylight-karaf-resources</artifactId>
# Increase the maximum file descriptors if we can
if [ "$os400" = "false" ] && [ "$cygwin" = "false" ]; then
MAX_FD_LIMIT=`ulimit -H -n`
- if [ "$MAX_FD_LIMIT" != 'unlimited' ]; then
+ if [ "$MAX_FD_LIMIT" != 'unlimited' ]; then
if [ $? -eq 0 ]; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ]; then
# use the system max
fi
if [ "x$JAVA_HOME" = "x" ] && [ "$darwin" = "true" ]; then
- JAVA_HOME="$(/usr/libexec/java_home)"
+ JAVA_HOME="$(/usr/libexec/java_home -v 1.7)"
fi
if [ "x$JAVA" = "x" ] && [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
else
warn "JAVA_HOME not set; results may vary"
JAVA=`type java`
- JAVA=`expr "$JAVA" : '.*is \(.*\)$'`
+ JAVA=`expr "$JAVA" : '.* \(/.*\)$'`
if [ "x$JAVA" = "x" ]; then
die "java command not found"
fi
fi
export JAVA_OPTS
+ if [ "x$EXTRA_JAVA_OPTS" != "x" ]; then
+ JAVA_OPTS="$JAVA_OPTS $EXTRA_JAVA_OPTS"
+ fi
+
# Set Debug options if enabled
if [ "x$KARAF_DEBUG" != "x" ]; then
# Use the defaults if JAVA_DEBUG_OPTS was not set
CLASSPATH="$CLASSPATH:$file"
fi
done
- DEFAULT_JAVA_DEBUG_OPTS="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"
+
+ DEFAULT_JAVA_DEBUG_PORT="5005"
+ if [ "x$JAVA_DEBUG_PORT" = "x" ]; then
+ JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
+ fi
+ DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=$JAVA_DEBUG_PORT"
##
## TODO: Move to conf/profiler/yourkit.{sh|cmd}
run() {
- CLASSPATH="${KARAF_HOME}/system/org/apache/karaf/instance/org.apache.karaf.instance.command/3.0.1/org.apache.karaf.instance.command-3.0.1.jar:${KARAF_HOME}/system/org/apache/karaf/instance/org.apache.karaf.instance.core/3.0.1/org.apache.karaf.instance.core-3.0.1.jar:${KARAF_HOME}/system/org/apache/karaf/shell/org.apache.karaf.shell.console/3.0.1/org.apache.karaf.shell.console-3.0.1.jar:${KARAF_HOME}/system/org/apache/karaf/shell/org.apache.karaf.shell.table/3.0.1/org.apache.karaf.shell.table-3.0.1.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.api/1.0.0/org.apache.aries.blueprint.api-1.0.0.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.core/1.4.0/org.apache.aries.blueprint.core-1.4.0.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.cm/1.0.3/org.apache.aries.blueprint.cm-1.0.3.jar:${KARAF_HOME}/system/org/ops4j/pax/logging/pax-logging-api/1.7.2/pax-logging-api-1.7.2.jar:${KARAF_HOME}/system/org/apache/felix/org.apache.felix.framework/4.2.1/org.apache.felix.framework-4.2.1.jar:${KARAF_HOME}/system/jline/jline/2.11/jline-2.11.jar:$CLASSPATH"
+ CLASSPATH="${KARAF_HOME}/system/org/apache/karaf/instance/org.apache.karaf.instance.command/3.0.3/org.apache.karaf.instance.command-3.0.3.jar:${KARAF_HOME}/system/org/apache/karaf/instance/org.apache.karaf.instance.core/3.0.3/org.apache.karaf.instance.core-3.0.3.jar:${KARAF_HOME}/system/org/apache/karaf/shell/org.apache.karaf.shell.console/3.0.3/org.apache.karaf.shell.console-3.0.3.jar:${KARAF_HOME}/system/org/apache/karaf/shell/org.apache.karaf.shell.table/3.0.3/org.apache.karaf.shell.table-3.0.3.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.api/1.0.1/org.apache.aries.blueprint.api-1.0.1.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.core/1.4.2/org.apache.aries.blueprint.core-1.4.2.jar:${KARAF_HOME}/system/org/apache/aries/blueprint/org.apache.aries.blueprint.cm/1.0.5/org.apache.aries.blueprint.cm-1.0.5.jar:${KARAF_HOME}/system/org/ops4j/pax/logging/pax-logging-api/1.8.1/pax-logging-api-1.8.1.jar:${KARAF_HOME}/system/org/apache/felix/org.apache.felix.framework/4.2.1/org.apache.felix.framework-4.2.1.jar:${KARAF_HOME}/system/jline/jline/2.12/jline-2.12.jar:$CLASSPATH"
if $cygwin; then
KARAF_HOME=`cygpath --path --windows "$KARAF_HOME"`
}
main "$@"
-
)\r
\r
set DEFAULT_JAVA_OPTS=\r
-set DEFAULT_JAVA_DEBUG_OPTS=-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005\r
+set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005\r
\r
rem Support for loading native libraries\r
set PATH=%PATH%;%KARAF_BASE%\lib;%KARAF_HOME%\lib\r
\r
if "%JAVA_OPTS%" == "" set JAVA_OPTS=%DEFAULT_JAVA_OPTS%\r
\r
+if "%EXTRA_JAVA_OPTS%" == "" goto :KARAF_EXTRA_JAVA_OPTS_END\r
+ set JAVA_OPTS="%JAVA_OPTS% %EXTRA_JAVA_OPTS%"\r
+:KARAF_EXTRA_JAVA_OPTS_END\r
+\r
if "%KARAF_DEBUG%" == "" goto :KARAF_DEBUG_END\r
rem Use the defaults if JAVA_DEBUG_OPTS was not set\r
if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS%\r
\r
- set "JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
+ set JAVA_OPTS="%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
call :warn Enabling Java debug options: %JAVA_DEBUG_OPTS%\r
:KARAF_DEBUG_END\r
\r
\r
:CLASSPATH_END\r
\r
-set CLASSPATH=%KARAF_HOME%\system\org\apache\karaf\instance\org.apache.karaf.instance.command\3.0.1\org.apache.karaf.instance.command-3.0.1.jar;%KARAF_HOME%\system\org\apache\karaf\instance\org.apache.karaf.instance.core\3.0.1\org.apache.karaf.instance.core-3.0.1.jar;%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.console\3.0.1\org.apache.karaf.shell.console-3.0.1.jar;%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.table\3.0.1\org.apache.karaf.shell.table-3.0.1.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.api\1.0.0\org.apache.aries.blueprint.api-1.0.0.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.core\1.4.0\org.apache.aries.blueprint.core-1.4.0.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.cm\1.0.3\org.apache.aries.blueprint.cm-1.0.3.jar;%KARAF_HOME%\system\org\ops4j\pax\logging\pax-logging-api\1.7.2\pax-logging-api-1.7.2.jar;%KARAF_HOME%\system\org\apache\felix\org.apache.felix.framework\4.2.1\org.apache.felix.framework-4.2.1.jar;%KARAF_HOME%\system\jline\jline\2.11\jline-2.11.jar;%CLASSPATH%\r
+set CLASSPATH=%KARAF_HOME%\system\org\apache\karaf\instance\org.apache.karaf.instance.command\3.0.3\org.apache.karaf.instance.command-3.0.3.jar;%KARAF_HOME%\system\org\apache\karaf\instance\org.apache.karaf.instance.core\3.0.3\org.apache.karaf.instance.core-3.0.3.jar;%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.console\3.0.3\org.apache.karaf.shell.console-3.0.3.jar;%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.table\3.0.3\org.apache.karaf.shell.table-3.0.3.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.api\1.0.1\org.apache.aries.blueprint.api-1.0.1.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.core\1.4.2\org.apache.aries.blueprint.core-1.4.2.jar;%KARAF_HOME%\system\org\apache\aries\blueprint\org.apache.aries.blueprint.cm\1.0.5\org.apache.aries.blueprint.cm-1.0.5.jar;%KARAF_HOME%\system\org\ops4j\pax\logging\pax-logging-api\1.8.1\pax-logging-api-1.8.1.jar;%KARAF_HOME%\system\org\apache\felix\org.apache.felix.framework\4.2.1\org.apache.felix.framework-4.2.1.jar;%KARAF_HOME%\system\jline\jline\2.12\jline-2.12.jar;%CLASSPATH%\r
\r
:EXECUTE\r
if "%SHIFT%" == "true" SET ARGS=%2 %3 %4 %5 %6 %7 %8\r
:END\r
\r
endlocal\r
-\r
# limitations under the License.
#
-DIRNAME=`dirname $0`
-PROGNAME=`basename $0`
+DIRNAME=`dirname "$0"`
+PROGNAME=`basename "$0"`
#
# Sourcing environment settings for karaf similar to tomcats setenv
# Increase the maximum file descriptors if we can
if [ "$os400" = "false" ] && [ "$cygwin" = "false" ]; then
MAX_FD_LIMIT=`ulimit -H -n`
- if [ "$MAX_FD_LIMIT" != 'unlimited' ]; then
+ if [ "$MAX_FD_LIMIT" != 'unlimited' ]; then
if [ $? -eq 0 ]; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ]; then
# use the system max
fi
if [ "x$JAVA_HOME" = "x" ] && [ "$darwin" = "true" ]; then
- JAVA_HOME="$(/usr/libexec/java_home)"
+ JAVA_HOME="$(/usr/libexec/java_home -v 1.7)"
fi
if [ "x$JAVA" = "x" ] && [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
else
warn "JAVA_HOME not set; results may vary"
JAVA=`type java`
- JAVA=`expr "$JAVA" : '.*is \(.*\)$'`
+ JAVA=`expr "$JAVA" : '.* \(/.*\)$'`
if [ "x$JAVA" = "x" ]; then
die "java command not found"
fi
checkJvmVersion() {
# echo "`$JAVA -version`"
- VERSION=`$JAVA -version 2>&1 | egrep '"([0-9].[0-9]\..*[0-9])"' | awk '{print substr($3,2,length($3)-2)}' | awk '{print substr($1, 3, 3)}' | sed -e 's;\.;;g'`
+ VERSION=`$JAVA -version 2>&1 | egrep '"([0-9].[0-9]\..*[0-9]).*"' | awk '{print substr($3,2,length($3)-2)}' | awk '{print substr($1, 3, 3)}' | sed -e 's;\.;;g'`
# echo $VERSION
if [ "$VERSION" -lt "60" ]; then
echo "JVM must be greater than 1.6"
fi
export JAVA_OPTS
+ if [ "x$EXTRA_JAVA_OPTS" != "x" ]; then
+ JAVA_OPTS="$JAVA_OPTS $EXTRA_JAVA_OPTS"
+ fi
+
# Set Debug options if enabled
if [ "x$KARAF_DEBUG" != "x" ]; then
# Ignore DEBUG in case of stop or client mode
CLASSPATH="$CLASSPATH:$file"
fi
done
- DEFAULT_JAVA_DEBUG_OPTS="-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"
+
+ DEFAULT_JAVA_DEBUG_PORT="5005"
+ if [ "x$JAVA_DEBUG_PORT" = "x" ]; then
+ JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
+ fi
+ DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=$JAVA_DEBUG_PORT"
##
## TODO: Move to conf/profiler/yourkit.{sh|cmd}
# Determine the JVM vendor
detectJVM
-
+
# Determine the JVM version >= 1.6
checkJvmVersion
\r
set LOCAL_CLASSPATH=%CLASSPATH%\r
set JAVA_MODE=-server\r
-if not exist "%JAVA_HOME%\bin\server\jvm.dll" (\r
- if not exist "%JAVA_HOME%\jre\bin\server\jvm.dll" (\r
- echo WARNING: Running karaf on a Java HotSpot Client VM because server-mode is not available.\r
- echo Install Java Developer Kit to fix this.\r
- echo For more details see http://java.sun.com/products/hotspot/whitepaper.html#client\r
- set JAVA_MODE=-client\r
- )\r
-)\r
-set DEFAULT_JAVA_OPTS=%JAVA_MODE% -Xms%JAVA_MIN_MEM% -Xmx%JAVA_MAX_MEM% -Dderby.system.home="%KARAF_DATA%\derby" -Dderby.storage.fileSyncTransactionLog=true -Dcom.sun.management.jmxremote -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass\r
-\r
-rem Check some easily accessible MIN/MAX params for JVM mem usage\r
-if not "%JAVA_PERM_MEM%" == "" (\r
- set DEFAULT_JAVA_OPTS=%DEFAULT_JAVA_OPTS% -XX:PermSize=%JAVA_PERM_MEM%\r
-)\r
-if not "%JAVA_MAX_PERM_MEM%" == "" (\r
- set DEFAULT_JAVA_OPTS=%DEFAULT_JAVA_OPTS% -XX:MaxPermSize=%JAVA_MAX_PERM_MEM%\r
-)\r
\r
set CLASSPATH=%LOCAL_CLASSPATH%;%KARAF_BASE%\conf\r
-set DEFAULT_JAVA_DEBUG_OPTS=-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005\r
+set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005\r
\r
if "%LOCAL_CLASSPATH%" == "" goto :KARAF_CLASSPATH_EMPTY\r
set CLASSPATH=%LOCAL_CLASSPATH%;%KARAF_BASE%\conf\r
)\r
if not exist "%JAVA_HOME%" (\r
goto TryRegJDK\r
- )\r
- goto TryJDKEnd\r
+ )\r
+ goto TryJDKEnd\r
:TryRegJDK\r
rem try getting the JAVA_HOME from registry\r
FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKLM\Software\JavaSoft\Java Development Kit" /v CurrentVersion`) DO (\r
if not exist "%JAVA_HOME%" (\r
call :warn Unable to retrieve JAVA_HOME from Registry\r
)\r
- goto TryJDKEnd\r
+ goto TryJDKEnd\r
:TryJDKEnd\r
if not exist "%JAVA_HOME%" (\r
call :warn JAVA_HOME is not valid: "%JAVA_HOME%"\r
set JAVA=%JAVA_HOME%\bin\java\r
:Check_JAVA_END\r
\r
+if not exist "%JAVA_HOME%\bin\server\jvm.dll" (\r
+ if not exist "%JAVA_HOME%\jre\bin\server\jvm.dll" (\r
+ echo WARNING: Running Karaf on a Java HotSpot Client VM because server-mode is not available.\r
+ echo Install Java Developer Kit to fix this.\r
+ echo For more details see http://java.sun.com/products/hotspot/whitepaper.html#client\r
+ set JAVA_MODE=-client\r
+ )\r
+)\r
+set DEFAULT_JAVA_OPTS=%JAVA_MODE% -Xms%JAVA_MIN_MEM% -Xmx%JAVA_MAX_MEM% -Dderby.system.home="%KARAF_DATA%\derby" -Dderby.storage.fileSyncTransactionLog=true -Dcom.sun.management.jmxremote -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass\r
+\r
+rem Check some easily accessible MIN/MAX params for JVM mem usage\r
+if not "%JAVA_PERM_MEM%" == "" (\r
+ set DEFAULT_JAVA_OPTS=%DEFAULT_JAVA_OPTS% -XX:PermSize=%JAVA_PERM_MEM%\r
+)\r
+if not "%JAVA_MAX_PERM_MEM%" == "" (\r
+ set DEFAULT_JAVA_OPTS=%DEFAULT_JAVA_OPTS% -XX:MaxPermSize=%JAVA_MAX_PERM_MEM%\r
+)\r
+\r
if "%JAVA_OPTS%" == "" set JAVA_OPTS=%DEFAULT_JAVA_OPTS%\r
\r
+if "%EXTRA_JAVA_OPTS%" == "" goto :KARAF_EXTRA_JAVA_OPTS_END\r
+ set JAVA_OPTS=%JAVA_OPTS% %EXTRA_JAVA_OPTS%\r
+:KARAF_EXTRA_JAVA_OPTS_END\r
+\r
if "%KARAF_DEBUG%" == "" goto :KARAF_DEBUG_END\r
if "%1" == "stop" goto :KARAF_DEBUG_END\r
if "%1" == "client" goto :KARAF_DEBUG_END\r
rem Use the defaults if JAVA_DEBUG_OPTS was not set\r
if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS%\r
\r
- set "JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
+ set JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%\r
call :warn Enabling Java debug options: %JAVA_DEBUG_OPTS%\r
:KARAF_DEBUG_END\r
\r
\r
:EXECUTE_DEBUG\r
if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS%\r
- set "JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%"\r
+ set JAVA_OPTS=%JAVA_DEBUG_OPTS% %JAVA_OPTS%\r
shift\r
goto :RUN_LOOP\r
\r
if not "%PAUSE%" == "" pause\r
\r
:END_NO_PAUSE\r
-\r
#
#
-# The following section shows the possible configuration options for the default
+# The following section shows the possible configuration options for the default
# karaf scripts
#
# export JAVA_HOME # Location of Java installation
# export JAVA_MAX_MEM # Maximum memory for the JVM
# export JAVA_PERM_MEM # Minimum perm memory for the JVM
# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM
+# export EXTRA_JAVA_OPTS # Additional JVM options
# export KARAF_HOME # Karaf home folder
# export KARAF_DATA # Karaf data folder
# export KARAF_BASE # Karaf base folder
# export KARAF_ETC # Karaf etc folder
# export KARAF_OPTS # Additional available Karaf options
# export KARAF_DEBUG # Enable debug mode
+# export KARAF_REDIRECT # Enable/set the std/err redirection when using bin/start
if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
export JAVA_MAX_PERM_MEM="512m"
fi
if [ "x$JAVA_MAX_MEM" = "x" ]; then
export JAVA_MAX_MEM="2048m"
fi
-
rem SET JAVA_PERM_MEM
rem Maximum perm memory for the JVM
rem SET JAVA_MAX_PERM_MEM
+rem Additional JVM options
+rem SET EXTRA_JAVA_OPTS
rem Karaf home folder
rem SET KARAF_HOME
rem Karaf data folder
# Extra packages to import from the boot class loader
org.osgi.framework.system.packages.extra=org.apache.karaf.branding,sun.reflect,sun.reflect.misc,sun.misc,sun.nio.ch
-# Override the config.properties to remove
-# ${services-${karaf.framework}}
-# This is to work around:
-# https://issues.apache.org/jira/browse/KARAF-3092
-# Which should be fixed in karaf 3.0.2
-# Which is not as of today (2014-08-24) released.
-# Since it is biting folks, we need to fix it here
-# Please remove this when we shift to karaf 3.0.2
-org.osgi.framework.system.capabilities= \
- ${eecap-${java.specification.version}}, \
- service-reference;effective:=active;objectClass=org.osgi.service.packageadmin.PackageAdmin, \
- service-reference;effective:=active;objectClass=org.osgi.service.startlevel.StartLevel, \
- service-reference;effective:=active;objectClass=org.osgi.service.url.URLHandlers
-
# https://bugs.eclipse.org/bugs/show_bug.cgi?id=325578
# Extend the framework to avoid the resources to be presented with
# a URL of type bundleresource: but to be presented as file:
hosttracker.keyscheme=IP
# LISP Flow Mapping configuration
-# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings (default: true)
lisp.mappingOverwrite = true
-# Enable the Solicit-Map-Request (SMR) mechanism
-lisp.smr = false
+# Enable the Solicit-Map-Request (SMR) mechanism (default: true)
+lisp.smr = true
+# Choose policy for Explicit Locator Path (ELP) handling
+# There are three options:
+# default: don't add or remove locator records, return mapping as-is
+# both: keep the ELP, but add the next hop as a standalone non-LCAF locator with a lower priority
+# replace: remove the ELP, add the next hop as a standalone non-LCAF locator
+lisp.elpPolicy = default
javax.accessibility, \
javax.activation;version="1.1", \
javax.activity, \
- javax.annotation;version="1.1", \
- javax.annotation.processing;version="1.1", \
+ javax.annotation;version="1.0", \
+ javax.annotation.processing;version="1.6", \
javax.crypto, \
javax.crypto.interfaces, \
javax.crypto.spec, \
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers, \
+ org.xml.sax.helpers, \
sun.misc
# Standard package set. Note that:
javax.accessibility, \
javax.activation;version="1.1", \
javax.activity, \
- javax.annotation;version="1.2", \
- javax.annotation.processing;version="1.2", \
+ javax.annotation;version="1.0", \
+ javax.annotation.processing;version="1.6", \
javax.crypto, \
javax.crypto.interfaces, \
javax.crypto.spec, \
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers, \
+ org.xml.sax.helpers, \
sun.misc
jre-1.8= \
javax.accessibility, \
javax.activation;version="1.1", \
javax.activity, \
- javax.annotation;version="1.2", \
- javax.annotation.processing;version="1.2", \
+ javax.annotation;version="1.0", \
+ javax.annotation.processing;version="1.6", \
javax.crypto, \
javax.crypto.interfaces, \
javax.crypto.spec, \
javax.xml.ws.wsaddressing;version="2.2", \
javax.xml.ws.spi.http;version="2.2", \
javax.xml.xpath, \
+ javafx.animation, \
+ javafx.application, \
+ javafx.beans, \
+ javafx.beans.binding, \
+ javafx.beans.property, \
+ javafx.beans.property.adapter, \
+ javafx.beans.value, \
+ javafx.collections, \
+ javafx.concurrent, \
+ javafx.css, \
+ javafx.embed.swing, \
+ javafx.embed.swt, \
+ javafx.event, \
+ javafx.fxml, \
+ javafx.geometry, \
+ javafx.scene, \
+ javafx.scene.canvas, \
+ javafx.scene.chart, \
+ javafx.scene.control, \
+ javafx.scene.control.cell, \
+ javafx.scene.effect, \
+ javafx.scene.image, \
+ javafx.scene.input, \
+ javafx.scene.layout, \
+ javafx.scene.media, \
+ javafx.scene.paint, \
+ javafx.scene.shape, \
+ javafx.scene.text, \
+ javafx.scene.transform, \
+ javafx.scene.web, \
+ javafx.stage, \
+ javafx.util, \
+ javafx.util.converter, \
org.ietf.jgss, \
org.omg.CORBA, \
org.omg.CORBA_2_3, \
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers, \
+ org.xml.sax.helpers, \
sun.misc
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>distribution.opendaylight-karaf</artifactId>
<type>xml</type>
<scope>runtime</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-flow</artifactId>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-restconf</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../opendaylight/commons/parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/enunciate-parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>appauth</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>arphandler</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>clustering.services.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.services</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.services-implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<!-- Sonar properties using jacoco to retrieve integration test results -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.stub</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.test</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.httpclient</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>configuration</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>configuration.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>configuration.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager.implementation</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.implementation</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.it.implementation</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.shell</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>dummy-console</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>Dummy Console Interfaces for Equinox-specific CLI</description>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-adsal</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
<!-- Bundle dependencies -->
<bundle>mvn:org.opendaylight.controller/configuration.implementation/${configuration.implementation.version}</bundle>
</feature>
<feature name="odl-adsal-thirdparty" description="OpenDaylight :: AD-SAL :: Third-Party Depenencies" version="${project.version}">
- <bundle>mvn:org.opendaylight.controller/logging.bridge/${logging.bridge.version}</bundle>
+ <!-- Removed out - causes loop in loggers which polutes logs expotencially
+ <bundle>mvn:org.opendaylight.controller/logging.bridge/${logging.bridge.version}</bundle>
+ -->
</feature>
</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-base</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
</dependencies>
<feature>odl-base-jackson</feature>
<feature>odl-base-spring-security</feature>
</feature>
- <feature name="odl-base-dummy-console" description="Temporary Dummy Console" version="1.2.0-SNAPSHOT">
- <bundle>mvn:org.opendaylight.controller/dummy-console/1.2.0-SNAPSHOT</bundle>
+ <feature name="odl-base-dummy-console" description="Temporary Dummy Console" version="1.3.0-SNAPSHOT">
+ <bundle>mvn:org.opendaylight.controller/dummy-console/1.3.0-SNAPSHOT</bundle>
</feature>
<feature name="odl-base-felix-dm" description="Felix Dependency Manager" version="${felix.dependencymanager.version}">
<bundle>mvn:org.osgi/org.osgi.compendium/${osgi.compendium.version}</bundle>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>controller-features</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>extras-features</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-nsf</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
<!-- Feature Dependencies -->
<feature name="odl-nsf-service" description="OpenDaylight :: NSF :: Network Service Functions in Controller" version="${project.version}">
<feature version="${sal.version}">odl-adsal-all</feature>
- <feature version="${project.version}">odl-nsf-controller-managers</feature>
- <feature version="${project.version}">odl-adsal-controller-northbound</feature>
+ <feature version="${project.version}">odl-nsf-managers</feature>
+ <feature version="${project.version}">odl-adsal-northbound</feature>
</feature>
<feature name="odl-nsf-managers" description="OpenDaylight :: AD-SAL :: Network Service Functions" version="${project.version}">
<bundle>mvn:org.opendaylight.controller/topologymanager/${topologymanager.version}</bundle>
<bundle>mvn:org.opendaylight.controller/topologymanager.shell/${topologymanager.shell.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/hosttracker/${hosttracker.api.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/hosttracker.implementation/${hosttracker.implementation.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/hosttracker.shell/${hosttracker.shell.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting/${forwarding.staticrouting}</bundle>
-
- <bundle>mvn:org.opendaylight.controller.thirdparty/net.sf.jung2/2.0.1</bundle>
- <bundle>mvn:org.opendaylight.controller/routing.dijkstra_implementation/${routing.dijkstra_implementation.version}</bundle>
- </feature>
-
- <feature name="odl-nsf-controller-managers" description="OpenDaylight :: AD-SAL :: Network Service Functions in Controller" version="${project.version}">
- <feature version="${commons.opendaylight.version}">odl-base-all</feature>
- <feature version="${sal.version}">odl-adsal-all</feature>
- <bundle>mvn:org.opendaylight.controller/usermanager/${usermanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/usermanager.implementation/${usermanager.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/appauth/${appauth.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/connectionmanager/${connectionmanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/connectionmanager.implementation/${connectionmanager.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/containermanager/${containermanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/containermanager.implementation/${containermanager.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/statisticsmanager/${statisticsmanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/statisticsmanager.implementation/${statisticsmanager.implementation.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/switchmanager/${switchmanager.api.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/switchmanager.implementation/${switchmanager.implementation.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/forwardingrulesmanager/${forwardingrulesmanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/forwardingrulesmanager.implementation/${forwardingrulesmanager.implementation.version}</bundle>
-
- <bundle>mvn:org.opendaylight.controller/topologymanager/${topologymanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/topologymanager.shell/${topologymanager.shell.version}</bundle>
-
<bundle>mvn:org.opendaylight.controller/hosttracker/${hosttracker.api.version}</bundle>
<bundle>mvn:org.opendaylight.controller/hosttracker.implementation/${hosttracker.implementation.version}</bundle>
<bundle>mvn:org.opendaylight.controller/hosttracker.shell/${hosttracker.shell.version}</bundle>
<bundle>mvn:org.opendaylight.controller/topology.northbound/${topology.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/usermanager.northbound/${usermanager.northbound.version}</bundle>
</feature>
-
- <feature name="odl-adsal-controller-northbound" description="OpenDaylight :: AD-SAL :: Northbound APIs in Controller" version="${project.version}">
- <feature version="${commons.opendaylight.version}">odl-base-all</feature>
- <feature version="${project.version}">odl-nsf-managers</feature>
- <bundle>mvn:org.ow2.asm/asm-all/${asm.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/bundlescanner/${bundlescanner.api.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/bundlescanner.implementation/${bundlescanner.implementation.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/commons.northbound/${northbound.commons.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/connectionmanager.northbound/${connectionmanager.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
- <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
- <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
- <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/subnets.northbound/${subnets.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/switchmanager.northbound/${switchmanager.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/topology.northbound/${topology.northbound.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/usermanager.northbound/${usermanager.northbound.version}</bundle>
- </feature>
</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwardingrulesmanager</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwardingrulesmanager.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>forwardingrulesmanager.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>hosttracker.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.shell</artifactId>
<dependency>
<groupId>org.apache.karaf.shell</groupId>
<artifactId>org.apache.karaf.shell.console</artifactId>
- <version>3.0.0</version>
+ <version>${karaf.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker_new</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker_new.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>karaf-tomcat-security</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>logging.bridge</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<groupId>org.opendaylight.controller</groupId>
<artifactId>app-northbound</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>app-northbound</name>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>bundlescanner</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>bundlescanner.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>connectionmanager.northbound</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>containermanager.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>controllermanager.northbound</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>flowprogrammer.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>hosttracker.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>httpservice-bridge</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>HttpService bridge web application</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>northbound.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>northbound.client</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>jolokia-bridge</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>Jolokia bridge web application</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>networkconfig.bridgedomain.northbound</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>forwarding.staticrouting.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>statistics.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>subnets.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>swagger-ui</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>switchmanager.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>topology.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>usermanager.northbound</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<scm>
<groupId>org.opendaylight.controller</groupId>
<artifactId>northboundtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../commons/parent</relativePath>
</parent>
</modules>
</profile>
<profile>
- <id>docs</id>
+ <id>docs-java7</id>
<activation>
<activeByDefault>false</activeByDefault>
+ <jdk>1.7</jdk>
</activation>
<modules>
<module>northbound/java-client</module>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol_plugins.openflow</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol_plugins.stub</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>routing.dijkstra_implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal</artifactId>
- <version>0.9.0-SNAPSHOT</version>
+ <version>0.10.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.connection</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.connection.implementation</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.networkconfiguration</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.networkconfiguration.implementation</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustersession</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.loadbalancer</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>adsal-enunciate-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>samples.loadbalancer.northbound</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.simpleforwarding</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>security</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>statisticsmanager</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>statisticsmanager.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<!-- Sonar properties using jacoco to retrieve integration test results -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>statisticsmanager.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager</artifactId>
- <version>0.8.0-SNAPSHOT</version>
+ <version>0.9.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>switchmanager.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topologymanager</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
package org.opendaylight.controller.topologymanager.internal;
-import org.junit.Assert;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.switchmanager.SwitchConfig;
import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
public class TopologyManagerImplTest {
private TopologyManagerImpl topoManagerImpl;
}
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(1, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(1);
Assert.assertNotNull(topoManagerImpl.getEdges().get(edge));
}
+ private void verifyEdgesSize(int expSize) throws InterruptedException {
+ int timeout = 5000;
+ for(int i = 0; i < timeout / 50; i++) {
+ if(topoManagerImpl.getEdges().size() == expSize) {
+ return;
+ }
+
+ Thread.sleep(50);
+ }
+
+ Assert.fail(String.format("Expected edges size %d. Actual was %d",
+ topoManagerImpl.getEdges().size(), expSize));
+ }
+
@Test
- public void testNotifyNodeConnector() throws ConstructionException,
- InterruptedException {
+ public void testNotifyNodeConnector() throws Exception {
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
Map<String, Property> propMap = new HashMap<>();
swMgr.addNodeConnectors(nc1);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.ADDED, propMap);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.CHANGED, propMap);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
swMgr.clear();
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
// Test NodeConnector notification in the case that there is a related
// edge update just before the notification.
swMgr.addNodeConnectors(nc2);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.CHANGED, propMap);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(2, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(2);
teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.REMOVED);
teu2 = new TopoEdgeUpdate(edge2, props, UpdateType.REMOVED);
topoedgeupdateList.add(teu2);
topoManagerImpl.edgeUpdate(topoedgeupdateList);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.REMOVED, propMap);
swMgr.addNodeConnectors(nc2);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.CHANGED, propMap);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
topoManagerImpl.notifyNodeConnector(nc2, UpdateType.REMOVED, propMap);
Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
- // Give TopologyManger time to update its edges DB.
- Thread.sleep(1000);
- Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ verifyEdgesSize(0);
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>topologymanager.integrationtest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topologymanager.shell</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager.implementation</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<scm>
<groupId>org.opendaylight.controller</groupId>
<artifactId>web.brandfragment</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>devices.web</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>flows.web</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>osgi-brandfragment.web</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>OSGi management web application brand fragment</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>web</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topology.web</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>troubleshoot.web</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller.archetypes</groupId>
<artifactId>odl-model-project</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<properties>
<nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
<nexus.repository.release>opendaylight.release</nexus.repository.release>
<nexus.repository.snapshot>opendaylight.release</nexus.repository.snapshot>
- <yang.version>0.7.0-SNAPSHOT</yang.version>
- <yang.codegen.version>0.7.0-SNAPSHOT</yang.codegen.version>
+ <yang.version>0.8.0-SNAPSHOT</yang.version>
+ <yang.codegen.version>0.8.0-SNAPSHOT</yang.codegen.version>
<bundle.plugin.version>2.3.7</bundle.plugin.version>
</properties>
<scm>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-configfile-archetype</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<parent>
<groupId>org.opendaylight.controller.archetypes</groupId>
<artifactId>archetypes-parent</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-distro-archetype</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>distribution-karaf-archetype</name>
<properties>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.1.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
- <karaf.version>3.0.1</karaf.version>
+ <branding.version>1.2.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.6.0-SNAPSHOT</karaf.resources.version>
+ <karaf.version>3.0.3</karaf.version>
</properties>
<dependencies>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
<parent>
<groupId>org.opendaylight.controller.archetypes</groupId>
<artifactId>archetypes-parent</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-features-archetype</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>opendaylight-karaf-features-archetype</name>
<features.file>features.xml</features.file>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.1.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
- <karaf.version>3.0.1</karaf.version>
- <feature.test.version>1.5.0-SNAPSHOT</feature.test.version>
- <karaf.empty.version>1.5.0-SNAPSHOT</karaf.empty.version>
+ <branding.version>1.2.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.6.0-SNAPSHOT</karaf.resources.version>
+ <karaf.version>3.0.3</karaf.version>
+ <feature.test.version>1.6.0-SNAPSHOT</feature.test.version>
+ <karaf.empty.version>1.6.0-SNAPSHOT</karaf.empty.version>
<surefire.version>2.16</surefire.version>
</properties>
<dependencies>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.8.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.3.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.2.0-SNAPSHOT/xml/features</repository>
-->
<feature name='odl-${repoName}-all' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: All'>
<!--
* Basic MD-SAL Provider
<feature name='odl-${repoName}-provider' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Provider '>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${symbol_dollar}{project.version}'>odl-${repoName}-model</feature>
<bundle>mvn:${groupId}/${repoName}-provider/${symbol_dollar}{project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-${repoName}-model' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Model'>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.8.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:${groupId}/${repoName}-model/${symbol_dollar}{project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-${repoName}-provider' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Provider'>
- <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.3.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:${groupId}/${repoName}-provider/${symbol_dollar}{project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-${repoName}.xml">mvn:${groupId}/${repoName}-config/${symbol_dollar}{project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-${repoName}-provider' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Provider'>
- <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.2.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:${groupId}/${repoName}-provider/${symbol_dollar}{project.version}</bundle>
... whatever other bundles you need
</feature>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-startup-archetype</artifactId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>${project.artifactId}</name>
<parent>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>binding-parent</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<relativePath/>
</parent>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>${groupId}</groupId>
<maven>3.1.1</maven>
</prerequisites>
<properties>
- <mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
- <yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
+ <mdsal.version>1.3.0-SNAPSHOT</mdsal.version>
+ <yangtools.version>0.8.0-SNAPSHOT</yangtools.version>
<configfile.directory>etc/opendaylight/karaf</configfile.directory>
</properties>
<dependencyManagement>
<artifactId>${artifactId}-impl</artifactId>
<version>${symbol_dollar}{project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${symbol_dollar}{project.groupId}</groupId>
+ <artifactId>${artifactId}-impl</artifactId>
+ <version>${symbol_dollar}{project.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
<dependency>
<groupId>${symbol_dollar}{project.groupId}</groupId>
<artifactId>${artifactId}-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath/>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>karaf-parent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath/>
</parent>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
</parent>
<groupId>${groupId}</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<scm>
</scm>
<groupId>org.opendaylight.controller.archetypes</groupId>
<artifactId>archetypes-parent</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<distributionManagement>
<!-- OpenDayLight Released artifact -->
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>concepts</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../opendaylight</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../opendaylight</relativePath>
</parent>
<artifactId>filter-valve</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../opendaylight</relativePath>
</parent>
<artifactId>liblldp</artifactId>
- <version>0.9.0-SNAPSHOT</version>
+ <version>0.10.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<artifactId>commons.logback_settings</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath></relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
- <akka.version>2.3.9</akka.version>
- <appauth.version>0.5.0-SNAPSHOT</appauth.version>
- <archetype-app-northbound>0.1.0-SNAPSHOT</archetype-app-northbound>
- <arphandler.version>0.6.0-SNAPSHOT</arphandler.version>
+ <akka.version>2.3.10</akka.version>
+ <appauth.version>0.6.0-SNAPSHOT</appauth.version>
+ <archetype-app-northbound>0.2.0-SNAPSHOT</archetype-app-northbound>
+ <arphandler.version>0.7.0-SNAPSHOT</arphandler.version>
<!-- Plugin Versions -->
- <bundlescanner.api.version>0.5.0-SNAPSHOT</bundlescanner.api.version>
- <bundlescanner.implementation.version>0.5.0-SNAPSHOT</bundlescanner.implementation.version>
- <bundlescanner.version>0.5.0-SNAPSHOT</bundlescanner.version>
- <clustering.services.version>0.6.0-SNAPSHOT</clustering.services.version>
- <clustering.services_implementation.version>0.5.0-SNAPSHOT</clustering.services_implementation.version>
- <clustering.stub.version>0.5.0-SNAPSHOT</clustering.stub.version>
- <clustering.test.version>0.5.0-SNAPSHOT</clustering.test.version>
- <commmons.northbound.version>0.5.0-SNAPSHOT</commmons.northbound.version>
+ <bundlescanner.api.version>0.6.0-SNAPSHOT</bundlescanner.api.version>
+ <bundlescanner.implementation.version>0.6.0-SNAPSHOT</bundlescanner.implementation.version>
+ <bundlescanner.version>0.6.0-SNAPSHOT</bundlescanner.version>
+ <clustering.services.version>0.7.0-SNAPSHOT</clustering.services.version>
+ <clustering.services_implementation.version>0.6.0-SNAPSHOT</clustering.services_implementation.version>
+ <clustering.stub.version>0.6.0-SNAPSHOT</clustering.stub.version>
+ <clustering.test.version>0.6.0-SNAPSHOT</clustering.test.version>
+ <commmons.northbound.version>0.6.0-SNAPSHOT</commmons.northbound.version>
<!-- Third Party Versions -->
<codahale.metrics.version>3.0.1</codahale.metrics.version>
<commons.tomcat.api>7.0.53.v201406060720</commons.tomcat.api>
<commons.tomcat.util>7.0.53.v201406070630</commons.tomcat.util>
- <commons.checkstyle.version>0.1.0-SNAPSHOT</commons.checkstyle.version>
- <commons.httpclient.version>0.2.0-SNAPSHOT</commons.httpclient.version>
- <commons.logback_settings.version>0.1.0-SNAPSHOT</commons.logback_settings.version>
+ <commons.checkstyle.version>0.2.0-SNAPSHOT</commons.checkstyle.version>
+ <commons.httpclient.version>0.3.0-SNAPSHOT</commons.httpclient.version>
+ <commons.logback_settings.version>0.2.0-SNAPSHOT</commons.logback_settings.version>
<commons.net.version>3.0.1</commons.net.version>
- <commons.opendaylight.commons.httpclient>0.2.0-SNAPSHOT</commons.opendaylight.commons.httpclient>
- <commons.opendaylight.concepts.version>0.6.0-SNAPSHOT</commons.opendaylight.concepts.version>
- <commons.opendaylight.version>1.5.0-SNAPSHOT</commons.opendaylight.version>
- <commons.parent.version>1.1.0-SNAPSHOT</commons.parent.version>
- <commons.httpclient.version>0.2.0-SNAPSHOT</commons.httpclient.version>
- <concepts.version>0.6.0-SNAPSHOT</concepts.version>
+ <commons.opendaylight.commons.httpclient>0.3.0-SNAPSHOT</commons.opendaylight.commons.httpclient>
+ <commons.opendaylight.concepts.version>0.7.0-SNAPSHOT</commons.opendaylight.concepts.version>
+ <commons.opendaylight.version>1.6.0-SNAPSHOT</commons.opendaylight.version>
+ <commons.parent.version>1.2.0-SNAPSHOT</commons.parent.version>
+ <commons.httpclient.version>0.3.0-SNAPSHOT</commons.httpclient.version>
+ <concepts.version>0.7.0-SNAPSHOT</concepts.version>
<concurrentlinkedhashmap.version>1.4</concurrentlinkedhashmap.version>
- <config.version>0.3.0-SNAPSHOT</config.version>
- <aaa.version>0.2.0-SNAPSHOT</aaa.version>
+ <config.version>0.4.0-SNAPSHOT</config.version>
+ <aaa.version>0.3.0-SNAPSHOT</aaa.version>
<config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
<config.clustering.configfile>05-clustering.xml</config.clustering.configfile>
<config.netty.configfile>00-netty.xml</config.netty.configfile>
<config.netconf.mdsal.configfile>08-mdsal-netconf.xml</config.netconf.mdsal.configfile>
<config.restconf.configfile>10-rest-connector.xml</config.restconf.configfile>
<config.netconf.connector.configfile>99-netconf-connector.xml</config.netconf.connector.configfile>
- <configuration.implementation.version>0.5.0-SNAPSHOT</configuration.implementation.version>
- <configuration.version>0.5.0-SNAPSHOT</configuration.version>
- <connectionmanager.version>0.2.0-SNAPSHOT</connectionmanager.version>
- <containermanager.it.version>0.6.0-SNAPSHOT</containermanager.it.version>
- <containermanager.northbound.version>0.5.0-SNAPSHOT</containermanager.northbound.version>
- <containermanager.shell.version>0.6.0-SNAPSHOT</containermanager.shell.version>
- <containermanager.version>0.6.0-SNAPSHOT</containermanager.version>
- <controllermanager.northbound.version>0.1.0-SNAPSHOT</controllermanager.northbound.version>
- <devices.web.version>0.5.0-SNAPSHOT</devices.web.version>
- <dummy-console.version>1.2.0-SNAPSHOT</dummy-console.version>
+ <configuration.implementation.version>0.6.0-SNAPSHOT</configuration.implementation.version>
+ <configuration.version>0.6.0-SNAPSHOT</configuration.version>
+ <connectionmanager.version>0.3.0-SNAPSHOT</connectionmanager.version>
+ <containermanager.it.version>0.7.0-SNAPSHOT</containermanager.it.version>
+ <containermanager.northbound.version>0.6.0-SNAPSHOT</containermanager.northbound.version>
+ <containermanager.shell.version>0.7.0-SNAPSHOT</containermanager.shell.version>
+ <containermanager.version>0.7.0-SNAPSHOT</containermanager.version>
+ <controllermanager.northbound.version>0.2.0-SNAPSHOT</controllermanager.northbound.version>
+ <devices.web.version>0.6.0-SNAPSHOT</devices.web.version>
+ <dummy-console.version>1.3.0-SNAPSHOT</dummy-console.version>
<config.statistics.manager.configfile>30-statistics-manager.xml</config.statistics.manager.configfile>
- <eclipse.persistence.version>2.5.0</eclipse.persistence.version>
<eclipse.jdt.core.compiler.batch.version>3.8.0.I20120518-2145</eclipse.jdt.core.compiler.batch.version>
<!-- OpenEXI third party lib for netconf-->
<exi.nagasena.version>0000.0002.0038.0</exi.nagasena.version>
<felix.util.version>1.6.0</felix.util.version>
- <features.test.version>1.5.0-SNAPSHOT</features.test.version>
- <filtervalve.version>1.5.0-SNAPSHOT</filtervalve.version>
+ <features.test.version>1.6.0-SNAPSHOT</features.test.version>
+ <filtervalve.version>1.6.0-SNAPSHOT</filtervalve.version>
<findbugs.maven.plugin.version>2.4.0</findbugs.maven.plugin.version>
- <flowprogrammer.northbound.version>0.5.0-SNAPSHOT</flowprogrammer.northbound.version>
- <flows.web.version>0.5.0-SNAPSHOT</flows.web.version>
- <forwarding.staticrouting>0.6.0-SNAPSHOT</forwarding.staticrouting>
- <forwarding.staticrouting.northbound.version>0.5.0-SNAPSHOT</forwarding.staticrouting.northbound.version>
- <forwardingrulesmanager.implementation.version>0.5.0-SNAPSHOT</forwardingrulesmanager.implementation.version>
- <forwardingrulesmanager.version>0.7.0-SNAPSHOT</forwardingrulesmanager.version>
- <hosttracker.api.version>0.6.0-SNAPSHOT</hosttracker.api.version>
- <hosttracker.implementation.version>0.6.0-SNAPSHOT</hosttracker.implementation.version>
- <hosttracker.northbound.version>0.5.0-SNAPSHOT</hosttracker.northbound.version>
- <hosttracker.shell.version>1.1.0-SNAPSHOT</hosttracker.shell.version>
- <hosttracker_new.api.version>0.5.0-SNAPSHOT</hosttracker_new.api.version>
- <hosttracker_new.implementation.version>0.5.0-SNAPSHOT</hosttracker_new.implementation.version>
- <httpservice-bridge.northbound.version>0.1.0-SNAPSHOT</httpservice-bridge.northbound.version>
- <ietf-inet-types.version>2010.09.24.7-SNAPSHOT</ietf-inet-types.version>
- <ietf-restconf.version>2013.10.19.7-SNAPSHOT</ietf-restconf.version>
- <ietf-topology.version>2013.10.21.7-SNAPSHOT</ietf-topology.version>
- <ietf-yang-types.version>2010.09.24.7-SNAPSHOT</ietf-yang-types.version>
+ <flowprogrammer.northbound.version>0.6.0-SNAPSHOT</flowprogrammer.northbound.version>
+ <flows.web.version>0.6.0-SNAPSHOT</flows.web.version>
+ <forwarding.staticrouting>0.7.0-SNAPSHOT</forwarding.staticrouting>
+ <forwarding.staticrouting.northbound.version>0.6.0-SNAPSHOT</forwarding.staticrouting.northbound.version>
+ <forwardingrulesmanager.implementation.version>0.6.0-SNAPSHOT</forwardingrulesmanager.implementation.version>
+ <forwardingrulesmanager.version>0.8.0-SNAPSHOT</forwardingrulesmanager.version>
+ <hosttracker.api.version>0.7.0-SNAPSHOT</hosttracker.api.version>
+ <hosttracker.implementation.version>0.7.0-SNAPSHOT</hosttracker.implementation.version>
+ <hosttracker.northbound.version>0.6.0-SNAPSHOT</hosttracker.northbound.version>
+ <hosttracker.shell.version>1.2.0-SNAPSHOT</hosttracker.shell.version>
+ <hosttracker_new.api.version>0.6.0-SNAPSHOT</hosttracker_new.api.version>
+ <hosttracker_new.implementation.version>0.6.0-SNAPSHOT</hosttracker_new.implementation.version>
+ <httpservice-bridge.northbound.version>0.2.0-SNAPSHOT</httpservice-bridge.northbound.version>
+ <ietf-inet-types.version>2010.09.24.8-SNAPSHOT</ietf-inet-types.version>
+ <ietf-restconf.version>2013.10.19.8-SNAPSHOT</ietf-restconf.version>
+ <ietf-topology.version>2013.10.21.8-SNAPSHOT</ietf-topology.version>
+ <ietf-yang-types.version>2010.09.24.8-SNAPSHOT</ietf-yang-types.version>
<jdepend.maven.plugin.version>2.0-beta-2</jdepend.maven.plugin.version>
<jmxGeneratorPath>src/main/yang-gen-config</jmxGeneratorPath>
- <jolokia-bridge.version>0.1.0-SNAPSHOT</jolokia-bridge.version>
+ <jolokia-bridge.version>0.2.0-SNAPSHOT</jolokia-bridge.version>
<jolokia.version>1.1.4</jolokia.version>
<jsr311.api.version>1.1.1</jsr311.api.version>
<jsr311.v2.api.version>2.0</jsr311.v2.api.version>
- <karaf.branding.version>1.1.0-SNAPSHOT</karaf.branding.version>
- <karaf.shell.version>3.0.0</karaf.shell.version>
- <karaf.version>3.0.1</karaf.version>
+ <karaf.branding.version>1.2.0-SNAPSHOT</karaf.branding.version>
+ <karaf.shell.version>${karaf.version}</karaf.shell.version>
<leveldb.version>0.7</leveldb.version>
<leveldbjni.version>1.8</leveldbjni.version>
<lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
- <logging.bridge.version>0.5.0-SNAPSHOT</logging.bridge.version>
+ <logging.bridge.version>0.6.0-SNAPSHOT</logging.bridge.version>
<maven.plugin.api.version>3.0.5</maven.plugin.api.version>
<mimepull.version>1.9.4</mimepull.version>
- <mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
- <netconf.version>0.3.0-SNAPSHOT</netconf.version>
- <networkconfig.bridgedomain.northbound.version>0.1.0-SNAPSHOT</networkconfig.bridgedomain.northbound.version>
+ <mdsal.version>1.3.0-SNAPSHOT</mdsal.version>
+ <netconf.version>0.4.0-SNAPSHOT</netconf.version>
+ <networkconfig.bridgedomain.northbound.version>0.2.0-SNAPSHOT</networkconfig.bridgedomain.northbound.version>
<!-- ODL repository / plugin repository -->
<nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
- <northbound.commons.version>0.5.0-SNAPSHOT</northbound.commons.version>
- <northbound.hosttracker.version>1.5.0-SNAPSHOT</northbound.hosttracker.version>
- <northbound.jolokia.version>1.5.0-SNAPSHOT</northbound.jolokia.version>
- <opendaylight-l2-types.version>2013.08.27.7-SNAPSHOT</opendaylight-l2-types.version>
- <osgi-brandfragment.web.version>0.1.0-SNAPSHOT</osgi-brandfragment.web.version>
+ <northbound.commons.version>0.6.0-SNAPSHOT</northbound.commons.version>
+ <northbound.hosttracker.version>1.6.0-SNAPSHOT</northbound.hosttracker.version>
+ <northbound.jolokia.version>1.6.0-SNAPSHOT</northbound.jolokia.version>
+ <opendaylight-l2-types.version>2013.08.27.8-SNAPSHOT</opendaylight-l2-types.version>
+ <osgi-brandfragment.web.version>0.2.0-SNAPSHOT</osgi-brandfragment.web.version>
<parboiled.version>1.1.6</parboiled.version>
<parboiled.scala.version>1.1.6</parboiled.scala.version>
<propertymavenplugin.version>1.0-alpha-2</propertymavenplugin.version>
<protobuf.version>2.5.0</protobuf.version>
- <protocol-framework.version>0.6.0-SNAPSHOT</protocol-framework.version>
- <protocol_plugins.openflow.version>0.5.0-SNAPSHOT</protocol_plugins.openflow.version>
- <protocol_plugins.stub.version>0.5.0-SNAPSHOT</protocol_plugins.stub.version>
- <routing.dijkstra_implementation.version>0.5.0-SNAPSHOT</routing.dijkstra_implementation.version>
- <sal.connection.version>0.2.0-SNAPSHOT</sal.connection.version>
- <sal.implementation.version>0.5.0-SNAPSHOT</sal.implementation.version>
- <sal.networkconfiguration.version>0.1.0-SNAPSHOT</sal.networkconfiguration.version>
- <sal.version>0.9.0-SNAPSHOT</sal.version>
+ <protocol-framework.version>0.7.0-SNAPSHOT</protocol-framework.version>
+ <protocol_plugins.openflow.version>0.6.0-SNAPSHOT</protocol_plugins.openflow.version>
+ <protocol_plugins.stub.version>0.6.0-SNAPSHOT</protocol_plugins.stub.version>
+ <routing.dijkstra_implementation.version>0.6.0-SNAPSHOT</routing.dijkstra_implementation.version>
+ <sal.connection.version>0.3.0-SNAPSHOT</sal.connection.version>
+ <sal.implementation.version>0.6.0-SNAPSHOT</sal.implementation.version>
+ <sal.networkconfiguration.version>0.2.0-SNAPSHOT</sal.networkconfiguration.version>
+ <sal.version>0.10.0-SNAPSHOT</sal.version>
<salGeneratorPath>src/main/yang-gen-sal</salGeneratorPath>
- <samples.loadbalancer>0.6.0-SNAPSHOT</samples.loadbalancer>
- <samples.loadbalancer.northbound.version>0.5.0-SNAPSHOT</samples.loadbalancer.northbound.version>
- <samples.simpleforwarding.version>0.5.0-SNAPSHOT</samples.simpleforwarding.version>
- <sanitytest.version>0.5.0-SNAPSHOT</sanitytest.version>
+ <samples.loadbalancer>0.7.0-SNAPSHOT</samples.loadbalancer>
+ <samples.loadbalancer.northbound.version>0.6.0-SNAPSHOT</samples.loadbalancer.northbound.version>
+ <samples.simpleforwarding.version>0.6.0-SNAPSHOT</samples.simpleforwarding.version>
+ <sanitytest.version>0.6.0-SNAPSHOT</sanitytest.version>
<scala.version>2.10</scala.version>
<scala.micro.version>4</scala.micro.version>
- <security.version>0.5.0-SNAPSHOT</security.version>
- <karaf.security.version>0.5.0-SNAPSHOT</karaf.security.version>
+ <security.version>0.6.0-SNAPSHOT</security.version>
+ <karaf.security.version>0.6.0-SNAPSHOT</karaf.security.version>
<shapeless.version>1.2.4</shapeless.version>
<sitedeploy>dav:http://nexus.opendaylight.org/content/sites/site</sitedeploy>
<sonar.branch>${user.name}-private-view</sonar.branch>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
<spring-security.version>3.1.3.RELEASE</spring-security.version>
<spring.version>3.1.3.RELEASE</spring.version>
- <statistics.northbound.version>0.5.0-SNAPSHOT</statistics.northbound.version>
- <statisticsmanager.implementation.version>0.5.0-SNAPSHOT</statisticsmanager.implementation.version>
- <statisticsmanager.version>0.6.0-SNAPSHOT</statisticsmanager.version>
- <subnets.northbound.version>0.5.0-SNAPSHOT</subnets.northbound.version>
+ <statistics.northbound.version>0.6.0-SNAPSHOT</statistics.northbound.version>
+ <statisticsmanager.implementation.version>0.6.0-SNAPSHOT</statisticsmanager.implementation.version>
+ <statisticsmanager.version>0.7.0-SNAPSHOT</statisticsmanager.version>
+ <subnets.northbound.version>0.6.0-SNAPSHOT</subnets.northbound.version>
<surefire.version>2.15</surefire.version>
- <switchmanager.api.version>0.8.0-SNAPSHOT</switchmanager.api.version>
- <switchmanager.implementation.version>0.5.0-SNAPSHOT</switchmanager.implementation.version>
- <switchmanager.northbound.version>0.5.0-SNAPSHOT</switchmanager.northbound.version>
+ <switchmanager.api.version>0.9.0-SNAPSHOT</switchmanager.api.version>
+ <switchmanager.implementation.version>0.6.0-SNAPSHOT</switchmanager.implementation.version>
+ <switchmanager.northbound.version>0.6.0-SNAPSHOT</switchmanager.northbound.version>
<testvm.argLine>-Xmx1024m -XX:MaxPermSize=256m</testvm.argLine>
- <topology.northbound.version>0.5.0-SNAPSHOT</topology.northbound.version>
- <topology.web.version>0.5.0-SNAPSHOT</topology.web.version>
- <topologymanager.version>0.5.0-SNAPSHOT</topologymanager.version>
- <topologymanager.shell.version>1.1.0-SNAPSHOT</topologymanager.shell.version>
- <troubleshoot.web.version>0.5.0-SNAPSHOT</troubleshoot.web.version>
+ <topology.northbound.version>0.6.0-SNAPSHOT</topology.northbound.version>
+ <topology.web.version>0.6.0-SNAPSHOT</topology.web.version>
+ <topologymanager.version>0.6.0-SNAPSHOT</topologymanager.version>
+ <topologymanager.shell.version>1.2.0-SNAPSHOT</topologymanager.shell.version>
+ <troubleshoot.web.version>0.6.0-SNAPSHOT</troubleshoot.web.version>
<typesafe.config.version>1.2.0</typesafe.config.version>
<uncommons.maths.version>1.2.2a</uncommons.maths.version>
- <usermanager.implementation.version>0.5.0-SNAPSHOT</usermanager.implementation.version>
- <usermanager.northbound.version>0.1.0-SNAPSHOT</usermanager.northbound.version>
- <usermanager.version>0.5.0-SNAPSHOT</usermanager.version>
- <nsf.version>0.5.0-SNAPSHOT</nsf.version>
- <web.version>0.5.0-SNAPSHOT</web.version>
- <yang-ext.version>2013.09.07.7-SNAPSHOT</yang-ext.version>
- <yang-jmx-generator.version>1.1.0-SNAPSHOT</yang-jmx-generator.version>
- <yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
- <sshd-core.version>0.12.0</sshd-core.version>
+ <usermanager.implementation.version>0.6.0-SNAPSHOT</usermanager.implementation.version>
+ <usermanager.northbound.version>0.2.0-SNAPSHOT</usermanager.northbound.version>
+ <usermanager.version>0.6.0-SNAPSHOT</usermanager.version>
+ <nsf.version>0.6.0-SNAPSHOT</nsf.version>
+ <web.version>0.6.0-SNAPSHOT</web.version>
+ <yang-ext.version>2013.09.07.8-SNAPSHOT</yang-ext.version>
+ <yang-jmx-generator.version>1.2.0-SNAPSHOT</yang-jmx-generator.version>
+ <yangtools.version>0.8.0-SNAPSHOT</yangtools.version>
+ <sshd-core.version>0.14.0</sshd-core.version>
<jmh.version>0.9.7</jmh.version>
<lmax.version>3.3.0</lmax.version>
</properties>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>httpservice-bridge</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>jolokia-bridge</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
<!-- Karaf Dependencies -->
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.logback_settings</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.logback_settings</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
<executions>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
<executions>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol-framework</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
<description>Common protocol framework</description>
if (LocalServerChannel.class.equals(channelClass) == false) {
// makes no sense for LocalServer and produces warning
b.childOption(ChannelOption.SO_KEEPALIVE, true);
+ b.childOption(ChannelOption.TCP_NODELAY , true);
}
b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
customizeBootstrap(b);
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>config-api</artifactId>
@Immutable
public class CommitStatus {
private final List<ObjectName> newInstances, reusedInstances,
- recreatedInstances;
+ recreatedInstances;
/**
* @param newInstances newly created instances
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-artifacts</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-manager</artifactId>
static final String SUBCLASS2 = "subclass2";
@ServiceInterfaceAnnotation(value = SIMPLE, osgiRegistrationType = Executor.class,
- namespace = "ns", revision = "rev", localName = SIMPLE)
+ namespace = "ns", revision = "rev", localName = SIMPLE)
static interface SimpleSI extends AbstractServiceInterface {
}
}
@ServiceInterfaceAnnotation(value = SUBCLASS2, osgiRegistrationType = ExecutorService.class,
- namespace = "ns", revision = "rev", localName = SUBCLASS2)
+ namespace = "ns", revision = "rev", localName = SUBCLASS2)
static interface SubSI2 extends SubSI {
import org.opendaylight.controller.config.api.annotations.ServiceInterfaceAnnotation;
@ServiceInterfaceAnnotation(value = TestingParallelAPSPConfigMXBean.NAME, osgiRegistrationType = TestingAPSP.class,
-namespace = "namespace", revision = "rev", localName = TestingParallelAPSPConfigMXBean.NAME)
+ namespace = "namespace", revision = "rev", localName = TestingParallelAPSPConfigMXBean.NAME)
public interface TestingParallelAPSPConfigMXBean {
static final String NAME = "apsp";
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>config-module-archetype</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>config-netty-config</artifactId>
<description>Configuration files for sal-rest-connector</description>
<parent>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>binding-parent</artifactId>
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<relativePath/>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
- <config.version>0.3.0-SNAPSHOT</config.version>
- <mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
- <yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
+ <config.version>0.4.0-SNAPSHOT</config.version>
+ <mdsal.version>1.3.0-SNAPSHOT</mdsal.version>
+ <yangtools.version>0.8.0-SNAPSHOT</yangtools.version>
<jmxGeneratorPath>src/main/yang-gen-config</jmxGeneratorPath>
<config.file>src/main/config/default-config.xml</config.file>
</properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-persister-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-persister-directory-xml-adapter</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
import com.google.common.base.Preconditions;
import java.util.LinkedHashSet;
import java.util.List;
-import java.util.Map;
import javax.xml.bind.JAXBException;
import org.apache.karaf.features.BundleInfo;
import org.apache.karaf.features.Conditional;
import org.apache.karaf.features.ConfigFileInfo;
+import org.apache.karaf.features.ConfigInfo;
import org.apache.karaf.features.Dependency;
import org.apache.karaf.features.Feature;
import org.slf4j.Logger;
/*
* @param f Feature to wrap
*/
- public AbstractFeatureWrapper(Feature f) {
+ public AbstractFeatureWrapper(final Feature f) {
Preconditions.checkNotNull(f,"FeatureWrapper requires non-null Feature in constructor");
this.feature = f;
}
* from the underlying Feature Config files
*/
public LinkedHashSet<FeatureConfigSnapshotHolder> getFeatureConfigSnapshotHolders() throws Exception {
- LinkedHashSet <FeatureConfigSnapshotHolder> snapShotHolders = new LinkedHashSet<FeatureConfigSnapshotHolder>();
- for(ConfigFileInfo c: getConfigurationFiles()) {
+ final LinkedHashSet <FeatureConfigSnapshotHolder> snapShotHolders = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+ for(final ConfigFileInfo c: getConfigurationFiles()) {
try {
snapShotHolders.add(new FeatureConfigSnapshotHolder(c,this));
- } catch (JAXBException e) {
+ } catch (final JAXBException e) {
LOG.debug("{} is not a config subsystem config file",c.getFinalname());
}
}
}
@Override
- public boolean equals(Object obj) {
+ public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (getClass() != obj.getClass()) {
return false;
}
- AbstractFeatureWrapper other = (AbstractFeatureWrapper) obj;
+ final AbstractFeatureWrapper other = (AbstractFeatureWrapper) obj;
if (feature == null) {
if (other.feature != null) {
return false;
* @return
* @see org.apache.karaf.features.Feature#getId()
*/
+ @Override
public String getId() {
return feature.getId();
}
* @return
* @see org.apache.karaf.features.Feature#getName()
*/
+ @Override
public String getName() {
return feature.getName();
}
* @return
* @see org.apache.karaf.features.Feature#getDescription()
*/
+ @Override
public String getDescription() {
return feature.getDescription();
}
* @return
* @see org.apache.karaf.features.Feature#getDetails()
*/
+ @Override
public String getDetails() {
return feature.getDetails();
}
* @return
* @see org.apache.karaf.features.Feature#getVersion()
*/
+ @Override
public String getVersion() {
return feature.getVersion();
}
* @return
* @see org.apache.karaf.features.Feature#hasVersion()
*/
+ @Override
public boolean hasVersion() {
return feature.hasVersion();
}
* @return
* @see org.apache.karaf.features.Feature#getResolver()
*/
+ @Override
public String getResolver() {
return feature.getResolver();
}
* @return
* @see org.apache.karaf.features.Feature#getInstall()
*/
+ @Override
public String getInstall() {
return feature.getInstall();
}
* @return
* @see org.apache.karaf.features.Feature#getDependencies()
*/
+ @Override
public List<Dependency> getDependencies() {
return feature.getDependencies();
}
* @return
* @see org.apache.karaf.features.Feature#getBundles()
*/
+ @Override
public List<BundleInfo> getBundles() {
return feature.getBundles();
}
* @return
* @see org.apache.karaf.features.Feature#getConfigurations()
*/
- public Map<String, Map<String, String>> getConfigurations() {
+ @Override
+ public List<ConfigInfo> getConfigurations() {
return feature.getConfigurations();
}
* @return
* @see org.apache.karaf.features.Feature#getConfigurationFiles()
*/
+ @Override
public List<ConfigFileInfo> getConfigurationFiles() {
return feature.getConfigurationFiles();
}
* @return
* @see org.apache.karaf.features.Feature#getConditional()
*/
+ @Override
public List<? extends Conditional> getConditional() {
return feature.getConditional();
}
* @return
* @see org.apache.karaf.features.Feature#getStartLevel()
*/
+ @Override
public int getStartLevel() {
return feature.getStartLevel();
}
* @return
* @see org.apache.karaf.features.Feature#getRegion()
*/
+ @Override
public String getRegion() {
return feature.getRegion();
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-persister-file-xml-adapter</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>config-plugin-parent</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-util</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>logback-config-loader</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>logback-config</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-config-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-event-executor-config</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-threadgroup-config</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-timer-config</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>shutdown-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>shutdown-impl</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>threadpool-config-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>threadpool-config-impl</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>yang-jmx-generator-it</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>yang-jmx-generator-plugin</artifactId>
private static class SieASTVisitor extends ASTVisitor {
protected String packageName, descriptionAnotValue, sieAnnotValue,
- sieAnnotOsgiRegistrationType, type, extnds, javadoc;
+ sieAnnotOsgiRegistrationType, type, extnds, javadoc;
protected Map<String, String> methodDescriptions = Maps.newHashMap();
@Override
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
public abstract class AbstractAttribute implements AttributeIfc {
private final String attributeYangName, upperCaseCammelCase,
- lowerCaseCammelCase;
+ lowerCaseCammelCase;
protected final DataSchemaNode node;
private static String getLocalName(DataSchemaNode attrNode) {
protected Map<String, Module> namesToModules; // are module names globally
// unique?
protected Module configModule, rpcContextModule, threadsModule,
- threadsJavaModule, bgpListenerJavaModule, ietfInetTypesModule,
- jmxModule, jmxImplModule, testFilesModule, testFiles1Module;
+ threadsJavaModule, bgpListenerJavaModule, ietfInetTypesModule,
+ jmxModule, jmxImplModule, testFilesModule, testFiles1Module;
public static final String EVENTBUS_MXB_NAME = "eventbus";
public static final String ASYNC_EVENTBUS_MXB_NAME = "async-eventbus";
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>yang-test-plugin</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<parent>
<artifactId>sal-parent</artifactId>
<groupId>org.opendaylight.controller</groupId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>md-sal-config</artifactId>
<description>Configuration files for md-sal</description>
<provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
</instance>
</service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-normalized-node-serializer</type>
+ <instance>
+ <name>runtime-mapping-singleton</name>
+ <provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
+ </instance>
+ </service>
<service>
<type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-new-notification-service</type>
<instance>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>mdsal-artifacts</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<artifactId>sal-remoterpc-connector</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft-example</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<!-- OpenFlow stuff -->
<dependency>
<artifactId>model-flow-statistics</artifactId>
<version>${project.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-flow</artifactId>
- <version>${project.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<!-- RESTCONF -->
<dependency>
<artifactId>messagebus-api</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>messagebus-spi</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>messagebus-impl</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>messagebus-api</artifactId>
<parent>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal-parent</artifactId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.3.0-SNAPSHOT</version>\r
</parent>\r
\r
<artifactId>messagebus-config</artifactId>\r
and is available at http://www.eclipse.org/legal/epl-v10.html
-->
<snapshot>
- <configuration>
+ <configuration>
<data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<module>
- <name>messagebus-app</name>
+ <name>messagebus-app-impl</name>
<type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:messagebus:app:impl">binding-impl:messagebus-app-impl</type>
<binding-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:messagebus:app:impl">
<type xmlns:md-sal-binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">md-sal-binding:binding-broker-osgi-registry</type>
</namespace-to-stream>
</module>
</modules>
+ <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <service>
+ <type xmlns:mb-esr="urn:opendaylight:params:xml:ns:yang:controller:messagebus:spi:eventsourceregistry">mb-esr:event-source-registry</type>
+ <instance>
+ <name>messagebus-app-impl</name>
+ <provider>/modules/module[type='messagebus-app-impl'][name='messagebus-app-impl']</provider>
+ </instance>
+ </service>
+ </services>
</data>
</configuration>
<required-capabilities>
<capability>urn:opendaylight:params:xml:ns:yang:controller:messagebus:app:impl?module=messagebus-app-impl&revision=2015-02-03</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:messagebus:spi:eventsourceregistry?module=messagebus-event-source-registry&revision=2015-04-02</capability>
</required-capabilities>
</snapshot>
<parent>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal-parent</artifactId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.3.0-SNAPSHOT</version>\r
</parent>\r
\r
<artifactId>messagebus-impl</artifactId>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>messagebus-api</artifactId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.3.0-SNAPSHOT</version>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>messagebus-spi</artifactId>\r
+ <version>1.3.0-SNAPSHOT</version>\r
</dependency>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
-/**
+/*
* Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
*/
package org.opendaylight.controller.config.yang.messagebus.app.impl;
-import java.util.List;
+import java.util.HashSet;
+import java.util.Set;
+
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
import org.opendaylight.controller.messagebus.app.impl.EventSourceTopology;
-import org.opendaylight.controller.messagebus.app.impl.NetconfEventSourceManager;
+import org.opendaylight.controller.messagebus.eventsources.netconf.NetconfEventSourceManager;
+import org.opendaylight.controller.messagebus.spi.EventSource;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class MessageBusAppImplModule extends
- org.opendaylight.controller.config.yang.messagebus.app.impl.AbstractMessageBusAppImplModule {
+import com.google.common.base.Preconditions;
+
+public class MessageBusAppImplModule extends org.opendaylight.controller.config.yang.messagebus.app.impl.AbstractMessageBusAppImplModule {
private static final Logger LOGGER = LoggerFactory.getLogger(MessageBusAppImplModule.class);
private BundleContext bundleContext;
@Override
public java.lang.AutoCloseable createInstance() {
- final List<NamespaceToStream> namespaceMapping = getNamespaceToStream();
final ProviderContext bindingCtx = getBindingBrokerDependency().registerProvider(new Providers.BindingAware());
final ProviderSession domCtx = getDomBrokerDependency().registerProvider(new Providers.BindingIndependent());
-
final DataBroker dataBroker = bindingCtx.getSALService(DataBroker.class);
final DOMNotificationPublishService domPublish = domCtx.getService(DOMNotificationPublishService.class);
final DOMMountPointService domMount = domCtx.getService(DOMMountPointService.class);
final MountPointService bindingMount = bindingCtx.getSALService(MountPointService.class);
final RpcProviderRegistry rpcRegistry = bindingCtx.getSALService(RpcProviderRegistry.class);
- final EventSourceTopology eventSourceTopology = new EventSourceTopology(dataBroker, rpcRegistry);
- final NetconfEventSourceManager eventSourceManager = new NetconfEventSourceManager(dataBroker, domPublish,
- domMount, bindingMount, eventSourceTopology, getNamespaceToStream());
-
- final AutoCloseable closer = new AutoCloseable() {
- @Override
- public void close() {
- eventSourceTopology.close();
- eventSourceManager.close();
- }
- };
+ final EventSourceRegistryWrapper eventSourceRegistryWrapper = new EventSourceRegistryWrapper(new EventSourceTopology(dataBroker, rpcRegistry));
+ final NetconfEventSourceManager netconfEventSourceManager = NetconfEventSourceManager.create(dataBroker, domPublish,domMount, bindingMount, eventSourceRegistryWrapper, getNamespaceToStream());
+ eventSourceRegistryWrapper.addAutoCloseable(netconfEventSourceManager);
+ LOGGER.info("Messagebus initialized");
+ return eventSourceRegistryWrapper;
- return closer;
}
- private void closeProvider(final AutoCloseable closable) {
- try {
- closable.close();
- } catch (final Exception e) {
- LOGGER.error("Exception while closing: {}\n Exception: {}", closable, e);
+ //TODO: separate NetconfEventSource into separate bundle, remove this wrapper, return EventSourceTopology directly as EventSourceRegistry
+ private class EventSourceRegistryWrapper implements EventSourceRegistry{
+
+ private final EventSourceRegistry baseEventSourceRegistry;
+ private final Set<AutoCloseable> autoCloseables = new HashSet<>();
+
+ public EventSourceRegistryWrapper(EventSourceRegistry baseEventSourceRegistry) {
+ this.baseEventSourceRegistry = baseEventSourceRegistry;
}
+
+ public void addAutoCloseable(AutoCloseable ac){
+ Preconditions.checkNotNull(ac);
+ autoCloseables.add(ac);
+ }
+
+ @Override
+ public void close() throws Exception {
+ for(AutoCloseable ac : autoCloseables){
+ ac.close();
+ }
+ baseEventSourceRegistry.close();
+ }
+
+ @Override
+ public <T extends EventSource> EventSourceRegistration<T> registerEventSource(T eventSource) {
+ return this.baseEventSourceRegistry.registerEventSource(eventSource);
+ }
+
}
}
-/**
+/*
* Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.messagebus.app.impl;
+
+import org.opendaylight.controller.messagebus.spi.EventSource;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+
+import com.google.common.base.Preconditions;
+
+
+class EventSourceRegistrationImpl <T extends EventSource> extends AbstractObjectRegistration<T> implements EventSourceRegistration<T>{
+
+ private final EventSourceTopology eventSourceTopology;
+
+ /**
+ * @param instance of EventSource that has been registered by {@link EventSourceRegistryImpl#registerEventSource(Node, EventSource)}
+ */
+ public EventSourceRegistrationImpl(T instance, EventSourceTopology eventSourceTopology) {
+ super(instance);
+ this.eventSourceTopology = Preconditions.checkNotNull(eventSourceTopology);
+ }
+
+ @Override
+ protected void removeRegistration() {
+ this.eventSourceTopology.unRegister(getInstance());
+ }
+
+}
package org.opendaylight.controller.messagebus.app.impl;
-import com.google.common.base.Preconditions;
import java.util.Map;
import java.util.regex.Pattern;
+
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
public class EventSourceTopic implements DataChangeListener {
private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(EventSourceTopic.class);
private final NotificationPattern notificationPattern;
public EventSourceTopic(final NotificationPattern notificationPattern, final String nodeIdPattern, final EventSourceService eventSource) {
this.notificationPattern = Preconditions.checkNotNull(notificationPattern);
this.sourceService = eventSource;
+ this.nodeIdPattern = Pattern.compile(nodeIdPattern);
- // FIXME: regex should be the language of nodeIdPattern
- final String regex = Util.wildcardToRegex(nodeIdPattern);
- this.nodeIdPattern = Pattern.compile(regex);
-
-
- // FIXME: We need to perform some salting in order to make
- // the topic IDs less predictable.
- this.topicId = new TopicId(Util.md5String(notificationPattern + nodeIdPattern));
+ this.topicId = new TopicId(Util.getUUIDIdent());
}
public TopicId getTopicId() {
}
public void notifyNode(final InstanceIdentifier<?> nodeId) {
+
try {
- sourceService.joinTopic(getJoinTopicInputArgument(nodeId));
+ RpcResult<JoinTopicOutput> rpcResultJoinTopic = sourceService.joinTopic(getJoinTopicInputArgument(nodeId)).get();
+ if(rpcResultJoinTopic.isSuccessful() == false){
+ for(RpcError err : rpcResultJoinTopic.getErrors()){
+ LOG.error("Can not join topic: [{}] on node: [{}]. Error: {}",getTopicId().getValue(),nodeId.toString(),err.toString());
+ }
+ }
} catch (final Exception e) {
LOG.error("Could not invoke join topic for node {}", nodeId);
}
return jti;
}
-
}
package org.opendaylight.controller.messagebus.app.impl;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.Futures;
-
-import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+import java.util.regex.Pattern;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.messagebus.spi.EventSource;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.regex.Pattern;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+
-public class EventSourceTopology implements EventAggregatorService, AutoCloseable {
+public class EventSourceTopology implements EventAggregatorService, EventSourceRegistry {
private static final Logger LOG = LoggerFactory.getLogger(EventSourceTopology.class);
private static final String TOPOLOGY_ID = "EVENT-SOURCE-TOPOLOGY" ;
.child(TopologyTypes.class)
.augmentation(TopologyTypes1.class);
- private final Map<DataChangeListener, ListenerRegistration<DataChangeListener>> registrations =
+ private final Map<EventSourceTopic, ListenerRegistration<DataChangeListener>> topicListenerRegistrations =
+ new ConcurrentHashMap<>();
+ private final Map<NodeKey, RoutedRpcRegistration<EventSourceService>> routedRpcRegistrations =
new ConcurrentHashMap<>();
private final DataBroker dataBroker;
private final RpcRegistration<EventAggregatorService> aggregatorRpcReg;
private final EventSourceService eventSourceService;
private final RpcProviderRegistry rpcRegistry;
- private final ExecutorService executorService;
public EventSourceTopology(final DataBroker dataBroker, final RpcProviderRegistry rpcRegistry) {
+
this.dataBroker = dataBroker;
- this.executorService = Executors.newCachedThreadPool();
this.rpcRegistry = rpcRegistry;
aggregatorRpcReg = rpcRegistry.addRpcImplementation(EventAggregatorService.class, this);
eventSourceService = rpcRegistry.getRpcService(EventSourceService.class);
final TopologyEventSource topologySource = new TopologyEventSourceBuilder().build();
final TopologyTypes1 topologyTypeAugment = new TopologyTypes1Builder().setTopologyEventSource(topologySource).build();
putData(OPERATIONAL, TOPOLOGY_TYPE_PATH, topologyTypeAugment);
+ LOG.info("EventSourceRegistry has been initialized");
}
private <T extends DataObject> void putData(final LogicalDatastoreType store,
- final InstanceIdentifier<T> path, final T data) {
+ final InstanceIdentifier<T> path,
+ final T data){
final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
tx.put(store, path, data, true);
tx.submit();
+
}
- private void insert(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath, final Node node) {
- final NodeKey nodeKey = node.getKey();
+ private <T extends DataObject> void deleteData(final LogicalDatastoreType store, final InstanceIdentifier<T> path){
+ final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
+ tx.delete(OPERATIONAL, path);
+ tx.submit();
+ }
+
+ private void insert(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath) {
+ final NodeKey nodeKey = sourcePath.getKey();
final InstanceIdentifier<Node1> augmentPath = sourcePath.augmentation(Node1.class);
final Node1 nodeAgument = new Node1Builder().setEventSourceNode(new NodeId(nodeKey.getNodeId().getValue())).build();
putData(OPERATIONAL, augmentPath, nodeAgument);
}
+ private void remove(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath){
+ final InstanceIdentifier<Node1> augmentPath = sourcePath.augmentation(Node1.class);
+ deleteData(OPERATIONAL, augmentPath);
+ }
+
private void notifyExistingNodes(final Pattern nodeIdPatternRegex, final EventSourceTopic eventSourceTopic){
- executorService.execute(new NotifyAllNodeExecutor(dataBroker, nodeIdPatternRegex, eventSourceTopic));
+
+ final ReadOnlyTransaction tx = dataBroker.newReadOnlyTransaction();
+
+ final CheckedFuture<Optional<Topology>, ReadFailedException> future = tx.read(OPERATIONAL, EVENT_SOURCE_TOPOLOGY_PATH);
+
+ Futures.addCallback(future, new FutureCallback<Optional<Topology>>(){
+
+ @Override
+ public void onSuccess(Optional<Topology> data) {
+ if(data.isPresent()) {
+ final List<Node> nodes = data.get().getNode();
+ for (final Node node : nodes) {
+ if (nodeIdPatternRegex.matcher(node.getNodeId().getValue()).matches()) {
+ eventSourceTopic.notifyNode(EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, node.getKey()));
+ }
+ }
+ }
+ tx.close();
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Can not notify existing nodes {}", t);
+ tx.close();
+ }
+
+ });
+
}
@Override
final NotificationPattern notificationPattern = new NotificationPattern(input.getNotificationPattern());
final String nodeIdPattern = input.getNodeIdPattern().getValue();
- final Pattern nodeIdPatternRegex = Pattern.compile(Util.wildcardToRegex(nodeIdPattern));
- final EventSourceTopic eventSourceTopic = new EventSourceTopic(notificationPattern, input.getNodeIdPattern().getValue(), eventSourceService);
+ final Pattern nodeIdPatternRegex = Pattern.compile(nodeIdPattern);
+ final EventSourceTopic eventSourceTopic = new EventSourceTopic(notificationPattern, nodeIdPattern, eventSourceService);
registerTopic(eventSourceTopic);
.setTopicId(eventSourceTopic.getTopicId())
.build();
- return Util.resultFor(cto);
+ return Util.resultRpcSuccessFor(cto);
}
@Override
@Override
public void close() {
aggregatorRpcReg.close();
+ for(ListenerRegistration<DataChangeListener> reg : topicListenerRegistrations.values()){
+ reg.close();
+ }
}
- public void registerTopic(final EventSourceTopic listener) {
+ private void registerTopic(final EventSourceTopic listener) {
final ListenerRegistration<DataChangeListener> listenerRegistration = dataBroker.registerDataChangeListener(OPERATIONAL,
EVENT_SOURCE_TOPOLOGY_PATH,
listener,
DataBroker.DataChangeScope.SUBTREE);
- registrations.put(listener, listenerRegistration);
- }
-
- public void register(final Node node, final NetconfEventSource netconfEventSource) {
- final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, node.getKey());
- rpcRegistry.addRoutedRpcImplementation(EventSourceService.class, netconfEventSource)
- .registerPath(NodeContext.class, sourcePath);
- insert(sourcePath,node);
- // FIXME: Return registration object.
+ topicListenerRegistrations.put(listener, listenerRegistration);
}
- private class NotifyAllNodeExecutor implements Runnable {
+ public void register(final EventSource eventSource){
+ NodeKey nodeKey = eventSource.getSourceNodeKey();
+ final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey);
+ RoutedRpcRegistration<EventSourceService> reg = rpcRegistry.addRoutedRpcImplementation(EventSourceService.class, eventSource);
+ reg.registerPath(NodeContext.class, sourcePath);
+ routedRpcRegistrations.put(nodeKey,reg);
+ insert(sourcePath);
- private final EventSourceTopic topic;
- private final DataBroker dataBroker;
- private final Pattern nodeIdPatternRegex;
-
- public NotifyAllNodeExecutor(final DataBroker dataBroker, final Pattern nodeIdPatternRegex, final EventSourceTopic topic) {
- this.topic = topic;
- this.dataBroker = dataBroker;
- this.nodeIdPatternRegex = nodeIdPatternRegex;
+ for(EventSourceTopic est : topicListenerRegistrations.keySet()){
+ est.notifyNode(EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey));
}
+ }
- @Override
- public void run() {
- //# Code reader note: Context of Node type is NetworkTopology
- final List<Node> nodes = snapshot();
- for (final Node node : nodes) {
- if (nodeIdPatternRegex.matcher(node.getNodeId().getValue()).matches()) {
- topic.notifyNode(EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, node.getKey()));
- }
- }
+ public void unRegister(final EventSource eventSource){
+ final NodeKey nodeKey = eventSource.getSourceNodeKey();
+ final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey);
+ final RoutedRpcRegistration<EventSourceService> removeRegistration = routedRpcRegistrations.remove(nodeKey);
+ if(removeRegistration != null){
+ removeRegistration.close();
+ remove(sourcePath);
}
+ }
- private List<Node> snapshot() {
- try (ReadOnlyTransaction tx = dataBroker.newReadOnlyTransaction();) {
-
- final Optional<Topology> data = tx.read(OPERATIONAL, EVENT_SOURCE_TOPOLOGY_PATH).checkedGet();
-
- if(data.isPresent()) {
- final List<Node> nodeList = data.get().getNode();
- if(nodeList != null) {
- return nodeList;
- }
- }
- return Collections.emptyList();
- } catch (final ReadFailedException e) {
- LOG.error("Unable to retrieve node list.", e);
- return Collections.emptyList();
- }
- }
+ @Override
+ public <T extends EventSource> EventSourceRegistration<T> registerEventSource(
+ T eventSource) {
+ EventSourceRegistrationImpl<T> esr = new EventSourceRegistrationImpl<>(eventSource, this);
+ register(eventSource);
+ return esr;
}
}
+
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.messagebus.app.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.Future;
-import java.util.regex.Pattern;
-
-import javax.xml.stream.XMLStreamException;
-import javax.xml.transform.dom.DOMResult;
-import javax.xml.transform.dom.DOMSource;
-
-import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.NotificationsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.inventory.rev140108.NetconfNode;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Throwables;
-
-public class NetconfEventSource implements EventSourceService, DOMNotificationListener, DataChangeListener {
-
- private static final Logger LOG = LoggerFactory.getLogger(NetconfEventSource.class);
-
- private static final NodeIdentifier TOPIC_NOTIFICATION_ARG = new NodeIdentifier(TopicNotification.QNAME);
- private static final NodeIdentifier EVENT_SOURCE_ARG = new NodeIdentifier(QName.create(TopicNotification.QNAME, "node-id"));
- private static final NodeIdentifier PAYLOAD_ARG = new NodeIdentifier(QName.create(TopicNotification.QNAME, "payload"));
-
- private static final NodeIdentifier STREAM_QNAME = new NodeIdentifier(QName.create(CreateSubscriptionInput.QNAME,"stream"));
- private static final SchemaPath CREATE_SUBSCRIPTION = SchemaPath.create(true, QName.create(CreateSubscriptionInput.QNAME, "create-subscription"));
-
-
- private final String nodeId;
-
-
- private final DOMMountPoint netconfMount;
- private final DOMNotificationPublishService domPublish;
- private final NotificationsService notificationRpcService;
-
- private final Set<String> activeStreams = new ConcurrentSkipListSet<>();
-
- private final Map<String, String> urnPrefixToStreamMap;
-
-
- public NetconfEventSource(final String nodeId, final Map<String, String> streamMap, final DOMMountPoint netconfMount, final DOMNotificationPublishService publishService, final MountPoint bindingMount) {
- this.netconfMount = netconfMount;
- this.notificationRpcService = bindingMount.getService(RpcConsumerRegistry.class).get().getRpcService(NotificationsService.class);
- this.nodeId = nodeId;
- this.urnPrefixToStreamMap = streamMap;
- this.domPublish = publishService;
- LOG.info("NetconfEventSource [{}] created.", nodeId);
- }
-
- @Override
- public Future<RpcResult<JoinTopicOutput>> joinTopic(final JoinTopicInput input) {
- final NotificationPattern notificationPattern = input.getNotificationPattern();
-
- // FIXME: default language should already be regex
- final String regex = Util.wildcardToRegex(notificationPattern.getValue());
-
- final Pattern pattern = Pattern.compile(regex);
- final List<SchemaPath> matchingNotifications = Util.expandQname(availableNotifications(), pattern);
- registerNotificationListener(matchingNotifications);
- final JoinTopicOutput output = new JoinTopicOutputBuilder().build();
- return com.google.common.util.concurrent.Futures.immediateFuture(RpcResultBuilder.success(output).build());
- }
-
- private List<SchemaPath> availableNotifications() {
- // FIXME: use SchemaContextListener to get changes asynchronously
- final Set<NotificationDefinition> availableNotifications = netconfMount.getSchemaContext().getNotifications();
- final List<SchemaPath> qNs = new ArrayList<>(availableNotifications.size());
- for (final NotificationDefinition nd : availableNotifications) {
- qNs.add(nd.getPath());
- }
- return qNs;
- }
-
- private void registerNotificationListener(final List<SchemaPath> notificationsToSubscribe) {
-
- final Optional<DOMNotificationService> notifyService = netconfMount.getService(DOMNotificationService.class);
- if(notifyService.isPresent()) {
- for (final SchemaPath qName : notificationsToSubscribe) {
- startSubscription(qName);
- }
- // FIXME: Capture registration
- notifyService.get().registerNotificationListener(this, notificationsToSubscribe);
- }
- }
-
- private void startSubscription(final SchemaPath path) {
- final String streamName = resolveStream(path.getLastComponent());
-
- if (streamIsActive(streamName) == false) {
- LOG.info("Stream {} is not active on node {}. Will subscribe.", streamName, nodeId);
- startSubscription(streamName);
- }
- }
-
- private void resubscribeToActiveStreams() {
- for (final String streamName : activeStreams) {
- startSubscription(streamName);
- }
- }
-
- private synchronized void startSubscription(final String streamName) {
- final ContainerNode input = Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(CreateSubscriptionInput.QNAME))
- .withChild(ImmutableNodes.leafNode(STREAM_QNAME, streamName))
- .build();
- netconfMount.getService(DOMRpcService.class).get().invokeRpc(CREATE_SUBSCRIPTION, input);
- activeStreams.add(streamName);
- }
-
- private String resolveStream(final QName qName) {
- String streamName = null;
-
- for (final Map.Entry<String, String> entry : urnPrefixToStreamMap.entrySet()) {
- final String nameSpace = qName.getNamespace().toString();
- final String urnPrefix = entry.getKey();
- if( nameSpace.startsWith(urnPrefix) ) {
- streamName = entry.getValue();
- break;
- }
- }
-
- return streamName;
- }
-
- private boolean streamIsActive(final String streamName) {
- return activeStreams.contains(streamName);
- }
-
- @Override
- public void onNotification(final DOMNotification notification) {
- final ContainerNode topicNotification = Builders.containerBuilder()
- .withNodeIdentifier(TOPIC_NOTIFICATION_ARG)
- .withChild(ImmutableNodes.leafNode(EVENT_SOURCE_ARG, nodeId))
- .withChild(encapsulate(notification))
- .build();
- try {
- domPublish.putNotification(new TopicDOMNotification(topicNotification));
- } catch (final InterruptedException e) {
- throw Throwables.propagate(e);
- }
- }
-
- private AnyXmlNode encapsulate(final DOMNotification body) {
- // FIXME: Introduce something like AnyXmlWithNormalizedNodeData in Yangtools
- final Document doc = XmlUtil.newDocument();
- final Optional<String> namespace = Optional.of(PAYLOAD_ARG.getNodeType().getNamespace().toString());
- final Element element = XmlUtil.createElement(doc , "payload", namespace);
-
-
- final DOMResult result = new DOMResult(element);
-
- final SchemaContext context = netconfMount.getSchemaContext();
- final SchemaPath schemaPath = body.getType();
- try {
- NetconfMessageTransformUtil.writeNormalizedNode(body.getBody(), result, schemaPath, context);
- return Builders.anyXmlBuilder().withNodeIdentifier(PAYLOAD_ARG)
- .withValue(new DOMSource(element))
- .build();
- } catch (IOException | XMLStreamException e) {
- LOG.error("Unable to encapsulate notification.",e);
- throw Throwables.propagate(e);
- }
- }
-
- @Override
- public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- boolean wasConnected = false;
- boolean nowConnected = false;
-
- for (final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : change.getOriginalData().entrySet()) {
- if ( isNetconfNode(changeEntry) ) {
- final NetconfNode nn = (NetconfNode)changeEntry.getValue();
- wasConnected = nn.isConnected();
- }
- }
-
- for (final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : change.getUpdatedData().entrySet()) {
- if ( isNetconfNode(changeEntry) ) {
- final NetconfNode nn = (NetconfNode)changeEntry.getValue();
- nowConnected = nn.isConnected();
- }
- }
-
- if (wasConnected == false && nowConnected == true) {
- resubscribeToActiveStreams();
- }
- }
-
- private static boolean isNetconfNode(final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry ) {
- return NetconfNode.class.equals(changeEntry.getKey().getTargetType());
- }
-
-}
package org.opendaylight.controller.messagebus.app.impl;
-import java.math.BigInteger;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
+import java.util.UUID;
import java.util.concurrent.Future;
import java.util.regex.Pattern;
import com.google.common.util.concurrent.Futures;
-public final class Util {
- private static final MessageDigest messageDigestTemplate = getDigestInstance();
-
- private static MessageDigest getDigestInstance() {
- try {
- return MessageDigest.getInstance("MD5");
- } catch (final NoSuchAlgorithmException e) {
- throw new RuntimeException("Unable to get MD5 instance");
- }
- }
- static String md5String(final String inputString) {
+public final class Util {
- try {
- final MessageDigest md = (MessageDigest)messageDigestTemplate.clone();
- md.update(inputString.getBytes("UTF-8"), 0, inputString.length());
- return new BigInteger(1, md.digest()).toString(16);
- } catch (final Exception e) {
- throw new RuntimeException("Unable to get MD5 instance");
- }
+ public static String getUUIDIdent(){
+ UUID uuid = UUID.randomUUID();
+ return uuid.toString();
}
- public static <T> Future<RpcResult<T>> resultFor(final T output) {
+ public static <T> Future<RpcResult<T>> resultRpcSuccessFor(final T output) {
final RpcResult<T> result = RpcResultBuilder.success(output).build();
return Futures.immediateFuture(result);
}
* @param wildcard
* @return
*/
- static String wildcardToRegex(final String wildcard){
+ public static String wildcardToRegex(final String wildcard){
final StringBuffer s = new StringBuffer(wildcard.length());
s.append('^');
for (final char c : wildcard.toCharArray()) {
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.eventsources.netconf;
+
+import static com.google.common.util.concurrent.Futures.immediateFuture;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+
+import javax.xml.stream.XMLStreamException;
+import javax.xml.transform.dom.DOMResult;
+import javax.xml.transform.dom.DOMSource;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.messagebus.app.impl.TopicDOMNotification;
+import org.opendaylight.controller.messagebus.app.impl.Util;
+import org.opendaylight.controller.messagebus.spi.EventSource;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicStatus;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.inventory.rev140108.NetconfNode;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Throwables;
+
+public class NetconfEventSource implements EventSource, DOMNotificationListener, DataChangeListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfEventSource.class);
+
+ private static final NodeIdentifier TOPIC_NOTIFICATION_ARG = new NodeIdentifier(TopicNotification.QNAME);
+ private static final NodeIdentifier EVENT_SOURCE_ARG = new NodeIdentifier(QName.create(TopicNotification.QNAME, "node-id"));
+ private static final NodeIdentifier TOPIC_ID_ARG = new NodeIdentifier(QName.create(TopicNotification.QNAME, "topic-id"));
+ private static final NodeIdentifier PAYLOAD_ARG = new NodeIdentifier(QName.create(TopicNotification.QNAME, "payload"));
+
+ private static final NodeIdentifier STREAM_QNAME = new NodeIdentifier(QName.create(CreateSubscriptionInput.QNAME,"stream"));
+ private static final SchemaPath CREATE_SUBSCRIPTION = SchemaPath.create(true, QName.create(CreateSubscriptionInput.QNAME, "create-subscription"));
+
+ private final String nodeId;
+ private final Node node;
+
+ private final DOMMountPoint netconfMount;
+ private final DOMNotificationPublishService domPublish;
+
+ private final Map<String, String> urnPrefixToStreamMap;
+
+ private final ConcurrentHashMap<String, StreamNotificationTopicRegistration> streamNotifRegistrationMap = new ConcurrentHashMap<>();
+
+ public NetconfEventSource(final Node node, final Map<String, String> streamMap, final DOMMountPoint netconfMount, final DOMNotificationPublishService publishService) {
+ this.netconfMount = netconfMount;
+ this.node = node;
+ this.nodeId = node.getNodeId().getValue();
+ this.urnPrefixToStreamMap = streamMap;
+ this.domPublish = publishService;
+ this.initializeStreamNotifRegistrationMap();
+ LOG.info("NetconfEventSource [{}] created.", nodeId);
+ }
+
+ private void initializeStreamNotifRegistrationMap(){
+ for(String streamName : this.urnPrefixToStreamMap.values()){
+ streamNotifRegistrationMap.put(streamName, new StreamNotificationTopicRegistration(streamName, this.nodeId, this.netconfMount, this));
+ }
+ }
+
+ @Override
+ public Future<RpcResult<JoinTopicOutput>> joinTopic(final JoinTopicInput input) {
+
+ final NotificationPattern notificationPattern = input.getNotificationPattern();
+ final List<SchemaPath> matchingNotifications = getMatchingNotifications(notificationPattern);
+ return registerTopic(input.getTopicId(),matchingNotifications);
+
+ }
+
+ private synchronized Future<RpcResult<JoinTopicOutput>> registerTopic(final TopicId topicId, final List<SchemaPath> notificationsToSubscribe){
+
+ JoinTopicStatus joinTopicStatus = JoinTopicStatus.Down;
+ if(notificationsToSubscribe != null && notificationsToSubscribe.isEmpty() == false){
+ final Optional<DOMNotificationService> notifyService = netconfMount.getService(DOMNotificationService.class);
+ if(notifyService.isPresent()){
+ int subscribedStreams = 0;
+ for(SchemaPath schemaNotification : notificationsToSubscribe){
+ final Optional<String> streamName = resolveStream(schemaNotification.getLastComponent());
+ if(streamName.isPresent()){
+ LOG.info("Stream {} is activating, TopicId {}", streamName.get(), topicId.getValue() );
+ StreamNotificationTopicRegistration streamReg = streamNotifRegistrationMap.get(streamName.get());
+ streamReg.activateStream();
+ for(SchemaPath notificationPath : notificationsToSubscribe){
+ LOG.info("Notification listener is registering, Notification {}, TopicId {}", notificationPath, topicId.getValue() );
+ streamReg.registerNotificationListenerTopic(notificationPath, topicId);
+ }
+ subscribedStreams = subscribedStreams + 1;
+ }
+ }
+ if(subscribedStreams > 0){
+ joinTopicStatus = JoinTopicStatus.Up;
+ }
+ }
+ }
+
+ final JoinTopicOutput output = new JoinTopicOutputBuilder().setStatus(joinTopicStatus).build();
+ return immediateFuture(RpcResultBuilder.success(output).build());
+
+ }
+
+ private void resubscribeToActiveStreams() {
+ for (StreamNotificationTopicRegistration streamReg : streamNotifRegistrationMap.values()){
+ streamReg.reActivateStream();
+ }
+ }
+
+ private Optional<String> resolveStream(final QName qName) {
+ String streamName = null;
+
+ for (final Map.Entry<String, String> entry : urnPrefixToStreamMap.entrySet()) {
+ final String nameSpace = qName.getNamespace().toString();
+ final String urnPrefix = entry.getKey();
+ if( nameSpace.startsWith(urnPrefix) ) {
+ streamName = entry.getValue();
+ break;
+ }
+ }
+ return Optional.fromNullable(streamName);
+ }
+
+ @Override
+ public void onNotification(final DOMNotification notification) {
+ SchemaPath notificationPath = notification.getType();
+ LOG.info("Notification {} has come.",notification.getType());
+ for(StreamNotificationTopicRegistration streamReg : streamNotifRegistrationMap.values()){
+ for(TopicId topicId : streamReg.getNotificationTopicIds(notificationPath)){
+ publishNotification(notification, topicId);
+ LOG.info("Notification {} has been published for TopicId {}",notification.getType(), topicId.getValue());
+ }
+ }
+ }
+
+ private void publishNotification(final DOMNotification notification, TopicId topicId){
+ final ContainerNode topicNotification = Builders.containerBuilder()
+ .withNodeIdentifier(TOPIC_NOTIFICATION_ARG)
+ .withChild(ImmutableNodes.leafNode(TOPIC_ID_ARG, topicId))
+ .withChild(ImmutableNodes.leafNode(EVENT_SOURCE_ARG, nodeId))
+ .withChild(encapsulate(notification))
+ .build();
+ try {
+ domPublish.putNotification(new TopicDOMNotification(topicNotification));
+ } catch (final InterruptedException e) {
+ throw Throwables.propagate(e);
+ }
+ }
+
+ private AnyXmlNode encapsulate(final DOMNotification body) {
+ // FIXME: Introduce something like AnyXmlWithNormalizedNodeData in Yangtools
+ final Document doc = XmlUtil.newDocument();
+ final Optional<String> namespace = Optional.of(PAYLOAD_ARG.getNodeType().getNamespace().toString());
+ final Element element = XmlUtil.createElement(doc , "payload", namespace);
+
+ final DOMResult result = new DOMResult(element);
+
+ final SchemaContext context = netconfMount.getSchemaContext();
+ final SchemaPath schemaPath = body.getType();
+ try {
+ NetconfMessageTransformUtil.writeNormalizedNode(body.getBody(), result, schemaPath, context);
+ return Builders.anyXmlBuilder().withNodeIdentifier(PAYLOAD_ARG)
+ .withValue(new DOMSource(element))
+ .build();
+ } catch (IOException | XMLStreamException e) {
+ LOG.error("Unable to encapsulate notification.",e);
+ throw Throwables.propagate(e);
+ }
+ }
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ boolean wasConnected = false;
+ boolean nowConnected = false;
+
+ for (final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : change.getOriginalData().entrySet()) {
+ if ( isNetconfNode(changeEntry) ) {
+ final NetconfNode nn = (NetconfNode)changeEntry.getValue();
+ wasConnected = nn.isConnected();
+ }
+ }
+
+ for (final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : change.getUpdatedData().entrySet()) {
+ if ( isNetconfNode(changeEntry) ) {
+ final NetconfNode nn = (NetconfNode)changeEntry.getValue();
+ nowConnected = nn.isConnected();
+ }
+ }
+
+ if (wasConnected == false && nowConnected == true) {
+ resubscribeToActiveStreams();
+ }
+ }
+
+ private static boolean isNetconfNode(final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry ) {
+ return NetconfNode.class.equals(changeEntry.getKey().getTargetType());
+ }
+
+ private List<SchemaPath> getMatchingNotifications(NotificationPattern notificationPattern){
+ // FIXME: default language should already be regex
+ final String regex = Util.wildcardToRegex(notificationPattern.getValue());
+
+ final Pattern pattern = Pattern.compile(regex);
+ List<SchemaPath> availableNotifications = getAvailableNotifications();
+ if(availableNotifications == null || availableNotifications.isEmpty()){
+ return null;
+ }
+ return Util.expandQname(availableNotifications, pattern);
+ }
+
+ @Override
+ public void close() throws Exception {
+ for(StreamNotificationTopicRegistration streamReg : streamNotifRegistrationMap.values()){
+ streamReg.deactivateStream();
+ }
+ }
+
+ @Override
+ public NodeKey getSourceNodeKey(){
+ return node.getKey();
+ }
+
+ @Override
+ public List<SchemaPath> getAvailableNotifications() {
+ // FIXME: use SchemaContextListener to get changes asynchronously
+ final Set<NotificationDefinition> availableNotifications = netconfMount.getSchemaContext().getNotifications();
+ final List<SchemaPath> qNs = new ArrayList<>(availableNotifications.size());
+ for (final NotificationDefinition nd : availableNotifications) {
+ qNs.add(nd.getPath());
+ }
+ return qNs;
+ }
+
+ private class StreamNotificationTopicRegistration{
+
+ final private String streamName;
+ final private DOMMountPoint netconfMount;
+ final private String nodeId;
+ final private NetconfEventSource notificationListener;
+ private boolean active;
+
+ private ConcurrentHashMap<SchemaPath, ListenerRegistration<NetconfEventSource>> notificationRegistrationMap = new ConcurrentHashMap<>();
+ private ConcurrentHashMap<SchemaPath, ArrayList<TopicId>> notificationTopicMap = new ConcurrentHashMap<>();
+
+ public StreamNotificationTopicRegistration(final String streamName, final String nodeId, final DOMMountPoint netconfMount, NetconfEventSource notificationListener) {
+ this.streamName = streamName;
+ this.netconfMount = netconfMount;
+ this.nodeId = nodeId;
+ this.notificationListener = notificationListener;
+ this.active = false;
+ }
+
+ public boolean isActive() {
+ return active;
+ }
+
+ public void reActivateStream(){
+ if(this.isActive()){
+ LOG.info("Stream {} is reactivated active on node {}.", this.streamName, this.nodeId);
+ final ContainerNode input = Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(CreateSubscriptionInput.QNAME))
+ .withChild(ImmutableNodes.leafNode(STREAM_QNAME, this.streamName))
+ .build();
+ netconfMount.getService(DOMRpcService.class).get().invokeRpc(CREATE_SUBSCRIPTION, input);
+ }
+ }
+
+ public void activateStream() {
+ if(this.isActive() == false){
+ LOG.info("Stream {} is not active on node {}. Will subscribe.", this.streamName, this.nodeId);
+ final ContainerNode input = Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(CreateSubscriptionInput.QNAME))
+ .withChild(ImmutableNodes.leafNode(STREAM_QNAME, this.streamName))
+ .build();
+ netconfMount.getService(DOMRpcService.class).get().invokeRpc(CREATE_SUBSCRIPTION, input);
+ this.active = true;
+ } else {
+ LOG.info("Stream {} is now active on node {}", this.streamName, this.nodeId);
+ }
+ }
+
+ public void deactivateStream() {
+ for(ListenerRegistration<NetconfEventSource> reg : notificationRegistrationMap.values()){
+ reg.close();
+ }
+ this.active = false;
+ }
+
+ public String getStreamName() {
+ return streamName;
+ }
+
+ public ArrayList<TopicId> getNotificationTopicIds(SchemaPath notificationPath){
+ return notificationTopicMap.get(notificationPath);
+ }
+
+ public void registerNotificationListenerTopic(SchemaPath notificationPath, TopicId topicId){
+ final Optional<DOMNotificationService> notifyService = netconfMount.getService(DOMNotificationService.class);
+ if(notificationPath != null && notifyService.isPresent()){
+ ListenerRegistration<NetconfEventSource> registration = notifyService.get().registerNotificationListener(this.notificationListener,notificationPath);
+ notificationRegistrationMap.put(notificationPath, registration);
+ ArrayList<TopicId> topicIds = getNotificationTopicIds(notificationPath);
+ if(topicIds == null){
+ topicIds = new ArrayList<>();
+ topicIds.add(topicId);
+ } else {
+ if(topicIds.contains(topicId) == false){
+ topicIds.add(topicId);
+ }
+ }
+ notificationTopicMap.put(notificationPath, topicIds);
+ }
+ }
+
+ }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.messagebus.app.impl;
+package org.opendaylight.controller.messagebus.eventsources.netconf;
-import com.google.common.base.Optional;
import java.util.HashMap;
import java.util.List;
import org.opendaylight.controller.config.yang.messagebus.app.impl.NamespaceToStream;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
import org.opendaylight.controller.md.sal.binding.api.MountPointService;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeFields.ConnectionStatus;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.network.topology.topology.topology.types.TopologyNetconf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
public final class NetconfEventSourceManager implements DataChangeListener, AutoCloseable {
- private static final Logger LOGGER = LoggerFactory.getLogger(NetconfEventSourceManager.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfEventSourceManager.class);
private static final TopologyKey NETCONF_TOPOLOGY_KEY = new TopologyKey(new TopologyId(TopologyNetconf.QNAME.getLocalName()));
private static final InstanceIdentifier<Node> NETCONF_DEVICE_PATH = InstanceIdentifier.create(NetworkTopology.class)
.child(Topology.class, NETCONF_TOPOLOGY_KEY)
.build();
private static final QName NODE_ID_QNAME = QName.create(Node.QNAME,"node-id");
-
- private final EventSourceTopology eventSourceTopology;
private final Map<String, String> streamMap;
-
- private final ConcurrentHashMap<InstanceIdentifier<?>, NetconfEventSource> netconfSources = new ConcurrentHashMap<>();
- private final ListenerRegistration<DataChangeListener> listenerReg;
+ private final ConcurrentHashMap<InstanceIdentifier<?>, EventSourceRegistration<NetconfEventSource>> eventSourceRegistration = new ConcurrentHashMap<>();
private final DOMNotificationPublishService publishService;
private final DOMMountPointService domMounts;
private final MountPointService bindingMounts;
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+ private final EventSourceRegistry eventSourceRegistry;
+
+ public static NetconfEventSourceManager create(final DataBroker dataBroker,
+ final DOMNotificationPublishService domPublish,
+ final DOMMountPointService domMount,
+ final MountPointService bindingMount,
+ final EventSourceRegistry eventSourceRegistry,
+ final List<NamespaceToStream> namespaceMapping){
+
+ final NetconfEventSourceManager eventSourceManager =
+ new NetconfEventSourceManager(domPublish, domMount, bindingMount, eventSourceRegistry, namespaceMapping);
- public NetconfEventSourceManager(final DataBroker dataStore,
- final DOMNotificationPublishService domPublish,
+ eventSourceManager.initialize(dataBroker);
+
+ return eventSourceManager;
+
+ }
+
+ private NetconfEventSourceManager(final DOMNotificationPublishService domPublish,
final DOMMountPointService domMount,
final MountPointService bindingMount,
- final EventSourceTopology eventSourceTopology,
+ final EventSourceRegistry eventSourceRegistry,
final List<NamespaceToStream> namespaceMapping) {
- listenerReg = dataStore.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL, NETCONF_DEVICE_PATH, this, DataChangeScope.SUBTREE);
- this.eventSourceTopology = eventSourceTopology;
+ Preconditions.checkNotNull(domPublish);
+ Preconditions.checkNotNull(domMount);
+ Preconditions.checkNotNull(bindingMount);
+ Preconditions.checkNotNull(eventSourceRegistry);
+ Preconditions.checkNotNull(namespaceMapping);
this.streamMap = namespaceToStreamMapping(namespaceMapping);
this.domMounts = domMount;
this.bindingMounts = bindingMount;
this.publishService = domPublish;
- LOGGER.info("EventSourceManager initialized.");
+ this.eventSourceRegistry = eventSourceRegistry;
+ }
+
+ private void initialize(final DataBroker dataBroker){
+ Preconditions.checkNotNull(dataBroker);
+ listenerRegistration = dataBroker.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL, NETCONF_DEVICE_PATH, this, DataChangeScope.SUBTREE);
+ LOG.info("NetconfEventSourceManager initialized.");
}
private Map<String,String> namespaceToStreamMapping(final List<NamespaceToStream> namespaceMapping) {
@Override
public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> event) {
- //FIXME: Prevent creating new event source on subsequent changes in inventory, like disconnect.
- LOGGER.debug("[DataChangeEvent<InstanceIdentifier<?>, DataObject>: {}]", event);
+
+ LOG.debug("[DataChangeEvent<InstanceIdentifier<?>, DataObject>: {}]", event);
for (final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : event.getCreatedData().entrySet()) {
if (changeEntry.getValue() instanceof Node) {
nodeUpdated(changeEntry.getKey(),(Node) changeEntry.getValue());
}
}
-
for (final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : event.getUpdatedData().entrySet()) {
if (changeEntry.getValue() instanceof Node) {
nodeUpdated(changeEntry.getKey(),(Node) changeEntry.getValue());
}
}
-
}
private void nodeUpdated(final InstanceIdentifier<?> key, final Node node) {
// we listen on node tree, therefore we should rather throw IllegalStateException when node is null
if ( node == null ) {
- LOGGER.debug("OnDataChanged Event. Node is null.");
- return;
+ throw new IllegalStateException("Node is null");
}
if ( isNetconfNode(node) == false ) {
- LOGGER.debug("OnDataChanged Event. Not a Netconf node.");
+ LOG.debug("OnDataChanged Event. Not a Netconf node.");
return;
}
if ( isEventSource(node) == false ) {
- LOGGER.debug("OnDataChanged Event. Node an EventSource node.");
+ LOG.debug("OnDataChanged Event. Node an EventSource node.");
return;
}
if(node.getAugmentation(NetconfNode.class).getConnectionStatus() != ConnectionStatus.Connected ) {
return;
}
- if(!netconfSources.containsKey(key)) {
+ if(!eventSourceRegistration.containsKey(key)) {
createEventSource(key,node);
}
}
private void createEventSource(final InstanceIdentifier<?> key, final Node node) {
final Optional<DOMMountPoint> netconfMount = domMounts.getMountPoint(domMountPath(node.getNodeId()));
- final Optional<MountPoint> bindingMount = bindingMounts.getMountPoint(key);
- if(netconfMount.isPresent() && bindingMount.isPresent()) {
- final String nodeId = node.getNodeId().getValue();
- final NetconfEventSource netconfEventSource = new NetconfEventSource(nodeId, streamMap, netconfMount.get(), publishService, bindingMount.get());
- eventSourceTopology.register(node,netconfEventSource);
- netconfSources.putIfAbsent(key, netconfEventSource);
+ if(netconfMount.isPresent()) {
+ final NetconfEventSource netconfEventSource =
+ new NetconfEventSource(node, streamMap, netconfMount.get(), publishService);
+ final EventSourceRegistration<NetconfEventSource> registration = eventSourceRegistry.registerEventSource(netconfEventSource);
+ LOG.info("Event source {} has been registered",node.getNodeId().getValue());
+ eventSourceRegistration.putIfAbsent(key, registration);
+
}
}
return node.getAugmentation(NetconfNode.class) != null ;
}
- public boolean isEventSource(final Node node) {
- final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
+ private boolean isEventSource(final Node node) {
+ final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
return isEventSource(netconfNode);
+
}
private boolean isEventSource(final NetconfNode node) {
+ if (node.getAvailableCapabilities() == null) {
+ return false;
+ }
+ final List<String> capabilities = node.getAvailableCapabilities().getAvailableCapability();
+ if(capabilities == null) {
+ return false;
+ }
for (final String capability : node.getAvailableCapabilities().getAvailableCapability()) {
if(capability.startsWith("(urn:ietf:params:xml:ns:netconf:notification")) {
return true;
@Override
public void close() {
- listenerReg.close();
+ for(final EventSourceRegistration<NetconfEventSource> reg : eventSourceRegistration.values()){
+ reg.close();
+ }
+ listenerRegistration.close();
}
+
}
\ No newline at end of file
import config { prefix config; revision-date 2013-04-05; }
import opendaylight-md-sal-binding {prefix sal;}
import opendaylight-md-sal-dom {prefix dom;}
-
+ import messagebus-event-source-registry {prefix esr;}
description
"Service definition for Message Bus application implementation.";
identity messagebus-app-impl {
base config:module-type;
+ config:provided-service esr:event-source-registry;
config:java-name-prefix MessageBusAppImpl;
}
-
+
augment "/config:modules/config:module/config:configuration" {
case messagebus-app-impl {
when "/config:modules/config:module/config:type = 'messagebus-app-impl'";
*/
package org.opendaylight.controller.config.yang.messagebus.app.impl;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
import org.osgi.framework.BundleContext;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
public class MessageBusAppImplModuleFactoryTest {
DependencyResolver dependencyResolverMock;
assertNotNull("Module has not been created correctly.", messageBusAppImplModuleFactory.createModule("instanceName1", dependencyResolverMock, dynamicMBeanWithInstanceMock, bundleContextMock));
}
-}
+}
\ No newline at end of file
*/
package org.opendaylight.controller.config.yang.messagebus.app.impl;
-import com.google.common.util.concurrent.CheckedFuture;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.JmxAttribute;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.MountPointService;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.messagebus.app.impl.EventSourceTopology;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.Provider;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.osgi.framework.BundleContext;
import javax.management.ObjectName;
-import java.util.ArrayList;
-import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
-import static org.mockito.Matchers.notNull;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
public class MessageBusAppImplModuleTest {
}
@Test
- public void createInstanceTest() {
- createInstanceTestHelper();
- messageBusAppImplModule.getInstance();
- assertNotNull("AutoCloseable instance has not been created correctly.", messageBusAppImplModule.createInstance());
- }
+ public void createInstanceTest() throws Exception{
+ org.opendaylight.controller.sal.binding.api.BindingAwareBroker bindingAwareBrokerMock = mock(org.opendaylight.controller.sal.binding.api.BindingAwareBroker.class);
+ Broker brokerMock = mock(Broker.class);
+ doReturn(brokerMock).when(dependencyResolverMock).resolveInstance(eq(org.opendaylight.controller.sal.core.api.Broker.class), any(ObjectName.class), any(JmxAttribute.class));
+ doReturn(bindingAwareBrokerMock).when(dependencyResolverMock).resolveInstance(eq(org.opendaylight.controller.sal.binding.api.BindingAwareBroker.class), any(ObjectName.class), any(JmxAttribute.class));
+ messageBusAppImplModule.resolveDependencies();
- private void createInstanceTestHelper(){
- NamespaceToStream namespaceToStream = mock(NamespaceToStream.class);
- List<NamespaceToStream> listNamespaceToStreamMock = new ArrayList<>();
- listNamespaceToStreamMock.add(namespaceToStream);
- messageBusAppImplModule.setNamespaceToStream(listNamespaceToStreamMock);
- ObjectName objectName = mock(ObjectName.class);
- org.opendaylight.controller.sal.core.api.Broker domBrokerDependency = mock(Broker.class);
- org.opendaylight.controller.sal.binding.api.BindingAwareBroker bindingBrokerDependency = mock(BindingAwareBroker.class);
- when(dependencyResolverMock.resolveInstance((java.lang.Class) notNull(), (javax.management.ObjectName) notNull(), eq(AbstractMessageBusAppImplModule.domBrokerJmxAttribute))).thenReturn(domBrokerDependency);
- when(dependencyResolverMock.resolveInstance((java.lang.Class) notNull(), (javax.management.ObjectName) notNull(), eq(AbstractMessageBusAppImplModule.bindingBrokerJmxAttribute))).thenReturn(bindingBrokerDependency);
- messageBusAppImplModule.setBindingBroker(objectName);
- messageBusAppImplModule.setDomBroker(objectName);
- BindingAwareBroker.ProviderContext providerContextMock = mock(BindingAwareBroker.ProviderContext.class);
- doReturn(providerContextMock).when(bindingBrokerDependency).registerProvider(any(BindingAwareProvider.class));
+ BindingAwareBroker.ProviderContext providerContext = mock(BindingAwareBroker.ProviderContext.class);
+ doReturn(providerContext).when(bindingAwareBrokerMock).registerProvider(any(BindingAwareProvider.class));
Broker.ProviderSession providerSessionMock = mock(Broker.ProviderSession.class);
- doReturn(providerSessionMock).when(domBrokerDependency).registerProvider(any(Provider.class));
-
+ doReturn(providerSessionMock).when(brokerMock).registerProvider(any(Provider.class));
DataBroker dataBrokerMock = mock(DataBroker.class);
- doReturn(dataBrokerMock).when(providerContextMock).getSALService(DataBroker.class);
+ doReturn(dataBrokerMock).when(providerContext).getSALService(eq(DataBroker.class));
+ DOMNotificationPublishService domNotificationPublishServiceMock = mock(DOMNotificationPublishService.class);
+ doReturn(domNotificationPublishServiceMock).when(providerSessionMock).getService(DOMNotificationPublishService.class);
+ DOMMountPointService domMountPointServiceMock = mock(DOMMountPointService.class);
+ doReturn(domMountPointServiceMock).when(providerSessionMock).getService(DOMMountPointService.class);
+ MountPointService mountPointServiceMock = mock(MountPointService.class);
+ doReturn(mountPointServiceMock).when(providerContext).getSALService(eq(MountPointService.class));
RpcProviderRegistry rpcProviderRegistryMock = mock(RpcProviderRegistry.class);
- doReturn(rpcProviderRegistryMock).when(providerContextMock).getSALService(RpcProviderRegistry.class);
- BindingAwareBroker.RpcRegistration rpcRegistrationMock = mock(BindingAwareBroker.RpcRegistration.class);
- doReturn(rpcRegistrationMock).when(rpcProviderRegistryMock).addRpcImplementation(eq(EventAggregatorService.class), any(EventSourceTopology.class));
- EventSourceService eventSourceServiceMock = mock(EventSourceService.class);
- doReturn(eventSourceServiceMock).when(rpcProviderRegistryMock).getRpcService(EventSourceService.class);
+ doReturn(rpcProviderRegistryMock).when(providerContext).getSALService(eq(RpcProviderRegistry.class));
WriteTransaction writeTransactionMock = mock(WriteTransaction.class);
doReturn(writeTransactionMock).when(dataBrokerMock).newWriteOnlyTransaction();
- doNothing().when(writeTransactionMock).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(DataObject.class));
- CheckedFuture checkedFutureMock = mock(CheckedFuture.class);
- doReturn(checkedFutureMock).when(writeTransactionMock).submit();
+ doNothing().when(writeTransactionMock).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(DataObject.class), eq(true));
+
+ assertNotNull("EventSourceRegistryWrapper has not been created correctly.", messageBusAppImplModule.createInstance());
}
-}
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.messagebus.app.impl;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.messagebus.spi.EventSource;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class EventSourceRegistrationImplTest {
+
+ EventSourceRegistrationImplLocal eventSourceRegistrationImplLocal;
+ EventSourceTopology eventSourceTopologyMock;
+
+ @BeforeClass
+ public static void initTestClass() throws IllegalAccessException, InstantiationException {
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ EventSource eventSourceMock = mock(EventSource.class);
+ eventSourceTopologyMock = mock(EventSourceTopology.class);
+ eventSourceRegistrationImplLocal = new EventSourceRegistrationImplLocal(eventSourceMock, eventSourceTopologyMock);
+ }
+
+ @Test
+ public void removeRegistrationTest() {
+ eventSourceRegistrationImplLocal.removeRegistration();
+ verify(eventSourceTopologyMock, times(1)).unRegister(any(EventSource.class));
+ }
+
+
+ private class EventSourceRegistrationImplLocal extends EventSourceRegistrationImpl{
+
+ /**
+ * @param instance of EventSource that has been registered by {@link EventSourceRegistryImpl#registerEventSource(Node, org.opendaylight.controller.messagebus.spi.EventSource)}
+ * @param eventSourceTopology
+ */
+ public EventSourceRegistrationImplLocal(EventSource instance, EventSourceTopology eventSourceTopology) {
+ super(instance, eventSourceTopology);
+ }
+ }
+
+}
\ No newline at end of file
*/
package org.opendaylight.controller.messagebus.app.impl;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.util.HashMap;
+import java.util.Map;
+
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.any;
-
public class EventSourceTopicTest {
EventSourceTopic eventSourceTopic;
nodeIdMock = mock(NodeId.class);
doReturn(nodeIdMock).when(dataObjectMock).getId();
- doReturn("0").when(nodeIdMock).getValue();
+ doReturn("nodeIdPattern1").when(nodeIdMock).getValue();
}
@Test
verify(eventSourceServiceMock, times(1)).joinTopic(any(JoinTopicInput.class));
}
-}
+}
\ No newline at end of file
*/
package org.opendaylight.controller.messagebus.app.impl;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.messagebus.spi.EventSource;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.Pattern;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.eq;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
public class EventSourceTopologyTest {
CreateTopicInput createTopicInputMock;
ListenerRegistration listenerRegistrationMock;
NodeKey nodeKey;
+ RpcRegistration<EventAggregatorService> aggregatorRpcReg;
@BeforeClass
public static void initTestClass() throws IllegalAccessException, InstantiationException {
public void setUp() throws Exception {
dataBrokerMock = mock(DataBroker.class);
rpcProviderRegistryMock = mock(RpcProviderRegistry.class);
-
}
@Test
}
private void constructorTestHelper(){
+ aggregatorRpcReg = mock(RpcRegistration.class);
+ EventSourceService eventSourceService = mock(EventSourceService.class);
+ doReturn(aggregatorRpcReg).when(rpcProviderRegistryMock).addRpcImplementation(eq(EventAggregatorService.class), any(EventSourceTopology.class));
+ doReturn(eventSourceService).when(rpcProviderRegistryMock).getRpcService(EventSourceService.class);
WriteTransaction writeTransactionMock = mock(WriteTransaction.class);
doReturn(writeTransactionMock).when(dataBrokerMock).newWriteOnlyTransaction();
- doNothing().when(writeTransactionMock).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(DataObject.class));
+ doNothing().when(writeTransactionMock).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(DataObject.class),eq(true));
CheckedFuture checkedFutureMock = mock(CheckedFuture.class);
doReturn(checkedFutureMock).when(writeTransactionMock).submit();
}
@Test
public void createTopicTest() throws Exception{
- createTopicTestHelper();
+ topicTestHelper();
assertNotNull("Topic has not been created correctly.", eventSourceTopology.createTopic(createTopicInputMock));
}
- private void createTopicTestHelper() throws Exception{
+ private void topicTestHelper() throws Exception{
constructorTestHelper();
createTopicInputMock = mock(CreateTopicInput.class);
eventSourceTopology = new EventSourceTopology(dataBrokerMock, rpcProviderRegistryMock);
@Test
public void destroyTopicTest() throws Exception{
- createTopicTestHelper();
+ topicTestHelper();
+ //TODO: modify test when destroyTopic will be implemented
DestroyTopicInput destroyTopicInput = null;
assertNotNull("Instance has not been created correctly.", eventSourceTopology.destroyTopic(destroyTopicInput));
}
@Test
public void closeTest() throws Exception{
- BindingAwareBroker.RpcRegistration rpcRegistrationMock = mock(BindingAwareBroker.RpcRegistration.class);
- doReturn(rpcRegistrationMock).when(rpcProviderRegistryMock).addRpcImplementation(eq(EventAggregatorService.class), any(EventSourceTopology.class));
- doNothing().when(rpcRegistrationMock).close();
- createTopicTestHelper();
- eventSourceTopology.createTopic(createTopicInputMock);
+ constructorTestHelper();
+ topicTestHelper();
+ Map<DataChangeListener, ListenerRegistration<DataChangeListener>> localMap = getTopicListenerRegistrations();
+ DataChangeListener dataChangeListenerMock = mock(DataChangeListener.class);
+ ListenerRegistration<DataChangeListener> listenerListenerRegistrationMock = (ListenerRegistration<DataChangeListener>) mock(ListenerRegistration.class);
+ localMap.put(dataChangeListenerMock, listenerListenerRegistrationMock);
eventSourceTopology.close();
- verify(rpcRegistrationMock, times(1)).close();
+ verify(aggregatorRpcReg, times(1)).close();
+ verify(listenerListenerRegistrationMock, times(1)).close();
}
@Test
public void registerTest() throws Exception {
- createTopicTestHelper();
+ topicTestHelper();
Node nodeMock = mock(Node.class);
- NetconfEventSource netconfEventSourceMock = mock(NetconfEventSource.class);
-
+ EventSource eventSourceMock = mock(EventSource.class);
NodeId nodeId = new NodeId("nodeIdValue1");
nodeKey = new NodeKey(nodeId);
doReturn(nodeKey).when(nodeMock).getKey();
-
+ doReturn(nodeKey).when(eventSourceMock).getSourceNodeKey();
BindingAwareBroker.RoutedRpcRegistration routedRpcRegistrationMock = mock(BindingAwareBroker.RoutedRpcRegistration.class);
- doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock).addRoutedRpcImplementation(EventSourceService.class, netconfEventSourceMock);
- eventSourceTopology.register(nodeMock, netconfEventSourceMock);
+ doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock).addRoutedRpcImplementation(EventSourceService.class, eventSourceMock);
+ doNothing().when(routedRpcRegistrationMock).registerPath(eq(NodeContext.class), any(KeyedInstanceIdentifier.class));
+ eventSourceTopology.register(eventSourceMock);
verify(routedRpcRegistrationMock, times(1)).registerPath(eq(NodeContext.class), any(KeyedInstanceIdentifier.class));
}
-}
+ @Test
+ public void unregisterTest() throws Exception {
+ topicTestHelper();
+ EventSource eventSourceMock = mock(EventSource.class);
+ NodeId nodeId = new NodeId("nodeIdValue1");
+ nodeKey = new NodeKey(nodeId);
+ Map<NodeKey, BindingAwareBroker.RoutedRpcRegistration<EventSourceService>> localMap = getRoutedRpcRegistrations();
+ NodeKey nodeKeyMock = mock(NodeKey.class);
+ doReturn(nodeKeyMock).when(eventSourceMock).getSourceNodeKey();
+ BindingAwareBroker.RoutedRpcRegistration<EventSourceService> routedRpcRegistrationMock = (BindingAwareBroker.RoutedRpcRegistration<EventSourceService>) mock(BindingAwareBroker.RoutedRpcRegistration.class);
+ localMap.put(nodeKeyMock, routedRpcRegistrationMock);
+ eventSourceTopology.unRegister(eventSourceMock);
+ verify(routedRpcRegistrationMock, times(1)).close();
+ }
+
+ @Test
+ public void registerEventSourceTest() throws Exception {
+ topicTestHelper();
+ Node nodeMock = mock(Node.class);
+ EventSource eventSourceMock = mock(EventSource.class);
+ NodeId nodeId = new NodeId("nodeIdValue1");
+ nodeKey = new NodeKey(nodeId);
+ doReturn(nodeKey).when(nodeMock).getKey();
+ doReturn(nodeKey).when(eventSourceMock).getSourceNodeKey();
+ BindingAwareBroker.RoutedRpcRegistration routedRpcRegistrationMock = mock(BindingAwareBroker.RoutedRpcRegistration.class);
+ doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock).addRoutedRpcImplementation(EventSourceService.class, eventSourceMock);
+ doNothing().when(routedRpcRegistrationMock).registerPath(eq(NodeContext.class), any(KeyedInstanceIdentifier.class));
+ assertNotNull("Return value has not been created correctly.", eventSourceTopology.registerEventSource(eventSourceMock));
+ }
+
+ private Map getTopicListenerRegistrations() throws Exception{
+ Field nesField = EventSourceTopology.class.getDeclaredField("topicListenerRegistrations");
+ nesField.setAccessible(true);
+ return (Map) nesField.get(eventSourceTopology);
+ }
+
+ private Map getRoutedRpcRegistrations() throws Exception{
+ Field nesField = EventSourceTopology.class.getDeclaredField("routedRpcRegistrations");
+ nesField.setAccessible(true);
+ return (Map) nesField.get(eventSourceTopology);
+ }
+
+}
\ No newline at end of file
*/
package org.opendaylight.controller.messagebus.app.impl;
-import com.google.common.base.Optional;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.notNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.messagebus.eventsources.netconf.NetconfEventSourceManager;
+import org.opendaylight.controller.messagebus.spi.EventSource;
+import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;
import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.NotificationsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeFields;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.notNull;
+import com.google.common.base.Optional;
public class NetconfEventSourceManagerTest {
+ private static final String notification_capability_prefix = "(urn:ietf:params:xml:ns:netconf:notification";
NetconfEventSourceManager netconfEventSourceManager;
ListenerRegistration listenerRegistrationMock;
DOMMountPointService domMountPointServiceMock;
MountPointService mountPointServiceMock;
EventSourceTopology eventSourceTopologyMock;
AsyncDataChangeEvent asyncDataChangeEventMock;
-
+ RpcProviderRegistry rpcProviderRegistryMock;
+ EventSourceRegistry eventSourceRegistry;
@BeforeClass
public static void initTestClass() throws IllegalAccessException, InstantiationException {
}
domMountPointServiceMock = mock(DOMMountPointService.class);
mountPointServiceMock = mock(MountPointService.class);
eventSourceTopologyMock = mock(EventSourceTopology.class);
+ rpcProviderRegistryMock = mock(RpcProviderRegistry.class);
+ eventSourceRegistry = mock(EventSourceRegistry.class);
List<NamespaceToStream> namespaceToStreamList = new ArrayList<>();
listenerRegistrationMock = mock(ListenerRegistration.class);
doReturn(listenerRegistrationMock).when(dataBrokerMock).registerDataChangeListener(eq(LogicalDatastoreType.OPERATIONAL), any(InstanceIdentifier.class), any(NetconfEventSourceManager.class), eq(AsyncDataBroker.DataChangeScope.SUBTREE));
- netconfEventSourceManager = new NetconfEventSourceManager(dataBrokerMock, domNotificationPublishServiceMock, domMountPointServiceMock,
- mountPointServiceMock, eventSourceTopologyMock, namespaceToStreamList);
+ netconfEventSourceManager =
+ NetconfEventSourceManager.create(dataBrokerMock,
+ domNotificationPublishServiceMock,
+ domMountPointServiceMock,
+ mountPointServiceMock,
+ eventSourceRegistry,
+ namespaceToStreamList);
}
@Test
- public void constructorTest() {
- assertNotNull("Instance has not been created correctly.", netconfEventSourceManager);
+ public void onDataChangedCreateEventSourceTestByCreateEntry() throws InterruptedException, ExecutionException {
+ onDataChangedTestHelper(true,false,true,notification_capability_prefix);
+ netconfEventSourceManager.onDataChanged(asyncDataChangeEventMock);
+ verify(eventSourceRegistry, times(1)).registerEventSource(any(EventSource.class));
}
@Test
- public void onDataChangedTest() {
- AsyncDataChangeEvent asyncDataChangeEventMock = mock(AsyncDataChangeEvent.class);
- Map<InstanceIdentifier, DataObject> map = new HashMap<>();
- InstanceIdentifier instanceIdentifierMock = mock(InstanceIdentifier.class);
- Node dataObjectMock = mock(Node.class);
- map.put(instanceIdentifierMock, dataObjectMock);
- doReturn(map).when(asyncDataChangeEventMock).getCreatedData();
- doReturn(map).when(asyncDataChangeEventMock).getUpdatedData();
+ public void onDataChangedCreateEventSourceTestByUpdateEntry() throws InterruptedException, ExecutionException {
+ onDataChangedTestHelper(false,true,true, notification_capability_prefix);
+ netconfEventSourceManager.onDataChanged(asyncDataChangeEventMock);
+ verify(eventSourceRegistry, times(1)).registerEventSource(any(EventSource.class));
+ }
+
+ @Test
+ public void onDataChangedCreateEventSourceTestNotNeconf() throws InterruptedException, ExecutionException {
+ onDataChangedTestHelper(false,true,false,notification_capability_prefix);
netconfEventSourceManager.onDataChanged(asyncDataChangeEventMock);
- verify(dataObjectMock, times(2)).getAugmentation(NetconfNode.class);
+ verify(eventSourceRegistry, times(0)).registerEventSource(any(EventSource.class));
}
@Test
- public void onDataChangedCreateEventSourceTest() {
- onDataChangedCreateEventSourceTestHelper();
+ public void onDataChangedCreateEventSourceTestNotNotificationCapability() throws InterruptedException, ExecutionException {
+ onDataChangedTestHelper(false,true,true,"bad-prefix");
netconfEventSourceManager.onDataChanged(asyncDataChangeEventMock);
- verify(eventSourceTopologyMock, times(1)).register(any(Node.class), any(NetconfEventSource.class));
+ verify(eventSourceRegistry, times(0)).registerEventSource(any(EventSource.class));
}
- private void onDataChangedCreateEventSourceTestHelper(){
+ private void onDataChangedTestHelper(boolean create, boolean update, boolean isNetconf, String notificationCapabilityPrefix) throws InterruptedException, ExecutionException{
asyncDataChangeEventMock = mock(AsyncDataChangeEvent.class);
- Map<InstanceIdentifier, DataObject> map = new HashMap<>();
+ Map<InstanceIdentifier, DataObject> mapCreate = new HashMap<>();
+ Map<InstanceIdentifier, DataObject> mapUpdate = new HashMap<>();
InstanceIdentifier instanceIdentifierMock = mock(InstanceIdentifier.class);
Node dataObjectMock = mock(Node.class);
- map.put(instanceIdentifierMock, dataObjectMock);
- doReturn(map).when(asyncDataChangeEventMock).getCreatedData();
- doReturn(map).when(asyncDataChangeEventMock).getUpdatedData();
+ if(create){
+ mapCreate.put(instanceIdentifierMock, dataObjectMock);
+ }
+ if(update){
+ mapUpdate.put(instanceIdentifierMock, dataObjectMock);
+ }
+
+ doReturn(mapCreate).when(asyncDataChangeEventMock).getCreatedData();
+ doReturn(mapUpdate).when(asyncDataChangeEventMock).getUpdatedData();
NetconfNode netconfNodeMock = mock(NetconfNode.class);
AvailableCapabilities availableCapabilitiesMock = mock(AvailableCapabilities.class);
- doReturn(netconfNodeMock).when(dataObjectMock).getAugmentation(NetconfNode.class);
- doReturn(availableCapabilitiesMock).when(netconfNodeMock).getAvailableCapabilities();
- List<String> availableCapabilityList = new ArrayList<>();
- availableCapabilityList.add("(urn:ietf:params:xml:ns:netconf:notification_availableCapabilityString1");
- doReturn(availableCapabilityList).when(availableCapabilitiesMock).getAvailableCapability();
-
- doReturn(NetconfNodeFields.ConnectionStatus.Connected).when(netconfNodeMock).getConnectionStatus();
+ if(isNetconf){
+ doReturn(netconfNodeMock).when(dataObjectMock).getAugmentation(NetconfNode.class);
+ doReturn(availableCapabilitiesMock).when(netconfNodeMock).getAvailableCapabilities();
+ List<String> availableCapabilityList = new ArrayList<>();
+ availableCapabilityList.add(notificationCapabilityPrefix +"_availableCapabilityString1");
+ doReturn(availableCapabilityList).when(availableCapabilitiesMock).getAvailableCapability();
+ doReturn(NetconfNodeFields.ConnectionStatus.Connected).when(netconfNodeMock).getConnectionStatus();
+ } else {
+ doReturn(null).when(dataObjectMock).getAugmentation(NetconfNode.class);
+ }
Optional optionalMock = mock(Optional.class);
Optional optionalBindingMountMock = mock(Optional.class);
doReturn(onlyOptionalMock).when(mountPointMock).getService(RpcConsumerRegistry.class);
doReturn(rpcConsumerRegistryMock).when(onlyOptionalMock).get();
doReturn(notificationsServiceMock).when(rpcConsumerRegistryMock).getRpcService(NotificationsService.class);
+ EventSourceRegistrationImpl esrMock = mock(EventSourceRegistrationImpl.class);
+ doReturn(esrMock).when(eventSourceRegistry).registerEventSource(any(EventSource.class));
}
- @Test
- public void isEventSourceTest() {
- Node nodeMock = mock(Node.class);
- NetconfNode netconfNodeMock = mock(NetconfNode.class);
- AvailableCapabilities availableCapabilitiesMock = mock(AvailableCapabilities.class);
- doReturn(netconfNodeMock).when(nodeMock).getAugmentation(NetconfNode.class);
- doReturn(availableCapabilitiesMock).when(netconfNodeMock).getAvailableCapabilities();
- List<String> availableCapabilityList = new ArrayList<>();
- availableCapabilityList.add("(urn:ietf:params:xml:ns:netconf:notification_availableCapabilityString1");
- doReturn(availableCapabilityList).when(availableCapabilitiesMock).getAvailableCapability();
- assertTrue("Method has not been run correctly.", netconfEventSourceManager.isEventSource(nodeMock));
- }
-
- @Test
- public void isNotEventSourceTest() {
- Node nodeMock = mock(Node.class);
- NetconfNode netconfNodeMock = mock(NetconfNode.class);
- AvailableCapabilities availableCapabilitiesMock = mock(AvailableCapabilities.class);
- doReturn(netconfNodeMock).when(nodeMock).getAugmentation(NetconfNode.class);
- doReturn(availableCapabilitiesMock).when(netconfNodeMock).getAvailableCapabilities();
- List<String> availableCapabilityList = new ArrayList<>();
- availableCapabilityList.add("availableCapabilityString1");
- doReturn(availableCapabilityList).when(availableCapabilitiesMock).getAvailableCapability();
- assertFalse("Method has not been run correctly.", netconfEventSourceManager.isEventSource(nodeMock));
- }
-
- @Test
- public void closeTest() {
- netconfEventSourceManager.close();
- verify(listenerRegistrationMock, times(1)).close();
- }
-}
+}
\ No newline at end of file
*/
package org.opendaylight.controller.messagebus.app.impl;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
+//import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.net.URI;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.md.sal.binding.api.BindingService;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.api.DOMService;
+import org.opendaylight.controller.messagebus.eventsources.netconf.NetconfEventSource;
import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.NotificationsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.opendaylight.yangtools.yang.common.QName;
-
-import java.lang.reflect.Field;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
public class NetconfEventSourceTest {
DOMMountPoint domMountPointMock;
JoinTopicInput joinTopicInputMock;
- @BeforeClass
- public static void initTestClass() throws IllegalAccessException, InstantiationException {
- }
-
@Before
public void setUp() throws Exception {
Map<String, String> streamMap = new HashMap<>();
- streamMap.put("string1", "string2");
+ streamMap.put("uriStr1", "string2");
domMountPointMock = mock(DOMMountPoint.class);
DOMNotificationPublishService domNotificationPublishServiceMock = mock(DOMNotificationPublishService.class);
- MountPoint mountPointMock = mock(MountPoint.class);
RpcConsumerRegistry rpcConsumerRegistryMock = mock(RpcConsumerRegistry.class);
Optional<BindingService> onlyOptionalMock = (Optional<BindingService>) mock(Optional.class);
NotificationsService notificationsServiceMock = mock(NotificationsService.class);
- doReturn(onlyOptionalMock).when(mountPointMock).getService(RpcConsumerRegistry.class);
- doReturn(rpcConsumerRegistryMock).when(onlyOptionalMock).get();
doReturn(notificationsServiceMock).when(rpcConsumerRegistryMock).getRpcService(NotificationsService.class);
- netconfEventSource = new NetconfEventSource("nodeId1", streamMap, domMountPointMock, domNotificationPublishServiceMock, mountPointMock);
+ org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node node
+ = mock(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.class);
+ org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId nodeId
+ = new org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId("NodeId1");
+ doReturn(nodeId).when(node).getNodeId();
+ netconfEventSource = new NetconfEventSource(node, streamMap, domMountPointMock, domNotificationPublishServiceMock);
}
@Test
- public void constructorTest() {
- assertNotNull("Instance has not been created correctly.", netconfEventSource);
+ public void onDataChangedTest(){
+ InstanceIdentifier brmIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(new NodeId("brm"))).augmentation(NetconfNode.class);
+ AsyncDataChangeEvent asyncDataChangeEventMock = mock(AsyncDataChangeEvent.class);
+ NetconfNode dataObjectMock = mock(NetconfNode.class);
+ Map<InstanceIdentifier, DataObject> dataChangeMap = new HashMap<>();
+ dataChangeMap.put(brmIdent, dataObjectMock);
+ doReturn(dataChangeMap).when(asyncDataChangeEventMock).getOriginalData();
+ doReturn(dataChangeMap).when(asyncDataChangeEventMock).getUpdatedData();
+ doReturn(true).when(dataObjectMock).isConnected();
+ netconfEventSource.onDataChanged(asyncDataChangeEventMock);
+ verify(dataObjectMock, times(2)).isConnected();
}
@Test
private void joinTopicTestHelper() throws Exception{
joinTopicInputMock = mock(JoinTopicInput.class);
+ TopicId topicId = new TopicId("topicID007");
+ doReturn(topicId).when(joinTopicInputMock).getTopicId();
NotificationPattern notificationPatternMock = mock(NotificationPattern.class);
doReturn(notificationPatternMock).when(joinTopicInputMock).getNotificationPattern();
- doReturn("regexString1").when(notificationPatternMock).getValue();
+ doReturn("uriStr1").when(notificationPatternMock).getValue();
SchemaContext schemaContextMock = mock(SchemaContext.class);
doReturn(schemaContextMock).when(domMountPointMock).getSchemaContext();
DOMNotificationService domNotificationServiceMock = mock(DOMNotificationService.class);
doReturn(domNotificationServiceMock).when(domNotificationServiceOptionalMock).get();
- ListenerRegistration listenerRegistrationMock = mock(ListenerRegistration.class);
- doReturn(listenerRegistrationMock).when(domNotificationServiceMock).registerNotificationListener(any(NetconfEventSource.class), any(List.class));
- }
-
- @Test (expected=NullPointerException.class)
- public void onNotificationTest() {
- DOMNotification domNotificationMock = mock(DOMNotification.class);
- ContainerNode containerNodeMock = mock(ContainerNode.class);
- SchemaContext schemaContextMock = mock(SchemaContext.class);
- SchemaPath schemaPathMock = mock(SchemaPath.class);
- doReturn(schemaContextMock).when(domMountPointMock).getSchemaContext();
- doReturn(schemaPathMock).when(domNotificationMock).getType();
- doReturn(containerNodeMock).when(domNotificationMock).getBody();
- netconfEventSource.onNotification(domNotificationMock);
- }
-
- @Test
- public void onDataChangedTest() {
- InstanceIdentifier brmIdent = InstanceIdentifier.create(Nodes.class)
- .child(Node.class, new NodeKey(new NodeId("brm"))).augmentation(NetconfNode.class);
- AsyncDataChangeEvent asyncDataChangeEventMock = mock(AsyncDataChangeEvent.class);
- NetconfNode dataObjectMock = mock(NetconfNode.class);
- Map<InstanceIdentifier, DataObject> dataChangeMap = new HashMap<>();
- dataChangeMap.put(brmIdent, dataObjectMock);
- doReturn(dataChangeMap).when(asyncDataChangeEventMock).getOriginalData();
- doReturn(dataChangeMap).when(asyncDataChangeEventMock).getUpdatedData();
-
- netconfEventSource.onDataChanged(asyncDataChangeEventMock);
- verify(dataObjectMock, times(2)).isConnected();
- }
-
- @Test
- public void onDataChangedResubscribeTest() throws Exception{
- InstanceIdentifier brmIdent = InstanceIdentifier.create(Nodes.class)
- .child(Node.class, new NodeKey(new NodeId("brm"))).augmentation(NetconfNode.class);
- AsyncDataChangeEvent asyncDataChangeEventMock = mock(AsyncDataChangeEvent.class);
- NetconfNode dataObjectMock = mock(NetconfNode.class);
- Map<InstanceIdentifier, DataObject> dataChangeMap = new HashMap<>();
- dataChangeMap.put(brmIdent, dataObjectMock);
- doReturn(dataChangeMap).when(asyncDataChangeEventMock).getUpdatedData();
- doReturn(true).when(dataObjectMock).isConnected();
-
- Set<String> localSet = getActiveStreams();
- localSet.add("activeStream1");
+ ListenerRegistration<NetconfEventSource> listenerRegistrationMock = (ListenerRegistration<NetconfEventSource>)mock(ListenerRegistration.class);
+ doReturn(listenerRegistrationMock).when(domNotificationServiceMock).registerNotificationListener(any(NetconfEventSource.class), any(SchemaPath.class));
Optional<DOMService> optionalMock = (Optional<DOMService>) mock(Optional.class);
doReturn(optionalMock).when(domMountPointMock).getService(DOMRpcService.class);
doReturn(domRpcServiceMock).when(optionalMock).get();
CheckedFuture checkedFutureMock = mock(CheckedFuture.class);
doReturn(checkedFutureMock).when(domRpcServiceMock).invokeRpc(any(SchemaPath.class), any(ContainerNode.class));
-
- netconfEventSource.onDataChanged(asyncDataChangeEventMock);
- verify(dataObjectMock, times(1)).isConnected();
- assertEquals("Size of set has not been set correctly.", 1, getActiveStreams().size());
- }
-
- private Set getActiveStreams() throws Exception{
- Field nesField = NetconfEventSource.class.getDeclaredField("activeStreams");
- nesField.setAccessible(true);
- return (Set) nesField.get(netconfEventSource);
}
-}
+}
\ No newline at end of file
*/
package org.opendaylight.controller.messagebus.app.impl;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.mock;
-
public class TopicDOMNotificationTest {
+ private static final String containerNodeBodyMockToString = "containerNodeBodyMock";
ContainerNode containerNodeBodyMock;
TopicDOMNotification topicDOMNotification;
@Before
public void setUp() throws Exception {
containerNodeBodyMock = mock(ContainerNode.class);
+ doReturn(containerNodeBodyMockToString).when(containerNodeBodyMock).toString();
topicDOMNotification = new TopicDOMNotification(containerNodeBodyMock);
}
@Test
public void getToStringTest() {
- String bodyString = "TopicDOMNotification [body=" + containerNodeBodyMock + "]";
+ String bodyString = "TopicDOMNotification [body=" + containerNodeBodyMockToString + "]";
assertEquals("String has not been created correctly.", bodyString, topicDOMNotification.toString());
}
-}
+}
\ No newline at end of file
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
+/**
+ * @author ppalmar
+ *
+ */
public class UtilTest {
- @Test
- public void testMD5Hash() throws Exception {
- // empty string
- createAndAssertHash("", "d41d8cd98f00b204e9800998ecf8427e");
-
- // non-empty string
- createAndAssertHash("The Guardian", "69b929ae473ed732d5fb8e0a55a8dc8d");
-
- // the same hash for the same string
- createAndAssertHash("The Independent", "db793706d70c37dcc16454fa8eb21b1c");
- createAndAssertHash("The Independent", "db793706d70c37dcc16454fa8eb21b1c"); // one more time
-
- // different strings must have different hashes
- createAndAssertHash("orange", "fe01d67a002dfa0f3ac084298142eccd");
- createAndAssertHash("yellow", "d487dd0b55dfcacdd920ccbdaeafa351");
- }
-
- //TODO: IllegalArgumentException would be better
- @Test(expected = RuntimeException.class)
- public void testMD5HashInvalidInput() throws Exception {
- Util.md5String(null);
- }
-
- @Test
- public void testWildcardToRegex() throws Exception {
- // empty wildcard string
- createAndAssertRegex("", "^$");
-
- // wildcard string is a char to be replaced
- createAndAssertRegex("*", "^.*$");
- createAndAssertRegex("?", "^.$");
- final String relevantChars = "()[]$^.{}|\\";
- for (final char c : relevantChars.toCharArray()) {
- final char oneChar[] = {c};
- final String wildcardStr = new String(oneChar);
- final String expectedRegex = "^\\" + c + "$";
- createAndAssertRegex(wildcardStr, expectedRegex);
- }
-
- // wildcard string consists of more chars
- createAndAssertRegex("a", "^a$");
- createAndAssertRegex("aBc", "^aBc$");
- createAndAssertRegex("a1b2C34", "^a1b2C34$");
- createAndAssertRegex("*?()[]$^.{}|\\X", "^.*.\\(\\)\\[\\]\\$\\^\\.\\{\\}\\|\\\\X$");
- createAndAssertRegex("a*BB?37|42$", "^a.*BB.37\\|42\\$$");
- }
-
@Test
public void testResultFor() throws Exception {
{
final String expectedResult = "dummy string";
- RpcResult<String> rpcResult = Util.resultFor(expectedResult).get();
+ RpcResult<String> rpcResult = Util.resultRpcSuccessFor(expectedResult).get();
assertEquals(expectedResult, rpcResult.getResult());
assertTrue(rpcResult.isSuccessful());
assertTrue(rpcResult.getErrors().isEmpty());
}
{
final Integer expectedResult = 42;
- RpcResult<Integer> rpcResult = Util.resultFor(expectedResult).get();
+ RpcResult<Integer> rpcResult = Util.resultRpcSuccessFor(expectedResult).get();
assertEquals(expectedResult, rpcResult.getResult());
assertTrue(rpcResult.isSuccessful());
assertTrue(rpcResult.getErrors().isEmpty());
}
}
- private static void createAndAssertHash(final String inString, final String expectedHash) {
- assertEquals("Incorrect hash.", expectedHash, Util.md5String(inString));
- }
-
- private static void createAndAssertRegex(final String wildcardStr, final String expectedRegex) {
- assertEquals("Incorrect regex string.", expectedRegex, Util.wildcardToRegex(wildcardStr));
- }
-
private static List<SchemaPath> createSchemaPathList() {
final QName qname1 = QName.create("urn:odl:xxx", "2015-01-01", "localName");
final QName qname2 = QName.create("urn:odl:yyy", "2015-01-01", "localName");
--- /dev/null
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-parent</artifactId>
+ <version>1.3.0-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>messagebus-spi</artifactId>
+ <name>${project.artifactId}</name>
+
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>messagebus-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl
+ </codeGeneratorClass>
+ <outputBaseDir>
+ ${project.build.directory}/generated-sources/sal
+ </outputBaseDir>
+ </generator>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ </codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>
+ urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang
+ </namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.messagebus.spi;
+
+import java.util.List;
+
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Event source is a node in topology which is able to produces notifications.
+ * To register event source you use {@link EventSourceRegistry#registerEventSource(EventSource)()}.
+ * EventSourceRegistry will request registered event source to publish notifications
+ * whenever EventSourceRegistry has been asked to publish a certain type of notifications.
+ * EventSourceRegistry will call method JoinTopic to request EventSource to publish notification.
+ * Event source must implement method JoinTopic (from superinterface {@link EventSourceService}).
+ */
+
+public interface EventSource extends EventSourceService, AutoCloseable {
+
+ /**
+ * Identifier of node associated with event source
+ *
+ * @return instance of NodeKey
+ */
+ NodeKey getSourceNodeKey();
+
+ /**
+ * List the types of notifications which source can produce.
+ *
+ * @return list of available notification
+ */
+ List<SchemaPath> getAvailableNotifications();
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.messagebus.spi;
+
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+
+/**
+ * Instance of EventSourceRegistration is returned by {@link EventSourceRegistry#registerEventSource(EventSource)}
+ * and it is used to unregister EventSource.
+ *
+ */
+public interface EventSourceRegistration <T extends EventSource> extends ObjectRegistration<T>{
+
+ @Override
+ public void close();
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.messagebus.spi;
+
+/**
+ *EventSourceRegistry is used to register {@link EventSource}.
+ *
+ */
+public interface EventSourceRegistry extends AutoCloseable {
+
+ /**
+ * Registers the given EventSource for public consumption. The EventSource is
+ * associated with the node identified via {@linkEventSource#getSourceNodeKey}.
+ *
+ * @param eventSource the EventSource instance to register
+ * @return an EventSourceRegistration instance that is used to unregister the EventSource via {@link EventSourceRegistrationImpl#close()}.
+ */
+ <T extends EventSource> EventSourceRegistration<T> registerEventSource(T eventSource);
+
+}
\ No newline at end of file
--- /dev/null
+module messagebus-event-source-registry {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:messagebus:spi:eventsourceregistry";
+ prefix "mb-esr";
+
+ import config { prefix config; revision-date 2013-04-05; }
+
+ description
+ "Event source registry service interface definition for MessageBus";
+
+ revision "2015-04-02" {
+ description
+ "Initial revision";
+ }
+
+ identity event-source-registry {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.messagebus.spi.EventSourceRegistry";
+ }
+
+}
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>model-inventory</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>model-topology</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<modules>
<module>sal-rest-docgen-maven</module>
<module>sal-akka-raft</module>
+ <module>sal-akka-raft-example</module>
<!--InMemory DOM DataStore-->
<module>sal-inmemory-datastore</module>
<!-- Message Bus -->
<module>messagebus-api</module>
+ <module>messagebus-spi</module>
<module>messagebus-impl</module>
<module>messagebus-config</module>
</modules>
</modules>
</profile>
</profiles>
-</project>
\ No newline at end of file
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-parent</artifactId>
+ <version>1.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>sal-akka-raft-example</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
+ </scm>
+</project>
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
/**
* A sample actor showing how the RaftActor is to be extended
*/
-public class ExampleActor extends RaftActor {
+public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
- private final Map<String, String> state = new HashMap();
- private final DataPersistenceProvider dataPersistenceProvider;
+ private final Map<String, String> state = new HashMap<>();
private long persistIdentifier = 1;
private final Optional<ActorRef> roleChangeNotifier;
public ExampleActor(String id, Map<String, String> peerAddresses,
Optional<ConfigParams> configParams) {
super(id, peerAddresses, configParams);
- this.dataPersistenceProvider = new PersistentDataProvider();
+ setPersistence(true);
roleChangeNotifier = createRoleChangeNotifier(id);
}
}
}
- @Override protected void createSnapshot() {
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
ByteString bs = null;
try {
bs = fromObject(state);
getSelf().tell(new CaptureSnapshotReply(bs.toByteArray()), null);
}
- @Override protected void applySnapshot(byte [] snapshot) {
+ @Override
+ public void applySnapshot(byte [] snapshot) {
state.clear();
try {
- state.putAll((HashMap) toObject(snapshot));
+ state.putAll((HashMap<String, String>) toObject(snapshot));
} catch (Exception e) {
LOG.error("Exception in applying snapshot", e);
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
+ LOG.debug("Snapshot applied to state : {}", ((HashMap<?, ?>) state).size());
}
}
}
- @Override
- protected DataPersistenceProvider persistence() {
- return dataPersistenceProvider;
- }
-
@Override public void onReceiveRecover(Object message)throws Exception {
super.onReceiveRecover(message);
}
}
@Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return this;
+ }
+
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ public void appendRecoveredLogEntry(Payload data) {
}
@Override
- protected void appendRecoveredLogEntry(Payload data) {
+ public void applyCurrentLogRecoveryBatch() {
}
@Override
- protected void applyCurrentLogRecoveryBatch() {
+ public void onRecoveryComplete() {
}
@Override
- protected void onRecoveryComplete() {
+ public void applyRecoverySnapshot(byte[] snapshot) {
}
@Override
- protected void applyRecoverySnapshot(byte[] snapshot) {
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return this;
}
}
}
// override this method to return the protobuff related extension fields and their values
- @Override public Map<GeneratedMessage.GeneratedExtension, String> encode() {
- Map<GeneratedMessage.GeneratedExtension, String> map = new HashMap<>();
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, String> encode() {
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, String> map = new HashMap<>();
map.put(KeyValueMessages.key, getKey());
map.put(KeyValueMessages.value, getValue());
return map;
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-akka-raft</artifactId>
<packaging>bundle</packaging>
<artifactId>commons-io</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+
<dependency>
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-slf4j_${scala.version}</artifactId>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
- <excludes>**/protobuff/**/*</excludes>
+ <excludes>**/protobuff/**/*,**/target/**/*</excludes>
</configuration>
</plugin>
*/
package org.opendaylight.controller.cluster.raft;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
// We define this as ArrayList so we can use ensureCapacity.
- protected ArrayList<ReplicatedLogEntry> journal;
+ private ArrayList<ReplicatedLogEntry> journal;
private long snapshotIndex = -1;
private long snapshotTerm = -1;
private ArrayList<ReplicatedLogEntry> snapshottedJournal;
private long previousSnapshotIndex = -1;
private long previousSnapshotTerm = -1;
- protected int dataSize = 0;
+ private int dataSize = 0;
public AbstractReplicatedLogImpl(long snapshotIndex,
long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries) {
this.snapshotIndex = snapshotIndex;
this.snapshotTerm = snapshotTerm;
this.journal = new ArrayList<>(unAppliedEntries);
+
+ for(ReplicatedLogEntry entry: journal) {
+ dataSize += entry.size();
+ }
}
public AbstractReplicatedLogImpl() {
}
@Override
- public void removeFrom(long logEntryIndex) {
+ public long removeFrom(long logEntryIndex) {
int adjustedIndex = adjustedIndex(logEntryIndex);
if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
// physical index should be less than list size and >= 0
- return;
+ return -1;
+ }
+
+ for(int i = adjustedIndex; i < journal.size(); i++) {
+ dataSize -= journal.get(i).size();
}
+
journal.subList(adjustedIndex , journal.size()).clear();
+
+ return adjustedIndex;
}
@Override
public void append(ReplicatedLogEntry replicatedLogEntry) {
journal.add(replicatedLogEntry);
+ dataSize += replicatedLogEntry.size();
}
@Override
List<ReplicatedLogEntry> snapshotJournalEntries = journal.subList(0, (int) (snapshotCapturedIndex - snapshotIndex));
snapshottedJournal.addAll(snapshotJournalEntries);
- clear(0, (int) (snapshotCapturedIndex - snapshotIndex));
+ snapshotJournalEntries.clear();
previousSnapshotIndex = snapshotIndex;
setSnapshotIndex(snapshotCapturedIndex);
snapshotTerm = previousSnapshotTerm;
previousSnapshotTerm = -1;
}
+
+ @VisibleForTesting
+ ReplicatedLogEntry getAtPhysicalIndex(int index) {
+ return journal.get(index);
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.slf4j.Logger;
+
+/**
+ * Implementation of ElectionTerm for the RaftActor.
+ */
+class ElectionTermImpl implements ElectionTerm {
+ /**
+ * Identifier of the actor whose election term information this is
+ */
+ private long currentTerm = 0;
+ private String votedFor = null;
+
+ private final DataPersistenceProvider persistence;
+
+ private final Logger log;
+ private final String logId;
+
+ ElectionTermImpl(DataPersistenceProvider persistence, String logId, Logger log) {
+ this.persistence = persistence;
+ this.logId = logId;
+ this.log = log;
+ }
+
+ @Override
+ public long getCurrentTerm() {
+ return currentTerm;
+ }
+
+ @Override
+ public String getVotedFor() {
+ return votedFor;
+ }
+
+ @Override public void update(long currentTerm, String votedFor) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Set currentTerm={}, votedFor={}", logId, currentTerm, votedFor);
+ }
+ this.currentTerm = currentTerm;
+ this.votedFor = votedFor;
+ }
+
+ @Override
+ public void updateAndPersist(long currentTerm, String votedFor){
+ update(currentTerm, votedFor);
+ // FIXME : Maybe first persist then update the state
+ persistence.persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), NoopProcedure.instance());
+ }
+}
\ No newline at end of file
*/
long timeSinceLastActivity();
+ /**
+ * This method checks if it is ok to replicate
+ *
+ * @return true if it is ok to replicate
+ */
+ boolean okToReplicate();
}
import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLongFieldUpdater;
public class FollowerLogInformationImpl implements FollowerLogInformation {
- private static final AtomicLongFieldUpdater<FollowerLogInformationImpl> NEXT_INDEX_UPDATER = AtomicLongFieldUpdater.newUpdater(FollowerLogInformationImpl.class, "nextIndex");
- private static final AtomicLongFieldUpdater<FollowerLogInformationImpl> MATCH_INDEX_UPDATER = AtomicLongFieldUpdater.newUpdater(FollowerLogInformationImpl.class, "matchIndex");
-
private final String id;
private final Stopwatch stopwatch = Stopwatch.createUnstarted();
private final RaftActorContext context;
- private volatile long nextIndex;
+ private long nextIndex;
+
+ private long matchIndex;
+
+ private long lastReplicatedIndex = -1L;
+
+ private final Stopwatch lastReplicatedStopwatch = Stopwatch.createUnstarted();
- private volatile long matchIndex;
public FollowerLogInformationImpl(String id, long matchIndex, RaftActorContext context) {
this.id = id;
}
@Override
- public long incrNextIndex(){
- return NEXT_INDEX_UPDATER.incrementAndGet(this);
+ public long incrNextIndex() {
+ return nextIndex++;
}
@Override
public long decrNextIndex() {
- return NEXT_INDEX_UPDATER.decrementAndGet(this);
+ return nextIndex--;
}
@Override
@Override
public long incrMatchIndex(){
- return MATCH_INDEX_UPDATER.incrementAndGet(this);
+ return matchIndex++;
}
@Override
return stopwatch.elapsed(TimeUnit.MILLISECONDS);
}
+ @Override
+ public boolean okToReplicate() {
+ // Return false if we are trying to send duplicate data before the heartbeat interval
+ if(getNextIndex() == lastReplicatedIndex){
+ if(lastReplicatedStopwatch.elapsed(TimeUnit.MILLISECONDS) < context.getConfigParams()
+ .getHeartBeatInterval().toMillis()){
+ return false;
+ }
+ }
+
+ resetLastReplicated();
+ return true;
+ }
+
+ private void resetLastReplicated(){
+ lastReplicatedIndex = getNextIndex();
+ if(lastReplicatedStopwatch.isRunning()){
+ lastReplicatedStopwatch.reset();
+ }
+ lastReplicatedStopwatch.start();
+ }
+
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
.append(context.getConfigParams().getElectionTimeOutInterval().toMillis()).append("]");
return builder.toString();
}
-
-
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+
+/**
+ * An akka Procedure that does nothing.
+ *
+ * @author Thomas Pantelis
+ */
+public class NoopProcedure<T> implements Procedure<T> {
+
+ private static final NoopProcedure<Object> INSTANCE = new NoopProcedure<>();
+
+ private NoopProcedure() {
+ }
+
+ @SuppressWarnings("unchecked")
+ public static <T> NoopProcedure<T> instance() {
+ return (NoopProcedure<T>) INSTANCE;
+ }
+
+ @Override
+ public void apply(Object notUsed) {
+ }
+}
/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.japi.Procedure;
-import akka.persistence.RecoveryCompleted;
-import akka.persistence.SaveSnapshotFailure;
-import akka.persistence.SaveSnapshotSuccess;
-import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Objects;
import com.google.common.base.Optional;
-import com.google.common.base.Stopwatch;
-import com.google.protobuf.ByteString;
+import com.google.common.collect.Lists;
import java.io.Serializable;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import org.apache.commons.lang3.time.DurationFormatUtils;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
+import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
-import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
-import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
+import org.opendaylight.controller.cluster.raft.behaviors.DelegatingRaftActorBehavior;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
+import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
- private static final Procedure<ApplyJournalEntries> APPLY_JOURNAL_ENTRIES_PERSIST_CALLBACK =
- new Procedure<ApplyJournalEntries>() {
- @Override
- public void apply(ApplyJournalEntries param) throws Exception {
- }
- };
-
protected final Logger LOG = LoggerFactory.getLogger(getClass());
/**
* The current state determines the current behavior of a RaftActor
* A Raft Actor always starts off in the Follower State
*/
- private RaftActorBehavior currentBehavior;
+ private final DelegatingRaftActorBehavior currentBehavior = new DelegatingRaftActorBehavior();
/**
* This context should NOT be passed directly to any other actor it is
*/
private final RaftActorContextImpl context;
- /**
- * The in-memory journal
- */
- private ReplicatedLogImpl replicatedLog = new ReplicatedLogImpl();
+ private final DelegatingPersistentDataProvider delegatingPersistenceProvider = new DelegatingPersistentDataProvider(null);
- private CaptureSnapshot captureSnapshot = null;
+ private RaftActorRecoverySupport raftRecovery;
- private Stopwatch recoveryTimer;
+ private RaftActorSnapshotMessageSupport snapshotSupport;
- private int currentRecoveryBatchCount;
+ private final BehaviorStateHolder reusableBehaviorStateHolder = new BehaviorStateHolder();
public RaftActor(String id, Map<String, String> peerAddresses) {
this(id, peerAddresses, Optional.<ConfigParams>absent());
Optional<ConfigParams> configParams) {
context = new RaftActorContextImpl(this.getSelf(),
- this.getContext(), id, new ElectionTermImpl(),
- -1, -1, replicatedLog, peerAddresses,
+ this.getContext(), id, new ElectionTermImpl(delegatingPersistenceProvider, id, LOG),
+ -1, -1, peerAddresses,
(configParams.isPresent() ? configParams.get(): new DefaultConfigParamsImpl()),
- LOG);
- }
+ delegatingPersistenceProvider, LOG);
- private void initRecoveryTimer() {
- if(recoveryTimer == null) {
- recoveryTimer = Stopwatch.createStarted();
- }
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, currentBehavior));
}
@Override
context.getConfigParams().getJournalRecoveryLogBatchSize());
super.preStart();
+
+ snapshotSupport = newRaftActorSnapshotMessageSupport();
}
@Override
public void postStop() {
- if(currentBehavior != null) {
+ if(currentBehavior.getDelegate() != null) {
try {
currentBehavior.close();
} catch (Exception e) {
@Override
public void handleRecover(Object message) {
- if(persistence().isRecoveryApplicable()) {
- if (message instanceof SnapshotOffer) {
- onRecoveredSnapshot((SnapshotOffer) message);
- } else if (message instanceof ReplicatedLogEntry) {
- onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
- } else if (message instanceof ApplyLogEntries) {
- // Handle this message for backwards compatibility with pre-Lithium versions.
- onRecoveredApplyLogEntries(((ApplyLogEntries) message).getToIndex());
- } else if (message instanceof ApplyJournalEntries) {
- onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
- } else if (message instanceof DeleteEntries) {
- replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
- } else if (message instanceof RecoveryCompleted) {
- onRecoveryCompletedMessage();
- }
- } else {
- if (message instanceof RecoveryCompleted) {
+ if(raftRecovery == null) {
+ raftRecovery = newRaftActorRecoverySupport();
+ }
+
+ boolean recoveryComplete = raftRecovery.handleRecoveryMessage(message);
+ if(recoveryComplete) {
+ if(!persistence().isRecoveryApplicable()) {
// Delete all the messages from the akka journal so that we do not end up with consistency issues
// Note I am not using the dataPersistenceProvider and directly using the akka api here
deleteMessages(lastSequenceNr());
// Delete all the akka snapshots as they will not be needed
deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), scala.Long.MaxValue()));
-
- onRecoveryComplete();
-
- initializeBehavior();
}
- }
- }
-
- private void onRecoveredSnapshot(SnapshotOffer offer) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: SnapshotOffer called..", persistenceId());
- }
-
- initRecoveryTimer();
-
- Snapshot snapshot = (Snapshot) offer.snapshot();
-
- // Create a replicated log with the snapshot information
- // The replicated log can be used later on to retrieve this snapshot
- // when we need to install it on a peer
- replicatedLog = new ReplicatedLogImpl(snapshot);
-
- context.setReplicatedLog(replicatedLog);
- context.setLastApplied(snapshot.getLastAppliedIndex());
- context.setCommitIndex(snapshot.getLastAppliedIndex());
-
- Stopwatch timer = Stopwatch.createStarted();
-
- // Apply the snapshot to the actors state
- applyRecoverySnapshot(snapshot.getState());
-
- timer.stop();
- LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
- replicatedLog.size(), persistenceId(), timer.toString(),
- replicatedLog.getSnapshotIndex(), replicatedLog.getSnapshotTerm());
- }
-
- private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received ReplicatedLogEntry for recovery: {}", persistenceId(), logEntry.getIndex());
- }
-
- replicatedLog.append(logEntry);
- }
-
- private void onRecoveredApplyLogEntries(long toIndex) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received ApplyLogEntries for recovery, applying to state: {} to {}",
- persistenceId(), context.getLastApplied() + 1, toIndex);
- }
-
- for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
- batchRecoveredLogEntry(replicatedLog.get(i));
- }
-
- context.setLastApplied(toIndex);
- context.setCommitIndex(toIndex);
- }
- private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
- initRecoveryTimer();
+ onRecoveryComplete();
- int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
- if(currentRecoveryBatchCount == 0) {
- startLogRecoveryBatch(batchSize);
- }
-
- appendRecoveredLogEntry(logEntry.getData());
+ initializeBehavior();
- if(++currentRecoveryBatchCount >= batchSize) {
- endCurrentLogRecoveryBatch();
+ raftRecovery = null;
}
}
- private void endCurrentLogRecoveryBatch() {
- applyCurrentLogRecoveryBatch();
- currentRecoveryBatchCount = 0;
- }
-
- private void onRecoveryCompletedMessage() {
- if(currentRecoveryBatchCount > 0) {
- endCurrentLogRecoveryBatch();
- }
-
- onRecoveryComplete();
-
- String recoveryTime = "";
- if(recoveryTimer != null) {
- recoveryTimer.stop();
- recoveryTime = " in " + recoveryTimer.toString();
- recoveryTimer = null;
- }
-
- LOG.info(
- "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog.lastIndex(), replicatedLog.getSnapshotIndex(),
- replicatedLog.getSnapshotTerm(), replicatedLog.size());
-
- initializeBehavior();
+ protected RaftActorRecoverySupport newRaftActorRecoverySupport() {
+ return new RaftActorRecoverySupport(context, currentBehavior, getRaftActorRecoveryCohort());
}
protected void initializeBehavior(){
}
protected void changeCurrentBehavior(RaftActorBehavior newBehavior){
- RaftActorBehavior oldBehavior = currentBehavior;
- currentBehavior = newBehavior;
- handleBehaviorChange(oldBehavior, currentBehavior);
+ reusableBehaviorStateHolder.init(getCurrentBehavior());
+ setCurrentBehavior(newBehavior);
+ handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
}
- @Override public void handleCommand(Object message) {
+ @Override
+ public void handleCommand(Object message) {
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
LOG.debug("{}: Persisting ApplyLogEntries with index={}", persistenceId(), applyEntries.getToIndex());
}
- persistence().persist(applyEntries, APPLY_JOURNAL_ENTRIES_PERSIST_CALLBACK);
-
- } else if(message instanceof ApplySnapshot ) {
- Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: ApplySnapshot called on Follower Actor " +
- "snapshotIndex:{}, snapshotTerm:{}", persistenceId(), snapshot.getLastAppliedIndex(),
- snapshot.getLastAppliedTerm()
- );
- }
-
- applySnapshot(snapshot.getState());
-
- //clears the followers log, sets the snapshot index to ensure adjusted-index works
- replicatedLog = new ReplicatedLogImpl(snapshot);
- context.setReplicatedLog(replicatedLog);
- context.setLastApplied(snapshot.getLastAppliedIndex());
+ persistence().persist(applyEntries, NoopProcedure.instance());
} else if (message instanceof FindLeader) {
getSender().tell(
new FindLeaderReply(getLeaderAddress()),
getSelf()
);
-
- } else if (message instanceof SaveSnapshotSuccess) {
- SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
- LOG.info("{}: SaveSnapshotSuccess received for snapshot", persistenceId());
-
- long sequenceNumber = success.metadata().sequenceNr();
-
- commitSnapshot(sequenceNumber);
-
- } else if (message instanceof SaveSnapshotFailure) {
- SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
-
- LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:",
- persistenceId(), saveSnapshotFailure.cause());
-
- context.getReplicatedLog().snapshotRollback();
-
- LOG.info("{}: Replicated Log rollbacked. Snapshot will be attempted in the next cycle." +
- "snapshotIndex:{}, snapshotTerm:{}, log-size:{}", persistenceId(),
- context.getReplicatedLog().getSnapshotIndex(),
- context.getReplicatedLog().getSnapshotTerm(),
- context.getReplicatedLog().size());
-
- } else if (message instanceof CaptureSnapshot) {
- LOG.debug("{}: CaptureSnapshot received by actor: {}", persistenceId(), message);
-
- if(captureSnapshot == null) {
- captureSnapshot = (CaptureSnapshot)message;
- createSnapshot();
+ } else if(message instanceof GetOnDemandRaftState) {
+ onGetOnDemandRaftStats();
+ } else if(!snapshotSupport.handleSnapshotMessage(message)) {
+ reusableBehaviorStateHolder.init(getCurrentBehavior());
+
+ setCurrentBehavior(currentBehavior.handleMessage(getSender(), message));
+
+ handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
+ }
+ }
+
+ protected RaftActorSnapshotMessageSupport newRaftActorSnapshotMessageSupport() {
+ return new RaftActorSnapshotMessageSupport(context, currentBehavior,
+ getRaftActorSnapshotCohort());
+ }
+
+ private void onGetOnDemandRaftStats() {
+ // Debugging message to retrieve raft stats.
+
+ OnDemandRaftState.Builder builder = OnDemandRaftState.builder()
+ .commitIndex(context.getCommitIndex())
+ .currentTerm(context.getTermInformation().getCurrentTerm())
+ .inMemoryJournalDataSize(replicatedLog().dataSize())
+ .inMemoryJournalLogSize(replicatedLog().size())
+ .isSnapshotCaptureInitiated(context.getSnapshotManager().isCapturing())
+ .lastApplied(context.getLastApplied())
+ .lastIndex(replicatedLog().lastIndex())
+ .lastTerm(replicatedLog().lastTerm())
+ .leader(getLeaderId())
+ .raftState(currentBehavior.state().toString())
+ .replicatedToAllIndex(currentBehavior.getReplicatedToAllIndex())
+ .snapshotIndex(replicatedLog().getSnapshotIndex())
+ .snapshotTerm(replicatedLog().getSnapshotTerm())
+ .votedFor(context.getTermInformation().getVotedFor())
+ .peerAddresses(new HashMap<>(context.getPeerAddresses()));
+
+ ReplicatedLogEntry lastLogEntry = getLastLogEntry();
+ if (lastLogEntry != null) {
+ builder.lastLogIndex(lastLogEntry.getIndex());
+ builder.lastLogTerm(lastLogEntry.getTerm());
+ }
+
+ if(getCurrentBehavior() instanceof AbstractLeader) {
+ AbstractLeader leader = (AbstractLeader)getCurrentBehavior();
+ Collection<String> followerIds = leader.getFollowerIds();
+ List<FollowerInfo> followerInfoList = Lists.newArrayListWithCapacity(followerIds.size());
+ for(String id: followerIds) {
+ final FollowerLogInformation info = leader.getFollower(id);
+ followerInfoList.add(new FollowerInfo(id, info.getNextIndex(), info.getMatchIndex(),
+ info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(info.timeSinceLastActivity())));
}
- } else if (message instanceof CaptureSnapshotReply){
- handleCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
+ builder.followerInfoList(followerInfoList);
+ }
- } else {
- RaftActorBehavior oldBehavior = currentBehavior;
- currentBehavior = currentBehavior.handleMessage(getSender(), message);
+ sender().tell(builder.build(), self());
- handleBehaviorChange(oldBehavior, currentBehavior);
- }
}
- private void handleBehaviorChange(RaftActorBehavior oldBehavior, RaftActorBehavior currentBehavior) {
+ private void handleBehaviorChange(BehaviorStateHolder oldBehaviorState, RaftActorBehavior currentBehavior) {
+ RaftActorBehavior oldBehavior = oldBehaviorState.getBehavior();
+
if (oldBehavior != currentBehavior){
onStateChanged();
}
- String oldBehaviorLeaderId = oldBehavior == null? null : oldBehavior.getLeaderId();
- String oldBehaviorState = oldBehavior == null? null : oldBehavior.state().name();
+ String oldBehaviorLeaderId = oldBehavior == null ? null : oldBehaviorState.getLeaderId();
+ String oldBehaviorStateName = oldBehavior == null ? null : oldBehavior.state().name();
// it can happen that the state has not changed but the leader has changed.
- onLeaderChanged(oldBehaviorLeaderId, currentBehavior.getLeaderId());
+ Optional<ActorRef> roleChangeNotifier = getRoleChangeNotifier();
+ if(!Objects.equal(oldBehaviorLeaderId, currentBehavior.getLeaderId())) {
+ if(roleChangeNotifier.isPresent()) {
+ roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId()), getSelf());
+ }
+
+ onLeaderChanged(oldBehaviorLeaderId, currentBehavior.getLeaderId());
+ }
- if (getRoleChangeNotifier().isPresent() &&
+ if (roleChangeNotifier.isPresent() &&
(oldBehavior == null || (oldBehavior.state() != currentBehavior.state()))) {
- getRoleChangeNotifier().get().tell(
- new RoleChanged(getId(), oldBehaviorState , currentBehavior.state().name()),
- getSelf());
+ roleChangeNotifier.get().tell(new RoleChanged(getId(), oldBehaviorStateName ,
+ currentBehavior.state().name()), getSelf());
}
}
+ protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId) {
+ return new LeaderStateChanged(memberId, leaderId);
+ }
+
/**
* When a derived RaftActor needs to persist something it must call
* persistData.
final RaftActorContext raftContext = getRaftActorContext();
- replicatedLog
- .appendAndPersist(replicatedLogEntry, new Procedure<ReplicatedLogEntry>() {
- @Override
- public void apply(ReplicatedLogEntry replicatedLogEntry) throws Exception {
- if(!hasFollowers()){
- // Increment the Commit Index and the Last Applied values
- raftContext.setCommitIndex(replicatedLogEntry.getIndex());
- raftContext.setLastApplied(replicatedLogEntry.getIndex());
-
- // Apply the state immediately
- applyState(clientActor, identifier, data);
-
- // Send a ApplyJournalEntries message so that we write the fact that we applied
- // the state to durable storage
- self().tell(new ApplyJournalEntries(replicatedLogEntry.getIndex()), self());
-
- // Check if the "real" snapshot capture has been initiated. If no then do the fake snapshot
- if(!context.isSnapshotCaptureInitiated()){
- raftContext.getReplicatedLog().snapshotPreCommit(raftContext.getLastApplied(),
- raftContext.getTermInformation().getCurrentTerm());
- raftContext.getReplicatedLog().snapshotCommit();
- } else {
- LOG.debug("{}: Skipping fake snapshotting for {} because real snapshotting is in progress",
- persistenceId(), getId());
- }
- } else if (clientActor != null) {
- // Send message for replication
- currentBehavior.handleMessage(getSelf(),
- new Replicate(clientActor, identifier,
- replicatedLogEntry)
- );
- }
-
- }
- }); }
+ replicatedLog().appendAndPersist(replicatedLogEntry, new Procedure<ReplicatedLogEntry>() {
+ @Override
+ public void apply(ReplicatedLogEntry replicatedLogEntry) throws Exception {
+ if(!hasFollowers()){
+ // Increment the Commit Index and the Last Applied values
+ raftContext.setCommitIndex(replicatedLogEntry.getIndex());
+ raftContext.setLastApplied(replicatedLogEntry.getIndex());
+
+ // Apply the state immediately
+ applyState(clientActor, identifier, data);
+
+ // Send a ApplyJournalEntries message so that we write the fact that we applied
+ // the state to durable storage
+ self().tell(new ApplyJournalEntries(replicatedLogEntry.getIndex()), self());
+
+ context.getSnapshotManager().trimLog(context.getLastApplied(), currentBehavior);
+
+ } else if (clientActor != null) {
+ // Send message for replication
+ currentBehavior.handleMessage(getSelf(),
+ new Replicate(clientActor, identifier, replicatedLogEntry));
+ }
+ }
+ });
+ }
+
+ private ReplicatedLog replicatedLog() {
+ return context.getReplicatedLog();
+ }
protected String getId() {
return context.getId();
}
+ @VisibleForTesting
+ void setCurrentBehavior(RaftActorBehavior behavior) {
+ currentBehavior.setDelegate(behavior);
+ }
+
+ protected RaftActorBehavior getCurrentBehavior() {
+ return currentBehavior.getDelegate();
+ }
+
/**
* Derived actors can call the isLeader method to check if the current
* RaftActor is the Leader or not
}
protected ReplicatedLogEntry getLastLogEntry() {
- return replicatedLog.last();
+ return replicatedLog().last();
}
protected Long getCurrentTerm(){
context.setConfigParams(configParams);
}
+ public final DataPersistenceProvider persistence() {
+ return delegatingPersistenceProvider.getDelegate();
+ }
+
+ public void setPersistence(DataPersistenceProvider provider) {
+ delegatingPersistenceProvider.setDelegate(provider);
+ }
+
+ protected void setPersistence(boolean persistent) {
+ if(persistent) {
+ setPersistence(new PersistentDataProvider(this));
+ } else {
+ setPersistence(new NonPersistentDataProvider() {
+ /**
+ * The way snapshotting works is,
+ * <ol>
+ * <li> RaftActor calls createSnapshot on the Shard
+ * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
+ * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save
+ * the snapshot. The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the
+ * RaftActor gets SaveSnapshot success it commits the snapshot to the in-memory journal. This
+ * commitSnapshot is mimicking what is done in SaveSnapshotSuccess.
+ * </ol>
+ */
+ @Override
+ public void saveSnapshot(Object o) {
+ // Make saving Snapshot successful
+ // Committing the snapshot here would end up calling commit in the creating state which would
+ // be a state violation. That's why now we send a message to commit the snapshot.
+ self().tell(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT, self());
+ }
+ });
+ }
+ }
+
/**
* setPeerAddress sets the address of a known peer at a later time.
* <p>
context.setPeerAddress(peerId, peerAddress);
}
- protected void commitSnapshot(long sequenceNumber) {
- context.getReplicatedLog().snapshotCommit();
-
- // TODO: Not sure if we want to be this aggressive with trimming stuff
- trimPersistentData(sequenceNumber);
- }
-
/**
* The applyState method will be called by the RaftActor when some data
* needs to be applied to the actor's state
Object data);
/**
- * This method is called during recovery at the start of a batch of state entries. Derived
- * classes should perform any initialization needed to start a batch.
+ * Returns the RaftActorRecoveryCohort to participate in persistence recovery.
*/
- protected abstract void startLogRecoveryBatch(int maxBatchSize);
-
- /**
- * This method is called during recovery to append state data to the current batch. This method
- * is called 1 or more times after {@link #startLogRecoveryBatch}.
- *
- * @param data the state data
- */
- protected abstract void appendRecoveredLogEntry(Payload data);
-
- /**
- * This method is called during recovery to reconstruct the state of the actor.
- *
- * @param snapshotBytes A snapshot of the state of the actor
- */
- protected abstract void applyRecoverySnapshot(byte[] snapshotBytes);
-
- /**
- * This method is called during recovery at the end of a batch to apply the current batched
- * log entries. This method is called after {@link #appendRecoveredLogEntry}.
- */
- protected abstract void applyCurrentLogRecoveryBatch();
+ @Nonnull
+ protected abstract RaftActorRecoveryCohort getRaftActorRecoveryCohort();
/**
* This method is called when recovery is complete.
protected abstract void onRecoveryComplete();
/**
- * This method will be called by the RaftActor when a snapshot needs to be
- * created. The derived actor should respond with its current state.
- * <p/>
- * During recovery the state that is returned by the derived actor will
- * be passed back to it by calling the applySnapshot method
- *
- * @return The current state of the actor
- */
- protected abstract void createSnapshot();
-
- /**
- * This method can be called at any other point during normal
- * operations when the derived actor is out of sync with it's peers
- * and the only way to bring it in sync is by applying a snapshot
- *
- * @param snapshotBytes A snapshot of the state of the actor
+ * Returns the RaftActorSnapshotCohort to participate in persistence recovery.
*/
- protected abstract void applySnapshot(byte[] snapshotBytes);
+ @Nonnull
+ protected abstract RaftActorSnapshotCohort getRaftActorSnapshotCohort();
/**
* This method will be called by the RaftActor when the state of the
*/
protected abstract void onStateChanged();
- protected abstract DataPersistenceProvider persistence();
-
/**
* Notifier Actor for this RaftActor to notify when a role change happens
* @return ActorRef - ActorRef of the notifier or Optional.absent if none.
protected void onLeaderChanged(String oldLeader, String newLeader){};
- private void trimPersistentData(long sequenceNumber) {
- // Trim akka snapshots
- // FIXME : Not sure how exactly the SnapshotSelectionCriteria is applied
- // For now guessing that it is ANDed.
- persistence().deleteSnapshots(new SnapshotSelectionCriteria(
- sequenceNumber - context.getConfigParams().getSnapshotBatchCount(), 43200000));
-
- // Trim akka journal
- persistence().deleteMessages(sequenceNumber);
- }
-
private String getLeaderAddress(){
if(isLeader()){
return getSelf().path().toString();
return peerAddress;
}
- private void handleCaptureSnapshotReply(byte[] snapshotBytes) {
- LOG.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", persistenceId(), snapshotBytes.length);
-
- // create a snapshot object from the state provided and save it
- // when snapshot is saved async, SaveSnapshotSuccess is raised.
-
- Snapshot sn = Snapshot.create(snapshotBytes,
- context.getReplicatedLog().getFrom(captureSnapshot.getLastAppliedIndex() + 1),
- captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
- captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
-
- persistence().saveSnapshot(sn);
-
- LOG.info("{}: Persisting of snapshot done:{}", persistenceId(), sn.getLogMessage());
-
- long dataThreshold = Runtime.getRuntime().totalMemory() *
- getRaftActorContext().getConfigParams().getSnapshotDataThresholdPercentage() / 100;
- if (context.getReplicatedLog().dataSize() > dataThreshold) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: dataSize {} exceeds dataThreshold {} - doing snapshotPreCommit with index {}",
- persistenceId(), context.getReplicatedLog().dataSize(), dataThreshold,
- captureSnapshot.getLastAppliedIndex());
- }
-
- // if memory is less, clear the log based on lastApplied.
- // this could/should only happen if one of the followers is down
- // as normally we keep removing from the log when its replicated to all.
- context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getLastAppliedIndex(),
- captureSnapshot.getLastAppliedTerm());
-
- // Don't reset replicatedToAllIndex to -1 as this may prevent us from trimming the log after an
- // install snapshot to a follower.
- if(captureSnapshot.getReplicatedToAllIndex() >= 0) {
- getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
- }
- } else if(captureSnapshot.getReplicatedToAllIndex() != -1){
- // clear the log based on replicatedToAllIndex
- context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getReplicatedToAllIndex(),
- captureSnapshot.getReplicatedToAllTerm());
-
- getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
- } else {
- // The replicatedToAllIndex was not found in the log
- // This means that replicatedToAllIndex never moved beyond -1 or that it is already in the snapshot.
- // In this scenario we may need to save the snapshot to the akka persistence
- // snapshot for recovery but we do not need to do the replicated log trimming.
- context.getReplicatedLog().snapshotPreCommit(replicatedLog.getSnapshotIndex(),
- replicatedLog.getSnapshotTerm());
- }
-
-
- LOG.info("{}: Removed in-memory snapshotted entries, adjusted snaphsotIndex: {} " +
- "and term: {}", persistenceId(), replicatedLog.getSnapshotIndex(),
- replicatedLog.getSnapshotTerm());
-
- if (isLeader() && captureSnapshot.isInstallSnapshotInitiated()) {
- // this would be call straight to the leader and won't initiate in serialization
- currentBehavior.handleMessage(getSelf(), new SendInstallSnapshot(
- ByteString.copyFrom(snapshotBytes)));
- }
-
- captureSnapshot = null;
- context.setSnapshotCaptureInitiated(false);
- }
-
protected boolean hasFollowers(){
- return getRaftActorContext().getPeerAddresses().keySet().size() > 0;
- }
-
- private class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
-
- private static final int DATA_SIZE_DIVIDER = 5;
- private long dataSizeSinceLastSnapshot = 0;
-
- public ReplicatedLogImpl(Snapshot snapshot) {
- super(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
- snapshot.getUnAppliedEntries());
- }
-
- public ReplicatedLogImpl() {
- super();
- }
-
- @Override public void removeFromAndPersist(long logEntryIndex) {
- int adjustedIndex = adjustedIndex(logEntryIndex);
-
- if (adjustedIndex < 0) {
- return;
- }
-
- // FIXME: Maybe this should be done after the command is saved
- journal.subList(adjustedIndex , journal.size()).clear();
-
- persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>() {
-
- @Override
- public void apply(DeleteEntries param)
- throws Exception {
- //FIXME : Doing nothing for now
- dataSize = 0;
- for (ReplicatedLogEntry entry : journal) {
- dataSize += entry.size();
- }
- }
- });
- }
-
- @Override public void appendAndPersist(
- final ReplicatedLogEntry replicatedLogEntry) {
- appendAndPersist(replicatedLogEntry, null);
- }
-
- public void appendAndPersist(
- final ReplicatedLogEntry replicatedLogEntry,
- final Procedure<ReplicatedLogEntry> callback) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Append log entry and persist {} ", persistenceId(), replicatedLogEntry);
- }
-
- // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
- journal.add(replicatedLogEntry);
-
- // When persisting events with persist it is guaranteed that the
- // persistent actor will not receive further commands between the
- // persist call and the execution(s) of the associated event
- // handler. This also holds for multiple persist calls in context
- // of a single command.
- persistence().persist(replicatedLogEntry,
- new Procedure<ReplicatedLogEntry>() {
- @Override
- public void apply(ReplicatedLogEntry evt) throws Exception {
- int logEntrySize = replicatedLogEntry.size();
-
- dataSize += logEntrySize;
- long dataSizeForCheck = dataSize;
-
- dataSizeSinceLastSnapshot += logEntrySize;
- long journalSize = lastIndex() + 1;
-
- if(!hasFollowers()) {
- // When we do not have followers we do not maintain an in-memory log
- // due to this the journalSize will never become anything close to the
- // snapshot batch count. In fact will mostly be 1.
- // Similarly since the journal's dataSize depends on the entries in the
- // journal the journal's dataSize will never reach a value close to the
- // memory threshold.
- // By maintaining the dataSize outside the journal we are tracking essentially
- // what we have written to the disk however since we no longer are in
- // need of doing a snapshot just for the sake of freeing up memory we adjust
- // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
- // as if we were maintaining a real snapshot
- dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
- }
-
- long dataThreshold = Runtime.getRuntime().totalMemory() *
- getRaftActorContext().getConfigParams().getSnapshotDataThresholdPercentage() / 100;
-
- // when a snaphsot is being taken, captureSnapshot != null
- if (!context.isSnapshotCaptureInitiated() &&
- ( journalSize % context.getConfigParams().getSnapshotBatchCount() == 0 ||
- dataSizeForCheck > dataThreshold)) {
-
- dataSizeSinceLastSnapshot = 0;
-
- LOG.info("{}: Initiating Snapshot Capture, journalSize = {}, dataSizeForCheck = {}," +
- " dataThreshold = {}", persistenceId(), journalSize, dataSizeForCheck, dataThreshold);
-
- long lastAppliedIndex = -1;
- long lastAppliedTerm = -1;
-
- ReplicatedLogEntry lastAppliedEntry = get(context.getLastApplied());
- if (!hasFollowers()) {
- lastAppliedIndex = replicatedLogEntry.getIndex();
- lastAppliedTerm = replicatedLogEntry.getTerm();
- } else if (lastAppliedEntry != null) {
- lastAppliedIndex = lastAppliedEntry.getIndex();
- lastAppliedTerm = lastAppliedEntry.getTerm();
- }
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Snapshot Capture logSize: {}", persistenceId(), journal.size());
- LOG.debug("{}: Snapshot Capture lastApplied:{} ",
- persistenceId(), context.getLastApplied());
- LOG.debug("{}: Snapshot Capture lastAppliedIndex:{}", persistenceId(),
- lastAppliedIndex);
- LOG.debug("{}: Snapshot Capture lastAppliedTerm:{}", persistenceId(),
- lastAppliedTerm);
- }
-
- // send a CaptureSnapshot to self to make the expensive operation async.
- long replicatedToAllIndex = getCurrentBehavior().getReplicatedToAllIndex();
- ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);
- getSelf().tell(new CaptureSnapshot(lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
- (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
- (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1)),
- null);
- context.setSnapshotCaptureInitiated(true);
- }
- if (callback != null){
- callback.apply(replicatedLogEntry);
- }
- }
- }
- );
- }
-
+ return getRaftActorContext().hasFollowers();
}
+ /**
+ * @deprecated Deprecated in favor of {@link org.opendaylight.controller.cluster.raft.base.messages.DeleteEntriesTest}
+ * whose type for fromIndex is long instead of int. This class was kept for backwards
+ * compatibility with Helium.
+ */
+ @Deprecated
static class DeleteEntries implements Serializable {
private static final long serialVersionUID = 1L;
private final int fromIndex;
}
}
-
- private class ElectionTermImpl implements ElectionTerm {
- /**
- * Identifier of the actor whose election term information this is
- */
- private long currentTerm = 0;
- private String votedFor = null;
-
- @Override
- public long getCurrentTerm() {
- return currentTerm;
- }
-
- @Override
- public String getVotedFor() {
- return votedFor;
- }
-
- @Override public void update(long currentTerm, String votedFor) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Set currentTerm={}, votedFor={}", persistenceId(), currentTerm, votedFor);
- }
- this.currentTerm = currentTerm;
- this.votedFor = votedFor;
- }
-
- @Override
- public void updateAndPersist(long currentTerm, String votedFor){
- update(currentTerm, votedFor);
- // FIXME : Maybe first persist then update the state
- persistence().persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
-
- @Override public void apply(UpdateElectionTerm param)
- throws Exception {
-
- }
- });
- }
- }
-
static class UpdateElectionTerm implements Serializable {
private static final long serialVersionUID = 1L;
private final long currentTerm;
}
}
- protected class NonPersistentRaftDataProvider extends NonPersistentDataProvider {
-
- public NonPersistentRaftDataProvider(){
+ private static class BehaviorStateHolder {
+ private RaftActorBehavior behavior;
+ private String leaderId;
+ void init(RaftActorBehavior behavior) {
+ this.behavior = behavior;
+ this.leaderId = behavior != null ? behavior.getLeaderId() : null;
}
- /**
- * The way snapshotting works is,
- * <ol>
- * <li> RaftActor calls createSnapshot on the Shard
- * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
- * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save the snapshot.
- * The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the RaftActor gets SaveSnapshot
- * success it commits the snapshot to the in-memory journal. This commitSnapshot is mimicking what is done
- * in SaveSnapshotSuccess.
- * </ol>
- * @param o
- */
- @Override
- public void saveSnapshot(Object o) {
- // Make saving Snapshot successful
- commitSnapshot(-1L);
+ RaftActorBehavior getBehavior() {
+ return behavior;
}
- }
- @VisibleForTesting
- void setCurrentBehavior(AbstractRaftActorBehavior behavior) {
- currentBehavior = behavior;
- }
-
- protected RaftActorBehavior getCurrentBehavior() {
- return currentBehavior;
+ String getLeaderId() {
+ return leaderId;
+ }
}
-
}
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Supplier;
import java.util.Map;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.slf4j.Logger;
/**
*/
ConfigParams getConfigParams();
- void setSnapshotCaptureInitiated(boolean snapshotCaptureInitiated);
+ SnapshotManager getSnapshotManager();
- boolean isSnapshotCaptureInitiated();
+ DataPersistenceProvider getPersistenceProvider();
+
+ boolean hasFollowers();
+
+ long getTotalMemory();
+
+ @VisibleForTesting
+ void setTotalMemoryRetriever(Supplier<Long> retriever);
}
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActorContext;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Supplier;
import java.util.Map;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.slf4j.Logger;
public class RaftActorContextImpl implements RaftActorContext {
private ConfigParams configParams;
- private boolean snapshotCaptureInitiated;
+ @VisibleForTesting
+ private Supplier<Long> totalMemoryRetriever;
- public RaftActorContextImpl(ActorRef actor, UntypedActorContext context,
- String id,
- ElectionTerm termInformation, long commitIndex,
- long lastApplied, ReplicatedLog replicatedLog,
- Map<String, String> peerAddresses, ConfigParams configParams,
- Logger logger) {
+ // Snapshot manager will need to be created on demand as it needs raft actor context which cannot
+ // be passed to it in the constructor
+ private SnapshotManager snapshotManager;
+
+ private final DataPersistenceProvider persistenceProvider;
+
+ public RaftActorContextImpl(ActorRef actor, UntypedActorContext context, String id,
+ ElectionTerm termInformation, long commitIndex, long lastApplied, Map<String, String> peerAddresses,
+ ConfigParams configParams, DataPersistenceProvider persistenceProvider, Logger logger) {
this.actor = actor;
this.context = context;
this.id = id;
this.termInformation = termInformation;
this.commitIndex = commitIndex;
this.lastApplied = lastApplied;
- this.replicatedLog = replicatedLog;
this.peerAddresses = peerAddresses;
this.configParams = configParams;
+ this.persistenceProvider = persistenceProvider;
this.LOG = logger;
}
return configParams;
}
- @Override
- public void setSnapshotCaptureInitiated(boolean snapshotCaptureInitiated) {
- this.snapshotCaptureInitiated = snapshotCaptureInitiated;
- }
-
- @Override
- public boolean isSnapshotCaptureInitiated() {
- return snapshotCaptureInitiated;
- }
-
@Override public void addToPeers(String name, String address) {
peerAddresses.put(name, address);
}
peerAddresses.put(peerId, peerAddress);
}
+
+ @Override
+ public SnapshotManager getSnapshotManager() {
+ if(snapshotManager == null){
+ snapshotManager = new SnapshotManager(this, LOG);
+ }
+ return snapshotManager;
+ }
+
+ @Override
+ public long getTotalMemory() {
+ return totalMemoryRetriever != null ? totalMemoryRetriever.get() : Runtime.getRuntime().totalMemory();
+ }
+
+ @Override
+ public void setTotalMemoryRetriever(Supplier<Long> retriever) {
+ totalMemoryRetriever = retriever;
+ }
+
+ @Override
+ public boolean hasFollowers() {
+ return getPeerAddresses().keySet().size() > 0;
+ }
+
+ @Override
+ public DataPersistenceProvider getPersistenceProvider() {
+ return persistenceProvider;
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+
+/**
+ * Interface for a class that participates in raft actor persistence recovery.
+ *
+ * @author Thomas Pantelis
+ */
+public interface RaftActorRecoveryCohort {
+
+ /**
+ * This method is called during recovery at the start of a batch of state entries. Derived
+ * classes should perform any initialization needed to start a batch.
+ */
+ void startLogRecoveryBatch(int maxBatchSize);
+
+ /**
+ * This method is called during recovery to append state data to the current batch. This method
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
+ *
+ * @param data the state data
+ */
+ void appendRecoveredLogEntry(Payload data);
+
+ /**
+ * This method is called during recovery to reconstruct the state of the actor.
+ *
+ * @param snapshotBytes A snapshot of the state of the actor
+ */
+ void applyRecoverySnapshot(byte[] snapshotBytes);
+
+ /**
+ * This method is called during recovery at the end of a batch to apply the current batched
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
+ */
+ void applyCurrentLogRecoveryBatch();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotOffer;
+import com.google.common.base.Stopwatch;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+/**
+ * Support class that handles persistence recovery for a RaftActor.
+ *
+ * @author Thomas Pantelis
+ */
+class RaftActorRecoverySupport {
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+ private final RaftActorRecoveryCohort cohort;
+
+ private int currentRecoveryBatchCount;
+
+ private Stopwatch recoveryTimer;
+ private final Logger log;
+
+ RaftActorRecoverySupport(RaftActorContext context, RaftActorBehavior currentBehavior,
+ RaftActorRecoveryCohort cohort) {
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ this.cohort = cohort;
+ this.log = context.getLogger();
+ }
+
+ boolean handleRecoveryMessage(Object message) {
+ boolean recoveryComplete = false;
+ if(context.getPersistenceProvider().isRecoveryApplicable()) {
+ if (message instanceof SnapshotOffer) {
+ onRecoveredSnapshot((SnapshotOffer) message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
+ } else if (message instanceof ApplyLogEntries) {
+ // Handle this message for backwards compatibility with pre-Lithium versions.
+ onRecoveredApplyLogEntries(((ApplyLogEntries) message).getToIndex());
+ } else if (message instanceof ApplyJournalEntries) {
+ onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
+ } else if (message instanceof DeleteEntries) {
+ replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries) {
+ // Handle this message for backwards compatibility with pre-Lithium versions.
+ replicatedLog().removeFrom(((org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ recoveryComplete = true;
+ }
+ } else if (message instanceof RecoveryCompleted) {
+ recoveryComplete = true;
+ }
+
+ return recoveryComplete;
+ }
+
+ private ReplicatedLog replicatedLog() {
+ return context.getReplicatedLog();
+ }
+
+ private void initRecoveryTimer() {
+ if(recoveryTimer == null) {
+ recoveryTimer = Stopwatch.createStarted();
+ }
+ }
+
+ private void onRecoveredSnapshot(SnapshotOffer offer) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: SnapshotOffer called..", context.getId());
+ }
+
+ initRecoveryTimer();
+
+ Snapshot snapshot = (Snapshot) offer.snapshot();
+
+ // Create a replicated log with the snapshot information
+ // The replicated log can be used later on to retrieve this snapshot
+ // when we need to install it on a peer
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, currentBehavior));
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
+
+ Stopwatch timer = Stopwatch.createStarted();
+
+ // Apply the snapshot to the actors state
+ cohort.applyRecoverySnapshot(snapshot.getState());
+
+ timer.stop();
+ log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
+ context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
+ }
+
+ private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Received ReplicatedLogEntry for recovery: index: {}, size: {}", context.getId(),
+ logEntry.getIndex(), logEntry.size());
+ }
+
+ replicatedLog().append(logEntry);
+ }
+
+ private void onRecoveredApplyLogEntries(long toIndex) {
+ long lastUnappliedIndex = context.getLastApplied() + 1;
+
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Received apply journal entries for recovery, applying to state: {} to {}",
+ context.getId(), lastUnappliedIndex, toIndex);
+ }
+
+ long lastApplied = lastUnappliedIndex - 1;
+ for (long i = lastUnappliedIndex; i <= toIndex; i++) {
+ ReplicatedLogEntry logEntry = replicatedLog().get(i);
+ if(logEntry != null) {
+ lastApplied++;
+ batchRecoveredLogEntry(logEntry);
+ } else {
+ // Shouldn't happen but cover it anyway.
+ log.error("Log entry not found for index {}", i);
+ break;
+ }
+ }
+
+ context.setLastApplied(lastApplied);
+ context.setCommitIndex(lastApplied);
+ }
+
+ private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ initRecoveryTimer();
+
+ int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+ if(currentRecoveryBatchCount == 0) {
+ cohort.startLogRecoveryBatch(batchSize);
+ }
+
+ cohort.appendRecoveredLogEntry(logEntry.getData());
+
+ if(++currentRecoveryBatchCount >= batchSize) {
+ endCurrentLogRecoveryBatch();
+ }
+ }
+
+ private void endCurrentLogRecoveryBatch() {
+ cohort.applyCurrentLogRecoveryBatch();
+ currentRecoveryBatchCount = 0;
+ }
+
+ private void onRecoveryCompletedMessage() {
+ if(currentRecoveryBatchCount > 0) {
+ endCurrentLogRecoveryBatch();
+ }
+
+ String recoveryTime = "";
+ if(recoveryTimer != null) {
+ recoveryTimer.stop();
+ recoveryTime = " in " + recoveryTimer.toString();
+ recoveryTimer = null;
+ }
+
+ log.info("Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+ "Persistence Id = " + context.getId() +
+ " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, " +
+ "journal-size = {}", replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+ replicatedLog().getSnapshotTerm(), replicatedLog().size());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+
+/**
+ * Interface for a class that participates in raft actor snapshotting.
+ *
+ * @author Thomas Pantelis
+ */
+public interface RaftActorSnapshotCohort {
+
+ /**
+ * This method is called by the RaftActor when a snapshot needs to be
+ * created. The implementation should send a CaptureSnapshotReply to the given actor.
+ *
+ * @param actorRef the actor to which to respond
+ */
+ void createSnapshot(ActorRef actorRef);
+
+ /**
+ * This method is called to apply a snapshot installed by the leader.
+ *
+ * @param snapshotBytes a snapshot of the state of the actor
+ */
+ void applySnapshot(byte[] snapshotBytes);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+/**
+ * Handles snapshot related messages for a RaftActor.
+ *
+ * @author Thomas Pantelis
+ */
+class RaftActorSnapshotMessageSupport {
+ static final String COMMIT_SNAPSHOT = "commit_snapshot";
+
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+ private final RaftActorSnapshotCohort cohort;
+ private final Logger log;
+
+ private final Procedure<Void> createSnapshotProcedure = new Procedure<Void>() {
+ @Override
+ public void apply(Void notUsed) throws Exception {
+ cohort.createSnapshot(context.getActor());
+ }
+ };
+
+ RaftActorSnapshotMessageSupport(RaftActorContext context, RaftActorBehavior currentBehavior,
+ RaftActorSnapshotCohort cohort) {
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ this.cohort = cohort;
+ this.log = context.getLogger();
+
+ context.getSnapshotManager().setCreateSnapshotCallable(createSnapshotProcedure);
+ }
+
+ boolean handleSnapshotMessage(Object message) {
+ if(message instanceof ApplySnapshot ) {
+ onApplySnapshot(((ApplySnapshot) message).getSnapshot());
+ return true;
+ } else if (message instanceof SaveSnapshotSuccess) {
+ onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
+ return true;
+ } else if (message instanceof SaveSnapshotFailure) {
+ onSaveSnapshotFailure((SaveSnapshotFailure) message);
+ return true;
+ } else if (message instanceof CaptureSnapshotReply) {
+ onCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
+ return true;
+ } else if (message.equals(COMMIT_SNAPSHOT)) {
+ context.getSnapshotManager().commit(-1);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private void onCaptureSnapshotReply(byte[] snapshotBytes) {
+ log.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", context.getId(), snapshotBytes.length);
+
+ context.getSnapshotManager().persist(snapshotBytes, currentBehavior, context.getTotalMemory());
+ }
+
+ private void onSaveSnapshotFailure(SaveSnapshotFailure saveSnapshotFailure) {
+ log.error("{}: SaveSnapshotFailure received for snapshot Cause:",
+ context.getId(), saveSnapshotFailure.cause());
+
+ context.getSnapshotManager().rollback();
+ }
+
+ private void onSaveSnapshotSuccess(SaveSnapshotSuccess success) {
+ log.info("{}: SaveSnapshotSuccess received for snapshot", context.getId());
+
+ long sequenceNumber = success.metadata().sequenceNr();
+
+ context.getSnapshotManager().commit(sequenceNumber);
+ }
+
+ private void onApplySnapshot(Snapshot snapshot) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: ApplySnapshot called on Follower Actor " +
+ "snapshotIndex:{}, snapshotTerm:{}", context.getId(), snapshot.getLastAppliedIndex(),
+ snapshot.getLastAppliedTerm());
+ }
+
+ cohort.applySnapshot(snapshot.getState());
+
+ //clears the followers log, sets the snapshot index to ensure adjusted-index works
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, currentBehavior));
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ }
+}
package org.opendaylight.controller.cluster.raft;
+import akka.japi.Procedure;
import java.util.List;
/**
* information
*
* @param index the index of the log entry
+ * @return the adjusted index of the first log entry removed or -1 if log entry not found.
*/
- void removeFrom(long index);
+ long removeFrom(long index);
/**
*/
void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry);
+ void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback);
+
/**
*
* @param index the index of the log entry
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+import java.util.Collections;
+import java.util.List;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+
+/**
+ * Implementation of ReplicatedLog used by the RaftActor.
+ */
+class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
+ private static final int DATA_SIZE_DIVIDER = 5;
+
+ private long dataSizeSinceLastSnapshot = 0L;
+ private final RaftActorContext context;
+ private final RaftActorBehavior currentBehavior;
+
+ private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
+ @Override
+ public void apply(DeleteEntries notUsed) {
+ }
+ };
+
+ static ReplicatedLog newInstance(Snapshot snapshot, RaftActorContext context,
+ RaftActorBehavior currentBehavior) {
+ return new ReplicatedLogImpl(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
+ snapshot.getUnAppliedEntries(), context, currentBehavior);
+ }
+
+ static ReplicatedLog newInstance(RaftActorContext context, RaftActorBehavior currentBehavior) {
+ return new ReplicatedLogImpl(-1L, -1L, Collections.<ReplicatedLogEntry>emptyList(), context,
+ currentBehavior);
+ }
+
+ private ReplicatedLogImpl(long snapshotIndex, long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries,
+ RaftActorContext context, RaftActorBehavior currentBehavior) {
+ super(snapshotIndex, snapshotTerm, unAppliedEntries);
+ this.context = context;
+ this.currentBehavior = currentBehavior;
+ }
+
+ @Override
+ public void removeFromAndPersist(long logEntryIndex) {
+ // FIXME: Maybe this should be done after the command is saved
+ long adjustedIndex = removeFrom(logEntryIndex);
+ if(adjustedIndex >= 0) {
+ context.getPersistenceProvider().persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+ }
+ }
+
+ @Override
+ public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
+ appendAndPersist(replicatedLogEntry, null);
+ }
+
+ @Override
+ public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
+ final Procedure<ReplicatedLogEntry> callback) {
+
+ if(context.getLogger().isDebugEnabled()) {
+ context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
+ }
+
+ // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
+ append(replicatedLogEntry);
+
+ // When persisting events with persist it is guaranteed that the
+ // persistent actor will not receive further commands between the
+ // persist call and the execution(s) of the associated event
+ // handler. This also holds for multiple persist calls in context
+ // of a single command.
+ context.getPersistenceProvider().persist(replicatedLogEntry,
+ new Procedure<ReplicatedLogEntry>() {
+ @Override
+ public void apply(ReplicatedLogEntry evt) throws Exception {
+ int logEntrySize = replicatedLogEntry.size();
+
+ long dataSizeForCheck = dataSize();
+
+ dataSizeSinceLastSnapshot += logEntrySize;
+
+ if (!context.hasFollowers()) {
+ // When we do not have followers we do not maintain an in-memory log
+ // due to this the journalSize will never become anything close to the
+ // snapshot batch count. In fact will mostly be 1.
+ // Similarly since the journal's dataSize depends on the entries in the
+ // journal the journal's dataSize will never reach a value close to the
+ // memory threshold.
+ // By maintaining the dataSize outside the journal we are tracking essentially
+ // what we have written to the disk however since we no longer are in
+ // need of doing a snapshot just for the sake of freeing up memory we adjust
+ // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
+ // as if we were maintaining a real snapshot
+ dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
+ }
+ long journalSize = replicatedLogEntry.getIndex() + 1;
+ long dataThreshold = context.getTotalMemory() *
+ context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
+
+ if ((journalSize % context.getConfigParams().getSnapshotBatchCount() == 0
+ || dataSizeForCheck > dataThreshold)) {
+
+ boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
+ currentBehavior.getReplicatedToAllIndex());
+
+ if(started){
+ dataSizeSinceLastSnapshot = 0;
+ }
+ }
+
+ if (callback != null){
+ callback.apply(replicatedLogEntry);
+ }
+ }
+ }
+ );
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ByteString;
+import java.util.List;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+
+public class SnapshotManager implements SnapshotState {
+
+ private final SnapshotState IDLE = new Idle();
+ private final SnapshotState PERSISTING = new Persisting();
+ private final SnapshotState CREATING = new Creating();
+
+ private final Logger LOG;
+ private final RaftActorContext context;
+ private final LastAppliedTermInformationReader lastAppliedTermInformationReader =
+ new LastAppliedTermInformationReader();
+ private final ReplicatedToAllTermInformationReader replicatedToAllTermInformationReader =
+ new ReplicatedToAllTermInformationReader();
+
+
+ private SnapshotState currentState = IDLE;
+ private CaptureSnapshot captureSnapshot;
+ private long lastSequenceNumber = -1;
+
+ private Procedure<Void> createSnapshotProcedure;
+
+ public SnapshotManager(RaftActorContext context, Logger logger) {
+ this.context = context;
+ this.LOG = logger;
+ }
+
+ @Override
+ public boolean isCapturing() {
+ return currentState.isCapturing();
+ }
+
+ @Override
+ public boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower) {
+ return currentState.captureToInstall(lastLogEntry, replicatedToAllIndex, targetFollower);
+ }
+
+ @Override
+ public boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex) {
+ return currentState.capture(lastLogEntry, replicatedToAllIndex);
+ }
+
+ @Override
+ public void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory) {
+ currentState.persist(snapshotBytes, currentBehavior, totalMemory);
+ }
+
+ @Override
+ public void commit(long sequenceNumber) {
+ currentState.commit(sequenceNumber);
+ }
+
+ @Override
+ public void rollback() {
+ currentState.rollback();
+ }
+
+ @Override
+ public long trimLog(long desiredTrimIndex, RaftActorBehavior currentBehavior) {
+ return currentState.trimLog(desiredTrimIndex, currentBehavior);
+ }
+
+ public void setCreateSnapshotCallable(Procedure<Void> createSnapshotProcedure) {
+ this.createSnapshotProcedure = createSnapshotProcedure;
+ }
+
+ @VisibleForTesting
+ public CaptureSnapshot getCaptureSnapshot() {
+ return captureSnapshot;
+ }
+
+ private boolean hasFollowers(){
+ return context.getPeerAddresses().keySet().size() > 0;
+ }
+
+ private String persistenceId(){
+ return context.getId();
+ }
+
+ private class AbstractSnapshotState implements SnapshotState {
+
+ @Override
+ public boolean isCapturing() {
+ return false;
+ }
+
+ @Override
+ public boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex) {
+ LOG.debug("capture should not be called in state {}", this);
+ return false;
+ }
+
+ @Override
+ public boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower) {
+ LOG.debug("captureToInstall should not be called in state {}", this);
+ return false;
+ }
+
+ @Override
+ public void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory) {
+ LOG.debug("persist should not be called in state {}", this);
+ }
+
+ @Override
+ public void commit(long sequenceNumber) {
+ LOG.debug("commit should not be called in state {}", this);
+ }
+
+ @Override
+ public void rollback() {
+ LOG.debug("rollback should not be called in state {}", this);
+ }
+
+ @Override
+ public long trimLog(long desiredTrimIndex, RaftActorBehavior currentBehavior) {
+ LOG.debug("trimLog should not be called in state {}", this);
+ return -1;
+ }
+
+ protected long doTrimLog(long desiredTrimIndex, RaftActorBehavior currentBehavior){
+ // we would want to keep the lastApplied as its used while capturing snapshots
+ long lastApplied = context.getLastApplied();
+ long tempMin = Math.min(desiredTrimIndex, (lastApplied > -1 ? lastApplied - 1 : -1));
+
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: performSnapshotWithoutCapture: desiredTrimIndex: {}, lastApplied: {}, tempMin: {}",
+ persistenceId(), desiredTrimIndex, lastApplied, tempMin);
+ }
+
+ if (tempMin > -1 && context.getReplicatedLog().isPresent(tempMin)) {
+ LOG.debug("{}: fakeSnapshot purging log to {} for term {}", persistenceId(), tempMin,
+ context.getTermInformation().getCurrentTerm());
+
+ //use the term of the temp-min, since we check for isPresent, entry will not be null
+ ReplicatedLogEntry entry = context.getReplicatedLog().get(tempMin);
+ context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
+ context.getReplicatedLog().snapshotCommit();
+ return tempMin;
+ } else if(tempMin > currentBehavior.getReplicatedToAllIndex()) {
+ // It's possible a follower was lagging and an install snapshot advanced its match index past
+ // the current replicatedToAllIndex. Since the follower is now caught up we should advance the
+ // replicatedToAllIndex (to tempMin). The fact that tempMin wasn't found in the log is likely
+ // due to a previous snapshot triggered by the memory threshold exceeded, in that case we
+ // trim the log to the last applied index even if previous entries weren't replicated to all followers.
+ currentBehavior.setReplicatedToAllIndex(tempMin);
+ }
+ return -1;
+ }
+ }
+
+ private class Idle extends AbstractSnapshotState {
+
+ private boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower) {
+ TermInformationReader lastAppliedTermInfoReader =
+ lastAppliedTermInformationReader.init(context.getReplicatedLog(), context.getLastApplied(),
+ lastLogEntry, hasFollowers());
+
+ long lastAppliedIndex = lastAppliedTermInfoReader.getIndex();
+ long lastAppliedTerm = lastAppliedTermInfoReader.getTerm();
+
+ TermInformationReader replicatedToAllTermInfoReader =
+ replicatedToAllTermInformationReader.init(context.getReplicatedLog(), replicatedToAllIndex);
+
+ long newReplicatedToAllIndex = replicatedToAllTermInfoReader.getIndex();
+ long newReplicatedToAllTerm = replicatedToAllTermInfoReader.getTerm();
+
+ // send a CaptureSnapshot to self to make the expensive operation async.
+
+ List<ReplicatedLogEntry> unAppliedEntries = context.getReplicatedLog().getFrom(lastAppliedIndex + 1);
+
+ captureSnapshot = new CaptureSnapshot(lastLogEntry.getIndex(),
+ lastLogEntry.getTerm(), lastAppliedIndex, lastAppliedTerm,
+ newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries, targetFollower != null);
+
+ if(captureSnapshot.isInstallSnapshotInitiated()) {
+ LOG.info("{}: Initiating snapshot capture {} to install on {}",
+ persistenceId(), captureSnapshot, targetFollower);
+ } else {
+ LOG.info("{}: Initiating snapshot capture {}", persistenceId(), captureSnapshot);
+ }
+
+ lastSequenceNumber = context.getPersistenceProvider().getLastSequenceNumber();
+
+ LOG.debug("lastSequenceNumber prior to capture: {}", lastSequenceNumber);
+
+ SnapshotManager.this.currentState = CREATING;
+
+ try {
+ createSnapshotProcedure.apply(null);
+ } catch (Exception e) {
+ SnapshotManager.this.currentState = IDLE;
+ LOG.error("Error creating snapshot", e);
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex) {
+ return capture(lastLogEntry, replicatedToAllIndex, null);
+ }
+
+ @Override
+ public boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower) {
+ return capture(lastLogEntry, replicatedToAllIndex, targetFollower);
+ }
+
+ @Override
+ public String toString() {
+ return "Idle";
+ }
+
+ @Override
+ public long trimLog(long desiredTrimIndex, RaftActorBehavior currentBehavior) {
+ return doTrimLog(desiredTrimIndex, currentBehavior);
+ }
+ }
+
+ private class Creating extends AbstractSnapshotState {
+
+ @Override
+ public boolean isCapturing() {
+ return true;
+ }
+
+ @Override
+ public void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory) {
+ // create a snapshot object from the state provided and save it
+ // when snapshot is saved async, SaveSnapshotSuccess is raised.
+
+ Snapshot sn = Snapshot.create(snapshotBytes,
+ captureSnapshot.getUnAppliedEntries(),
+ captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
+ captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
+
+ context.getPersistenceProvider().saveSnapshot(sn);
+
+ LOG.info("{}: Persisting of snapshot done:{}", persistenceId(), sn.getLogMessage());
+
+ long dataThreshold = totalMemory *
+ context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
+ if (context.getReplicatedLog().dataSize() > dataThreshold) {
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{}: dataSize {} exceeds dataThreshold {} - doing snapshotPreCommit with index {}",
+ persistenceId(), context.getReplicatedLog().dataSize(), dataThreshold,
+ captureSnapshot.getLastAppliedIndex());
+ }
+
+ // if memory is less, clear the log based on lastApplied.
+ // this could/should only happen if one of the followers is down
+ // as normally we keep removing from the log when its replicated to all.
+ context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getLastAppliedIndex(),
+ captureSnapshot.getLastAppliedTerm());
+
+ // Don't reset replicatedToAllIndex to -1 as this may prevent us from trimming the log after an
+ // install snapshot to a follower.
+ if(captureSnapshot.getReplicatedToAllIndex() >= 0) {
+ currentBehavior.setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+ }
+
+ } else if(captureSnapshot.getReplicatedToAllIndex() != -1){
+ // clear the log based on replicatedToAllIndex
+ context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getReplicatedToAllIndex(),
+ captureSnapshot.getReplicatedToAllTerm());
+
+ currentBehavior.setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+ } else {
+ // The replicatedToAllIndex was not found in the log
+ // This means that replicatedToAllIndex never moved beyond -1 or that it is already in the snapshot.
+ // In this scenario we may need to save the snapshot to the akka persistence
+ // snapshot for recovery but we do not need to do the replicated log trimming.
+ context.getReplicatedLog().snapshotPreCommit(context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm());
+ }
+
+ LOG.info("{}: Removed in-memory snapshotted entries, adjusted snaphsotIndex:{} " +
+ "and term:{}", persistenceId(), captureSnapshot.getLastAppliedIndex(),
+ captureSnapshot.getLastAppliedTerm());
+
+ if (context.getId().equals(currentBehavior.getLeaderId())
+ && captureSnapshot.isInstallSnapshotInitiated()) {
+ // this would be call straight to the leader and won't initiate in serialization
+ currentBehavior.handleMessage(context.getActor(), new SendInstallSnapshot(
+ ByteString.copyFrom(snapshotBytes)));
+ }
+
+ captureSnapshot = null;
+ SnapshotManager.this.currentState = PERSISTING;
+ }
+
+ @Override
+ public String toString() {
+ return "Creating";
+ }
+
+ }
+
+ private class Persisting extends AbstractSnapshotState {
+
+ @Override
+ public void commit(long sequenceNumber) {
+ context.getReplicatedLog().snapshotCommit();
+ context.getPersistenceProvider().deleteSnapshots(new SnapshotSelectionCriteria(
+ sequenceNumber - context.getConfigParams().getSnapshotBatchCount(), 43200000));
+
+ context.getPersistenceProvider().deleteMessages(lastSequenceNumber);
+
+ lastSequenceNumber = -1;
+ SnapshotManager.this.currentState = IDLE;
+ }
+
+ @Override
+ public void rollback() {
+ context.getReplicatedLog().snapshotRollback();
+
+ LOG.info("{}: Replicated Log rolled back. Snapshot will be attempted in the next cycle." +
+ "snapshotIndex:{}, snapshotTerm:{}, log-size:{}", persistenceId(),
+ context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm(),
+ context.getReplicatedLog().size());
+
+ SnapshotManager.this.currentState = IDLE;
+ }
+
+ @Override
+ public String toString() {
+ return "Persisting";
+ }
+
+ }
+
+ private static interface TermInformationReader {
+ long getIndex();
+ long getTerm();
+ }
+
+ static class LastAppliedTermInformationReader implements TermInformationReader{
+ private long index;
+ private long term;
+
+ public LastAppliedTermInformationReader init(ReplicatedLog log, long originalIndex,
+ ReplicatedLogEntry lastLogEntry, boolean hasFollowers){
+ ReplicatedLogEntry entry = log.get(originalIndex);
+ this.index = -1L;
+ this.term = -1L;
+ if (!hasFollowers) {
+ if(lastLogEntry != null) {
+ index = lastLogEntry.getIndex();
+ term = lastLogEntry.getTerm();
+ }
+ } else if (entry != null) {
+ index = entry.getIndex();
+ term = entry.getTerm();
+ } else if(log.getSnapshotIndex() > -1){
+ index = log.getSnapshotIndex();
+ term = log.getSnapshotTerm();
+ }
+ return this;
+ }
+
+ @Override
+ public long getIndex(){
+ return this.index;
+ }
+
+ @Override
+ public long getTerm(){
+ return this.term;
+ }
+ }
+
+ private static class ReplicatedToAllTermInformationReader implements TermInformationReader{
+ private long index;
+ private long term;
+
+ ReplicatedToAllTermInformationReader init(ReplicatedLog log, long originalIndex){
+ ReplicatedLogEntry entry = log.get(originalIndex);
+ this.index = -1L;
+ this.term = -1L;
+
+ if (entry != null) {
+ index = entry.getIndex();
+ term = entry.getTerm();
+ }
+
+ return this;
+ }
+
+ @Override
+ public long getIndex(){
+ return this.index;
+ }
+
+ @Override
+ public long getTerm(){
+ return this.term;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+
+public interface SnapshotState {
+ /**
+ * Should return true when a snapshot is being captured
+ * @return
+ */
+ boolean isCapturing();
+
+ /**
+ * Initiate capture snapshot
+ *
+ * @param lastLogEntry the last entry in the replicated log
+ * @param replicatedToAllIndex the current replicatedToAllIndex
+ *
+ * @return true if capture was started
+ */
+ boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex);
+
+ /**
+ * Initiate capture snapshot for the purposing of installing that snapshot
+ *
+ * @param lastLogEntry
+ * @param replicatedToAllIndex
+ * @param targetFollower
+ *
+ * @return true if capture was started
+ */
+ boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower);
+
+ /**
+ * Persist the snapshot
+ *
+ * @param snapshotBytes
+ * @param currentBehavior
+ * @param totalMemory
+ */
+ void persist(byte[] snapshotBytes, RaftActorBehavior currentBehavior, long totalMemory);
+
+ /**
+ * Commit the snapshot by trimming the log
+ *
+ * @param sequenceNumber
+ */
+ void commit(long sequenceNumber);
+
+ /**
+ * Rollback the snapshot
+ */
+ void rollback();
+
+ /**
+ * Trim the log
+ *
+ * @param desiredTrimIndex
+ * @return the actual trim index
+ */
+ long trimLog(long desiredTrimIndex, RaftActorBehavior currentBehavior);
+}
package org.opendaylight.controller.cluster.raft.base.messages;
+import java.util.Collections;
+import java.util.List;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+
public class CaptureSnapshot {
- private long lastAppliedIndex;
- private long lastAppliedTerm;
- private long lastIndex;
- private long lastTerm;
- private boolean installSnapshotInitiated;
- private long replicatedToAllIndex;
- private long replicatedToAllTerm;
+ private final long lastAppliedIndex;
+ private final long lastAppliedTerm;
+ private final long lastIndex;
+ private final long lastTerm;
+ private final boolean installSnapshotInitiated;
+ private final long replicatedToAllIndex;
+ private final long replicatedToAllTerm;
+ private final List<ReplicatedLogEntry> unAppliedEntries;
- public CaptureSnapshot(long lastIndex, long lastTerm,
- long lastAppliedIndex, long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm) {
- this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, replicatedToAllIndex , replicatedToAllTerm, false);
+ public CaptureSnapshot(long lastIndex, long lastTerm, long lastAppliedIndex, long lastAppliedTerm,
+ long replicatedToAllIndex, long replicatedToAllTerm, List<ReplicatedLogEntry> unAppliedEntries) {
+ this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, replicatedToAllIndex, replicatedToAllTerm,
+ unAppliedEntries, false);
}
- public CaptureSnapshot(long lastIndex, long lastTerm,long lastAppliedIndex,
- long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm, boolean installSnapshotInitiated) {
+ public CaptureSnapshot(long lastIndex, long lastTerm, long lastAppliedIndex,
+ long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm,
+ List<ReplicatedLogEntry> unAppliedEntries, boolean installSnapshotInitiated) {
this.lastIndex = lastIndex;
this.lastTerm = lastTerm;
this.lastAppliedIndex = lastAppliedIndex;
this.installSnapshotInitiated = installSnapshotInitiated;
this.replicatedToAllIndex = replicatedToAllIndex;
this.replicatedToAllTerm = replicatedToAllTerm;
+ this.unAppliedEntries = unAppliedEntries != null ? unAppliedEntries : Collections.<ReplicatedLogEntry>emptyList();
}
public long getLastAppliedIndex() {
public long getReplicatedToAllTerm() {
return replicatedToAllTerm;
}
+
+ public List<ReplicatedLogEntry> getUnAppliedEntries() {
+ return unAppliedEntries;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("CaptureSnapshot [lastAppliedIndex=").append(lastAppliedIndex).append(", lastAppliedTerm=")
+ .append(lastAppliedTerm).append(", lastIndex=").append(lastIndex).append(", lastTerm=")
+ .append(lastTerm).append(", installSnapshotInitiated=").append(installSnapshotInitiated)
+ .append(", replicatedToAllIndex=").append(replicatedToAllIndex).append(", replicatedToAllTerm=")
+ .append(replicatedToAllTerm).append(", unAppliedEntries size=").append(unAppliedEntries.size()).append("]");
+ return builder.toString();
+ }
+
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import java.io.Serializable;
+
+/**
+ * Internal message that is stored in the akka's persistent journal to delete journal entries.
+ *
+ * @author Thomas Pantelis
+ */
+public class DeleteEntries implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final long fromIndex;
+
+ public DeleteEntries(long fromIndex) {
+ this.fromIndex = fromIndex;
+ }
+
+ public long getFromIndex() {
+ return fromIndex;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("DeleteEntries [fromIndex=").append(fromIndex).append("]");
+ return builder.toString();
+ }
+}
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
*
* @return Collection of follower IDs
*/
- protected final Collection<String> getFollowerIds() {
+ public final Collection<String> getFollowerIds() {
return followerToLog.keySet();
}
applyLogToStateMachine(context.getCommitIndex());
}
- if (!context.isSnapshotCaptureInitiated()) {
+ if (!context.getSnapshotManager().isCapturing()) {
purgeInMemoryLog();
}
followerToSnapshot.markSendStatus(false);
}
- if (wasLastChunk && !context.isSnapshotCaptureInitiated()) {
+ if (wasLastChunk && !context.getSnapshotManager().isCapturing()) {
// Since the follower is now caught up try to purge the log.
purgeInMemoryLog();
} else if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
if (followerActor != null) {
long followerNextIndex = followerLogInformation.getNextIndex();
boolean isFollowerActive = followerLogInformation.isFollowerActive();
+ boolean sendAppendEntries = false;
+ List<ReplicatedLogEntry> entries = Collections.emptyList();
if (mapFollowerToSnapshot.get(followerId) != null) {
// if install snapshot is in process , then sent next chunk if possible
sendSnapshotChunk(followerActor, followerId);
} else if(sendHeartbeat) {
// we send a heartbeat even if we have not received a reply for the last chunk
- sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
- Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ sendAppendEntries = true;
}
} else {
long leaderLastIndex = context.getReplicatedLog().lastIndex();
followerNextIndex, followerId);
// FIXME : Sending one entry at a time
- final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
-
- sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
-
+ if(followerLogInformation.okToReplicate()) {
+ entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
+ sendAppendEntries = true;
+ }
} else if (isFollowerActive && followerNextIndex >= 0 &&
- leaderLastIndex > followerNextIndex && !context.isSnapshotCaptureInitiated()) {
+ leaderLastIndex > followerNextIndex && !context.getSnapshotManager().isCapturing()) {
// if the followers next index is not present in the leaders log, and
// if the follower is just not starting and if leader's index is more than followers index
// then snapshot should be sent
}
// Send heartbeat to follower whenever install snapshot is initiated.
- sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
- Collections.<ReplicatedLogEntry>emptyList(), followerId);
-
+ sendAppendEntries = true;
initiateCaptureSnapshot(followerId, followerNextIndex);
} else if(sendHeartbeat) {
- //we send an AppendEntries, even if the follower is inactive
+ // we send an AppendEntries, even if the follower is inactive
// in-order to update the followers timestamp, in case it becomes active again
- sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
- Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ sendAppendEntries = true;
}
}
+
+ if(sendAppendEntries) {
+ sendAppendEntriesToFollower(followerActor, followerNextIndex,
+ entries, followerId);
+ }
}
}
final ActorSelection followerActor = context.getPeerActorSelection(followerId);
sendSnapshotChunk(followerActor, followerId);
- } else if (!context.isSnapshotCaptureInitiated()) {
- ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
- long lastAppliedIndex = -1;
- long lastAppliedTerm = -1;
-
- if (lastAppliedEntry != null) {
- lastAppliedIndex = lastAppliedEntry.getIndex();
- lastAppliedTerm = lastAppliedEntry.getTerm();
- } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
- lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
- lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
- }
-
- boolean isInstallSnapshotInitiated = true;
- long replicatedToAllIndex = super.getReplicatedToAllIndex();
- ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);
-
- CaptureSnapshot captureSnapshot = new CaptureSnapshot(
- lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
- (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
- (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1),
- isInstallSnapshotInitiated);
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Initiating install snapshot to follower {}: {}", logName(), followerId,
- captureSnapshot);
- }
-
- actor().tell(captureSnapshot, actor());
- context.setSnapshotCaptureInitiated(true);
+ } else {
+ context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
+ this.getReplicatedToAllIndex(), followerId);
}
}
}
*/
public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
+ protected static final ElectionTimeout ELECTION_TIMEOUT = new ElectionTimeout();
+
/**
* Information about the RaftActor whose behavior this class represents
*/
// message is sent to itself
electionCancel =
context.getActorSystem().scheduler().scheduleOnce(interval,
- context.getActor(), new ElectionTimeout(),
+ context.getActor(), ELECTION_TIMEOUT,
context.getActorSystem().dispatcher(), context.getActor());
}
* @param snapshotCapturedIndex
*/
protected void performSnapshotWithoutCapture(final long snapshotCapturedIndex) {
- // we would want to keep the lastApplied as its used while capturing snapshots
- long lastApplied = context.getLastApplied();
- long tempMin = Math.min(snapshotCapturedIndex, (lastApplied > -1 ? lastApplied - 1 : -1));
-
- if(LOG.isTraceEnabled()) {
- LOG.trace("{}: performSnapshotWithoutCapture: snapshotCapturedIndex: {}, lastApplied: {}, tempMin: {}",
- logName, snapshotCapturedIndex, lastApplied, tempMin);
- }
+ long actualIndex = context.getSnapshotManager().trimLog(snapshotCapturedIndex, this);
- if (tempMin > -1 && context.getReplicatedLog().isPresent(tempMin)) {
- LOG.debug("{}: fakeSnapshot purging log to {} for term {}", logName(), tempMin,
- context.getTermInformation().getCurrentTerm());
-
- //use the term of the temp-min, since we check for isPresent, entry will not be null
- ReplicatedLogEntry entry = context.getReplicatedLog().get(tempMin);
- context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
- context.getReplicatedLog().snapshotCommit();
- setReplicatedToAllIndex(tempMin);
- } else if(tempMin > getReplicatedToAllIndex()) {
- // It's possible a follower was lagging and an install snapshot advanced its match index past
- // the current replicatedToAllIndex. Since the follower is now caught up we should advance the
- // replicatedToAllIndex (to tempMin). The fact that tempMin wasn't found in the log is likely
- // due to a previous snapshot triggered by the memory threshold exceeded, in that case we
- // trim the log to the last applied index even if previous entries weren't replicated to all followers.
- setReplicatedToAllIndex(tempMin);
+ if(actualIndex != -1){
+ setReplicatedToAllIndex(actualIndex);
}
}
votesRequired = getMajorityVoteCount(peers.size());
startNewTerm();
- scheduleElection(electionDuration());
+
+ if(context.getPeerAddresses().isEmpty()){
+ actor().tell(ELECTION_TIMEOUT, actor());
+ } else {
+ scheduleElection(electionDuration());
+ }
+
+
}
@Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.RaftState;
+
+/**
+ * A RaftActorBehavior implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingRaftActorBehavior implements RaftActorBehavior {
+ private RaftActorBehavior delegate;
+
+ public RaftActorBehavior getDelegate() {
+ return delegate;
+ }
+
+ public void setDelegate(RaftActorBehavior delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void close() throws Exception {
+ delegate.close();
+ }
+
+ @Override
+ public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+ return delegate.handleMessage(sender, message);
+ }
+
+ @Override
+ public RaftState state() {
+ return delegate.state();
+ }
+
+ @Override
+ public String getLeaderId() {
+ return delegate.getLeaderId();
+ }
+
+ @Override
+ public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+ delegate.setReplicatedToAllIndex(replicatedToAllIndex);
+ }
+
+ @Override
+ public long getReplicatedToAllIndex() {
+ return delegate.getReplicatedToAllIndex();
+ }
+}
public Follower(RaftActorContext context) {
super(context, RaftState.Follower);
- scheduleElection(electionDuration());
-
initialSyncStatusTracker = new InitialSyncStatusTracker(context.getActor());
+
+ if(context.getPeerAddresses().isEmpty()){
+ actor().tell(ELECTION_TIMEOUT, actor());
+ } else {
+ scheduleElection(electionDuration());
+ }
+
}
private boolean isLogEntryPresent(long index){
// to make it easier to read. Before refactoring ensure tests
// cover the code properly
+ if (snapshotTracker != null) {
+ // if snapshot install is in progress, follower should just acknowledge append entries with a reply.
+ AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
+ lastIndex(), lastTerm());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{}: snapshot install is in progress, replying immediately with {}", logName(), reply);
+ }
+ sender.tell(reply, actor());
+
+ return this;
+ }
+
// 1. Reply false if term < currentTerm (§5.1)
// This is handled in the appendEntries method of the base class
sender.tell(reply, actor());
- if (!context.isSnapshotCaptureInitiated()) {
+ if (!context.getSnapshotManager().isCapturing()) {
super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+import java.beans.ConstructorProperties;
+
+/**
+ * A bean class containing a snapshot of information for a follower returned from GetOnDemandRaftStats.
+ *
+ * @author Thomas Pantelis
+ */
+public class FollowerInfo {
+ private final String id;
+ private final long nextIndex;
+ private final long matchIndex;
+ private final boolean isActive;
+ private final String timeSinceLastActivity;
+
+ @ConstructorProperties({"id","nextIndex", "matchIndex", "isActive", "timeSinceLastActivity"})
+ public FollowerInfo(String id, long nextIndex, long matchIndex, boolean isActive, String timeSinceLastActivity) {
+ this.id = id;
+ this.nextIndex = nextIndex;
+ this.matchIndex = matchIndex;
+ this.isActive = isActive;
+ this.timeSinceLastActivity = timeSinceLastActivity;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public long getNextIndex() {
+ return nextIndex;
+ }
+
+ public long getMatchIndex() {
+ return matchIndex;
+ }
+
+ public boolean isActive() {
+ return isActive;
+ }
+
+ public String getTimeSinceLastActivity() {
+ return timeSinceLastActivity;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+/**
+ * Local message sent to a RaftActor to obtain a snapshot of statistical information. Returns an
+ * OnDemandRaftState instance.
+ *
+ * @author Thomas Pantelis
+ */
+public class GetOnDemandRaftState {
+ public static final GetOnDemandRaftState INSTANCE = new GetOnDemandRaftState();
+
+ private GetOnDemandRaftState() {
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.client.messages;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The response to a GetOnDemandRaftState message,
+ *
+ * @author Thomas Pantelis
+ */
+public class OnDemandRaftState {
+ private long lastLogIndex = -1L;
+ private long lastLogTerm = -1L;
+ private long currentTerm = -1L;
+ private long commitIndex = -1L;
+ private long lastApplied = -1L;
+ private long lastIndex = -1L;
+ private long lastTerm = -1L;
+ private long snapshotIndex = -1L;
+ private long snapshotTerm = -1L;
+ private long replicatedToAllIndex = -1L;
+ private long inMemoryJournalDataSize;
+ private long inMemoryJournalLogSize;
+ private String leader;
+ private String raftState;
+ private String votedFor;
+ private boolean isSnapshotCaptureInitiated;
+
+ private List<FollowerInfo> followerInfoList = Collections.emptyList();
+ private Map<String, String> peerAddresses = Collections.emptyMap();
+
+ private OnDemandRaftState() {
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ public long getLastLogIndex() {
+ return lastLogIndex;
+ }
+
+ public long getLastLogTerm() {
+ return lastLogTerm;
+ }
+
+ public long getCurrentTerm() {
+ return currentTerm;
+ }
+
+ public long getCommitIndex() {
+ return commitIndex;
+ }
+
+ public long getLastApplied() {
+ return lastApplied;
+ }
+
+ public long getLastIndex() {
+ return lastIndex;
+ }
+
+ public long getLastTerm() {
+ return lastTerm;
+ }
+
+ public long getSnapshotIndex() {
+ return snapshotIndex;
+ }
+
+ public long getSnapshotTerm() {
+ return snapshotTerm;
+ }
+
+ public long getReplicatedToAllIndex() {
+ return replicatedToAllIndex;
+ }
+
+ public long getInMemoryJournalDataSize() {
+ return inMemoryJournalDataSize;
+ }
+
+ public long getInMemoryJournalLogSize() {
+ return inMemoryJournalLogSize;
+ }
+
+ public String getLeader() {
+ return leader;
+ }
+
+ public String getRaftState() {
+ return raftState;
+ }
+
+ public String getVotedFor() {
+ return votedFor;
+ }
+
+ public boolean isSnapshotCaptureInitiated() {
+ return isSnapshotCaptureInitiated;
+ }
+
+ public List<FollowerInfo> getFollowerInfoList() {
+ return followerInfoList;
+ }
+
+ public Map<String, String> getPeerAddresses() {
+ return peerAddresses;
+ }
+
+ public static class Builder {
+ private final OnDemandRaftState stats = new OnDemandRaftState();
+
+ public Builder lastLogIndex(long value) {
+ stats.lastLogIndex = value;
+ return this;
+ }
+
+ public Builder lastLogTerm(long value) {
+ stats.lastLogTerm = value;
+ return this;
+ }
+
+ public Builder currentTerm(long value) {
+ stats.currentTerm = value;
+ return this;
+ }
+
+ public Builder commitIndex(long value) {
+ stats.commitIndex = value;
+ return this;
+ }
+
+ public Builder lastApplied(long value) {
+ stats.lastApplied = value;
+ return this;
+ }
+
+ public Builder lastIndex(long value) {
+ stats.lastIndex = value;
+ return this;
+ }
+
+ public Builder lastTerm(long value) {
+ stats.lastTerm = value;
+ return this;
+ }
+
+ public Builder snapshotIndex(long value) {
+ stats.snapshotIndex = value;
+ return this;
+ }
+
+ public Builder snapshotTerm(long value) {
+ stats.snapshotTerm = value;
+ return this;
+ }
+
+ public Builder replicatedToAllIndex(long value) {
+ stats.replicatedToAllIndex = value;
+ return this;
+ }
+
+ public Builder inMemoryJournalDataSize(long value) {
+ stats.inMemoryJournalDataSize = value;
+ return this;
+ }
+
+ public Builder inMemoryJournalLogSize(long value) {
+ stats.inMemoryJournalLogSize = value;
+ return this;
+ }
+
+ public Builder leader(String value) {
+ stats.leader = value;
+ return this;
+ }
+
+ public Builder raftState(String value) {
+ stats.raftState = value;
+ return this;
+ }
+
+ public Builder votedFor(String value) {
+ stats.votedFor = value;
+ return this;
+ }
+
+ public Builder followerInfoList(List<FollowerInfo> followerInfoList) {
+ stats.followerInfoList = followerInfoList;
+ return this;
+ }
+
+ public Builder peerAddresses(Map<String, String> peerAddresses) {
+ stats.peerAddresses = peerAddresses;
+ return this;
+ }
+
+ public Builder isSnapshotCaptureInitiated(boolean value) {
+ stats.isSnapshotCaptureInitiated = value;
+ return this;
+ }
+
+ public OnDemandRaftState build() {
+ return stats;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import akka.actor.ActorRef;
+import akka.actor.InvalidActorNameException;
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.actor.Terminated;
+import akka.dispatch.Dispatchers;
+import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import org.junit.After;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * Abstract base for an integration test that tests end-to-end RaftActor and behavior functionality.
+ *
+ * @author Thomas Pantelis
+ */
+public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest {
+
+ public static class TestRaftActor extends MockRaftActor {
+
+ private final TestActorRef<MessageCollectorActor> collectorActor;
+ private final Map<Class<?>, Boolean> dropMessages = new ConcurrentHashMap<>();
+
+ private TestRaftActor(String id, Map<String, String> peerAddresses, ConfigParams config,
+ TestActorRef<MessageCollectorActor> collectorActor) {
+ super(id, peerAddresses, Optional.of(config), null);
+ this.collectorActor = collectorActor;
+ }
+
+ public static Props props(String id, Map<String, String> peerAddresses, ConfigParams config,
+ TestActorRef<MessageCollectorActor> collectorActor) {
+ return Props.create(TestRaftActor.class, id, peerAddresses, config, collectorActor).
+ withDispatcher(Dispatchers.DefaultDispatcherId());
+ }
+
+ void startDropMessages(Class<?> msgClass) {
+ dropMessages.put(msgClass, Boolean.TRUE);
+ }
+
+ void stopDropMessages(Class<?> msgClass) {
+ dropMessages.remove(msgClass);
+ }
+
+ void setMockTotalMemory(final long mockTotalMemory) {
+ if(mockTotalMemory > 0) {
+ getRaftActorContext().setTotalMemoryRetriever(new Supplier<Long>() {
+ @Override
+ public Long get() {
+ return mockTotalMemory;
+ }
+
+ });
+ } else {
+ getRaftActorContext().setTotalMemoryRetriever(null);
+ }
+ }
+
+ @Override
+ public void handleCommand(Object message) {
+ if(message instanceof MockPayload) {
+ MockPayload payload = (MockPayload)message;
+ super.persistData(collectorActor, payload.toString(), payload);
+ return;
+ }
+
+ try {
+ if(!dropMessages.containsKey(message.getClass())) {
+ super.handleCommand(message);
+ }
+ } finally {
+ if(!(message instanceof SendHeartBeat)) {
+ try {
+ collectorActor.tell(message, ActorRef.noSender());
+ } catch (Exception e) {
+ LOG.error("MessageCollectorActor error", e);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
+ try {
+ actorRef.tell(new CaptureSnapshotReply(RaftActorTest.fromObject(getState()).toByteArray()), actorRef);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ public ActorRef collectorActor() {
+ return collectorActor;
+ }
+ }
+
+ protected final Logger testLog = LoggerFactory.getLogger(getClass());
+
+ protected final TestActorFactory factory = new TestActorFactory(getSystem());
+
+ protected String leaderId = factory.generateActorId("leader");
+ protected DefaultConfigParamsImpl leaderConfigParams;
+ protected TestActorRef<TestRaftActor> leaderActor;
+ protected ActorRef leaderCollectorActor;
+ protected RaftActorContext leaderContext;
+ protected RaftActorBehavior leader;
+
+ protected String follower1Id = factory.generateActorId("follower");
+ protected TestActorRef<TestRaftActor> follower1Actor;
+ protected ActorRef follower1CollectorActor;
+ protected RaftActorBehavior follower1;
+ protected RaftActorContext follower1Context;
+
+ protected String follower2Id = factory.generateActorId("follower");
+ protected TestActorRef<TestRaftActor> follower2Actor;
+ protected ActorRef follower2CollectorActor;
+ protected RaftActorBehavior follower2;
+ protected RaftActorContext follower2Context;
+
+ protected ImmutableMap<String, String> peerAddresses;
+
+ protected long initialTerm = 5;
+ protected long currentTerm;
+
+ protected List<Object> expSnapshotState = new ArrayList<>();
+
+ @After
+ public void tearDown() {
+ InMemoryJournal.clear();
+ InMemorySnapshotStore.clear();
+ factory.close();
+ }
+
+ protected DefaultConfigParamsImpl newLeaderConfigParams() {
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(100, TimeUnit.MILLISECONDS));
+ configParams.setElectionTimeoutFactor(1);
+ configParams.setSnapshotBatchCount(4);
+ configParams.setSnapshotDataThresholdPercentage(70);
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ return configParams;
+ }
+
+ protected DefaultConfigParamsImpl newFollowerConfigParams() {
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(500, TimeUnit.MILLISECONDS));
+ configParams.setElectionTimeoutFactor(1000);
+ return configParams;
+ }
+
+ protected void waitUntilLeader(ActorRef actorRef) {
+ RaftActorTestKit.waitUntilLeader(actorRef);
+ }
+
+ protected TestActorRef<TestRaftActor> newTestRaftActor(String id, Map<String, String> peerAddresses,
+ ConfigParams configParams) {
+ TestActorRef<MessageCollectorActor> collectorActor = factory.createTestActor(
+ MessageCollectorActor.props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ factory.generateActorId(id + "-collector"));
+
+ InvalidActorNameException lastEx = null;
+ for(int i = 0; i < 10; i++) {
+ try {
+ return factory.createTestActor(TestRaftActor.props(id,
+ peerAddresses != null ? peerAddresses : Collections.<String, String>emptyMap(),
+ configParams, collectorActor), id);
+ } catch (InvalidActorNameException e) {
+ lastEx = e;
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ }
+ }
+
+ throw lastEx;
+ }
+
+ protected void killActor(TestActorRef<TestRaftActor> leaderActor) {
+ JavaTestKit testkit = new JavaTestKit(getSystem());
+ testkit.watch(leaderActor);
+
+ leaderActor.tell(PoisonPill.getInstance(), null);
+ testkit.expectMsgClass(JavaTestKit.duration("5 seconds"), Terminated.class);
+
+ testkit.unwatch(leaderActor);
+ }
+
+ protected void verifyApplyJournalEntries(ActorRef actor, final long expIndex) {
+ MessageCollectorActor.expectFirstMatching(actor, ApplyJournalEntries.class, new Predicate<ApplyJournalEntries>() {
+ @Override
+ public boolean apply(ApplyJournalEntries msg) {
+ return msg.getToIndex() == expIndex;
+ }
+ });
+ }
+
+ @SuppressWarnings("unchecked")
+ protected void verifySnapshot(String prefix, Snapshot snapshot, long lastAppliedTerm,
+ int lastAppliedIndex, long lastTerm, long lastIndex)
+ throws Exception {
+ assertEquals(prefix + " Snapshot getLastAppliedTerm", lastAppliedTerm, snapshot.getLastAppliedTerm());
+ assertEquals(prefix + " Snapshot getLastAppliedIndex", lastAppliedIndex, snapshot.getLastAppliedIndex());
+ assertEquals(prefix + " Snapshot getLastTerm", lastTerm, snapshot.getLastTerm());
+ assertEquals(prefix + " Snapshot getLastIndex", lastIndex, snapshot.getLastIndex());
+
+ List<Object> actualState = (List<Object>)MockRaftActor.toObject(snapshot.getState());
+ assertEquals(prefix + " Snapshot getState size", expSnapshotState.size(), actualState.size());
+ for(int i = 0; i < expSnapshotState.size(); i++) {
+ assertEquals(prefix + " Snapshot state " + i, expSnapshotState.get(i), actualState.get(i));
+ }
+ }
+
+ protected void verifyPersistedJournal(String persistenceId, List<? extends ReplicatedLogEntry> expJournal) {
+ List<ReplicatedLogEntry> journal = InMemoryJournal.get(persistenceId, ReplicatedLogEntry.class);
+ assertEquals("Journal ReplicatedLogEntry count", expJournal.size(), journal.size());
+ for(int i = 0; i < expJournal.size(); i++) {
+ ReplicatedLogEntry expected = expJournal.get(i);
+ ReplicatedLogEntry actual = journal.get(i);
+ verifyReplicatedLogEntry(expected, actual.getTerm(), actual.getIndex(), actual.getData());
+ }
+ }
+
+ protected MockPayload sendPayloadData(ActorRef leaderActor, String data) {
+ return sendPayloadData(leaderActor, data, 0);
+ }
+
+ protected MockPayload sendPayloadData(ActorRef leaderActor, String data, int size) {
+ MockPayload payload;
+ if(size > 0) {
+ payload = new MockPayload(data, size);
+ } else {
+ payload = new MockPayload(data);
+ }
+
+ leaderActor.tell(payload, ActorRef.noSender());
+ return payload;
+ }
+
+ protected void verifyApplyState(ApplyState applyState, ActorRef expClientActor,
+ String expId, long expTerm, long expIndex, MockPayload payload) {
+ assertEquals("ApplyState getClientActor", expClientActor, applyState.getClientActor());
+ assertEquals("ApplyState getIdentifier", expId, applyState.getIdentifier());
+ ReplicatedLogEntry replicatedLogEntry = applyState.getReplicatedLogEntry();
+ verifyReplicatedLogEntry(replicatedLogEntry, expTerm, expIndex, payload);
+ }
+
+ protected void verifyReplicatedLogEntry(ReplicatedLogEntry replicatedLogEntry, long expTerm, long expIndex,
+ Payload payload) {
+ assertEquals("ReplicatedLogEntry getTerm", expTerm, replicatedLogEntry.getTerm());
+ assertEquals("ReplicatedLogEntry getIndex", expIndex, replicatedLogEntry.getIndex());
+ assertEquals("ReplicatedLogEntry getData", payload, replicatedLogEntry.getData());
+ }
+
+ protected String testActorPath(String id){
+ return "akka://test/user" + id;
+ }
+}
/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import akka.japi.Procedure;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
}
- @After
- public void tearDown() {
- replicatedLogImpl.journal.clear();
- replicatedLogImpl.setSnapshotIndex(-1);
- replicatedLogImpl.setSnapshotTerm(-1);
- replicatedLogImpl = null;
+ @Test
+ public void testEmptyLog() {
+ replicatedLogImpl = new MockAbstractReplicatedLogImpl();
+
+ assertEquals("size", 0, replicatedLogImpl.size());
+ assertEquals("dataSize", 0, replicatedLogImpl.dataSize());
+ assertEquals("getSnapshotIndex", -1, replicatedLogImpl.getSnapshotIndex());
+ assertEquals("getSnapshotTerm", -1, replicatedLogImpl.getSnapshotTerm());
+ assertEquals("lastIndex", -1, replicatedLogImpl.lastIndex());
+ assertEquals("lastTerm", -1, replicatedLogImpl.lastTerm());
+ assertEquals("isPresent", false, replicatedLogImpl.isPresent(0));
+ assertEquals("isInSnapshot", false, replicatedLogImpl.isInSnapshot(0));
+ Assert.assertNull("get(0)", replicatedLogImpl.get(0));
+ Assert.assertNull("last", replicatedLogImpl.last());
+
+ List<ReplicatedLogEntry> list = replicatedLogImpl.getFrom(0, 1);
+ assertEquals("getFrom size", 0, list.size());
+
+ assertEquals("removeFrom", -1, replicatedLogImpl.removeFrom(1));
+
+ replicatedLogImpl.setSnapshotIndex(2);
+ replicatedLogImpl.setSnapshotTerm(1);
+
+ assertEquals("getSnapshotIndex", 2, replicatedLogImpl.getSnapshotIndex());
+ assertEquals("getSnapshotTerm", 1, replicatedLogImpl.getSnapshotTerm());
+ assertEquals("lastIndex", 2, replicatedLogImpl.lastIndex());
+ assertEquals("lastTerm", 1, replicatedLogImpl.lastTerm());
}
@Test
// now create a snapshot of 3 entries, with 1 unapplied entry left in the log
// It removes the entries which have made it to snapshot
// and updates the snapshot index and term
- Map<Long, String> state = takeSnapshot(3);
+ takeSnapshot(3);
// check the values after the snapshot.
// each index value passed in the test is the logical index (log entry index)
assertEquals(2, replicatedLogImpl.getFrom(6).size());
// take a second snapshot with 5 entries with 0 unapplied entries left in the log
- state = takeSnapshot(5);
+ takeSnapshot(5);
assertEquals(0, replicatedLogImpl.size());
assertNull(replicatedLogImpl.last());
replicatedLogImpl.snapshotPreCommit(-1, -1);
assertEquals(8, replicatedLogImpl.size());
assertEquals(-1, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(-1, replicatedLogImpl.getSnapshotTerm());
- replicatedLogImpl.snapshotPreCommit(4, 3);
+ replicatedLogImpl.snapshotPreCommit(4, 2);
assertEquals(3, replicatedLogImpl.size());
assertEquals(4, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(2, replicatedLogImpl.getSnapshotTerm());
replicatedLogImpl.snapshotPreCommit(6, 3);
assertEquals(1, replicatedLogImpl.size());
assertEquals(6, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(3, replicatedLogImpl.getSnapshotTerm());
replicatedLogImpl.snapshotPreCommit(7, 3);
assertEquals(0, replicatedLogImpl.size());
assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(3, replicatedLogImpl.getSnapshotTerm());
//running it again on an empty list should not throw exception
replicatedLogImpl.snapshotPreCommit(7, 3);
assertEquals(0, replicatedLogImpl.size());
assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+ assertEquals(3, replicatedLogImpl.getSnapshotTerm());
+ }
+ @Test
+ public void testSnapshotCommit() {
+
+ replicatedLogImpl.snapshotPreCommit(1, 1);
+
+ replicatedLogImpl.snapshotCommit();
+
+ assertEquals("size", 2, replicatedLogImpl.size());
+ assertEquals("dataSize", 2, replicatedLogImpl.dataSize());
+ assertEquals("getSnapshotIndex", 1, replicatedLogImpl.getSnapshotIndex());
+ assertEquals("getSnapshotTerm", 1, replicatedLogImpl.getSnapshotTerm());
+ assertEquals("lastIndex", 3, replicatedLogImpl.lastIndex());
+ assertEquals("lastTerm", 2, replicatedLogImpl.lastTerm());
+
+ Assert.assertNull("get(0)", replicatedLogImpl.get(0));
+ Assert.assertNull("get(1)", replicatedLogImpl.get(1));
+ Assert.assertNotNull("get(2)", replicatedLogImpl.get(2));
+ Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+ }
+
+ @Test
+ public void testSnapshotRollback() {
+
+ replicatedLogImpl.snapshotPreCommit(1, 1);
+
+ assertEquals("size", 2, replicatedLogImpl.size());
+ assertEquals("getSnapshotIndex", 1, replicatedLogImpl.getSnapshotIndex());
+ assertEquals("getSnapshotTerm", 1, replicatedLogImpl.getSnapshotTerm());
+
+ replicatedLogImpl.snapshotRollback();
+
+ assertEquals("size", 4, replicatedLogImpl.size());
+ assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
+ assertEquals("getSnapshotIndex", -1, replicatedLogImpl.getSnapshotIndex());
+ assertEquals("getSnapshotTerm", -1, replicatedLogImpl.getSnapshotTerm());
+ Assert.assertNotNull("get(0)", replicatedLogImpl.get(0));
+ Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
}
@Test
assertTrue(replicatedLogImpl.isPresent(5));
}
+ @Test
+ public void testRemoveFrom() {
+
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("E", 2)));
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("F", 3)));
+
+ assertEquals("dataSize", 9, replicatedLogImpl.dataSize());
+
+ long adjusted = replicatedLogImpl.removeFrom(4);
+ assertEquals("removeFrom - adjusted", 4, adjusted);
+ assertEquals("size", 4, replicatedLogImpl.size());
+ assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
+
+ takeSnapshot(1);
+
+ adjusted = replicatedLogImpl.removeFrom(2);
+ assertEquals("removeFrom - adjusted", 1, adjusted);
+ assertEquals("size", 1, replicatedLogImpl.size());
+ assertEquals("dataSize", 1, replicatedLogImpl.dataSize());
+
+ assertEquals("removeFrom - adjusted", -1, replicatedLogImpl.removeFrom(0));
+ assertEquals("removeFrom - adjusted", -1, replicatedLogImpl.removeFrom(100));
+ }
+
// create a snapshot for test
public Map<Long, String> takeSnapshot(final int numEntries) {
Map<Long, String> map = new HashMap<>(numEntries);
- List<ReplicatedLogEntry> entries = replicatedLogImpl.getEntriesTill(numEntries);
- for (ReplicatedLogEntry entry : entries) {
+
+ long lastIndex = 0;
+ long lastTerm = 0;
+ for(int i = 0; i < numEntries; i++) {
+ ReplicatedLogEntry entry = replicatedLogImpl.getAtPhysicalIndex(i);
map.put(entry.getIndex(), entry.getData().toString());
+ lastIndex = entry.getIndex();
+ lastTerm = entry.getTerm();
}
- int term = (int) replicatedLogImpl.lastTerm();
- int lastIndex = (int) entries.get(entries.size() - 1).getIndex();
- entries.clear();
- replicatedLogImpl.setSnapshotTerm(term);
- replicatedLogImpl.setSnapshotIndex(lastIndex);
+ replicatedLogImpl.snapshotPreCommit(lastIndex, lastTerm);
+ replicatedLogImpl.snapshotCommit();
return map;
}
@Override
- public int dataSize() {
- return -1;
- }
-
- public List<ReplicatedLogEntry> getEntriesTill(final int index) {
- return journal.subList(0, index);
+ public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.verify;
+import akka.japi.Procedure;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for ElectionTermImpl.
+ *
+ * @author Thomas Pantelis
+ */
+public class ElectionTermImplTest {
+ private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
+
+ @Mock
+ private DataPersistenceProvider mockPersistence;
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testUpdateAndPersist() throws Exception {
+ ElectionTermImpl impl = new ElectionTermImpl(mockPersistence, "test", LOG);
+
+ impl.updateAndPersist(10, "member-1");
+
+ assertEquals("getCurrentTerm", 10, impl.getCurrentTerm());
+ assertEquals("getVotedFor", "member-1", impl.getVotedFor());
+
+ ArgumentCaptor<Object> message = ArgumentCaptor.forClass(Object.class);
+ ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
+ verify(mockPersistence).persist(message.capture(), procedure.capture());
+
+ assertEquals("Message type", UpdateElectionTerm.class, message.getValue().getClass());
+ UpdateElectionTerm update = (UpdateElectionTerm)message.getValue();
+ assertEquals("getCurrentTerm", 10, update.getCurrentTerm());
+ assertEquals("getVotedFor", "member-1", update.getVotedFor());
+
+ procedure.getValue().apply(null);
+ }
+}
stopwatch.stop();
return stopwatch.elapsed(TimeUnit.MILLISECONDS);
}
+
+ @Test
+ public void testOkToReplicate(){
+ MockRaftActorContext context = new MockRaftActorContext();
+ context.setCommitIndex(9);
+ FollowerLogInformation followerLogInformation =
+ new FollowerLogInformationImpl(
+ "follower1", 10, context);
+
+ assertTrue(followerLogInformation.okToReplicate());
+ assertFalse(followerLogInformation.okToReplicate());
+
+ // wait for 150 milliseconds and it should work again
+ Uninterruptibles.sleepUninterruptibly(150, TimeUnit.MILLISECONDS);
+ assertTrue(followerLogInformation.okToReplicate());
+
+ //increment next index and try immediately and it should work again
+ followerLogInformation.incrNextIndex();
+ assertTrue(followerLogInformation.okToReplicate());
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+
+public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
+
+ final RaftActor actorDelegate;
+ final RaftActorRecoveryCohort recoveryCohortDelegate;
+ final RaftActorSnapshotCohort snapshotCohortDelegate;
+ private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+ private final List<Object> state;
+ private ActorRef roleChangeNotifier;
+ private final CountDownLatch initializeBehaviorComplete = new CountDownLatch(1);
+ private RaftActorRecoverySupport raftActorRecoverySupport;
+ private RaftActorSnapshotMessageSupport snapshotMessageSupport;
+
+ public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
+ private static final long serialVersionUID = 1L;
+ private final Map<String, String> peerAddresses;
+ private final String id;
+ private final Optional<ConfigParams> config;
+ private final DataPersistenceProvider dataPersistenceProvider;
+ private final ActorRef roleChangeNotifier;
+ private RaftActorSnapshotMessageSupport snapshotMessageSupport;
+
+ private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider,
+ ActorRef roleChangeNotifier) {
+ this.peerAddresses = peerAddresses;
+ this.id = id;
+ this.config = config;
+ this.dataPersistenceProvider = dataPersistenceProvider;
+ this.roleChangeNotifier = roleChangeNotifier;
+ }
+
+ @Override
+ public MockRaftActor create() throws Exception {
+ MockRaftActor mockRaftActor = new MockRaftActor(id, peerAddresses, config,
+ dataPersistenceProvider);
+ mockRaftActor.roleChangeNotifier = this.roleChangeNotifier;
+ mockRaftActor.snapshotMessageSupport = snapshotMessageSupport;
+ return mockRaftActor;
+ }
+ }
+
+ public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
+ DataPersistenceProvider dataPersistenceProvider) {
+ super(id, peerAddresses, config);
+ state = new ArrayList<>();
+ this.actorDelegate = mock(RaftActor.class);
+ this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
+ this.snapshotCohortDelegate = mock(RaftActorSnapshotCohort.class);
+ if(dataPersistenceProvider == null){
+ setPersistence(true);
+ } else {
+ setPersistence(dataPersistenceProvider);
+ }
+ }
+
+ public void setRaftActorRecoverySupport(RaftActorRecoverySupport support) {
+ raftActorRecoverySupport = support;
+ }
+
+ @Override
+ public RaftActorRecoverySupport newRaftActorRecoverySupport() {
+ return raftActorRecoverySupport != null ? raftActorRecoverySupport : super.newRaftActorRecoverySupport();
+ }
+
+ @Override
+ protected RaftActorSnapshotMessageSupport newRaftActorSnapshotMessageSupport() {
+ return snapshotMessageSupport != null ? snapshotMessageSupport : super.newRaftActorSnapshotMessageSupport();
+ }
+
+ public void waitForRecoveryComplete() {
+ try {
+ assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void waitForInitializeBehaviorComplete() {
+ try {
+ assertEquals("Behavior initialized", true, initializeBehaviorComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+
+ public void waitUntilLeader(){
+ for(int i = 0;i < 10; i++){
+ if(isLeader()){
+ break;
+ }
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ }
+ }
+
+ public List<Object> getState() {
+ return state;
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, null));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, RaftActorSnapshotMessageSupport snapshotMessageSupport){
+ MockRaftActorCreator creator = new MockRaftActorCreator(peerAddresses, id, config, null, null);
+ creator.snapshotMessageSupport = snapshotMessageSupport;
+ return Props.create(creator);
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, null));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, ActorRef roleChangeNotifier){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, roleChangeNotifier));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, ActorRef roleChangeNotifier,
+ DataPersistenceProvider dataPersistenceProvider){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, roleChangeNotifier));
+ }
+
+ @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ actorDelegate.applyState(clientActor, identifier, data);
+ LOG.info("{}: applyState called: {}", persistenceId(), data);
+
+ state.add(data);
+ }
+
+ @Override
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return this;
+ }
+
+ @Override
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return this;
+ }
+
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ public void appendRecoveredLogEntry(Payload data) {
+ state.add(data);
+ }
+
+ @Override
+ public void applyCurrentLogRecoveryBatch() {
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ actorDelegate.onRecoveryComplete();
+ recoveryComplete.countDown();
+ }
+
+ @Override
+ protected void initializeBehavior() {
+ super.initializeBehavior();
+ initializeBehaviorComplete.countDown();
+ }
+
+ @Override
+ public void applyRecoverySnapshot(byte[] bytes) {
+ recoveryCohortDelegate.applyRecoverySnapshot(bytes);
+ try {
+ Object data = toObject(bytes);
+ if (data instanceof List) {
+ state.addAll((List<?>) data);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
+ LOG.info("{}: createSnapshot called", persistenceId());
+ snapshotCohortDelegate.createSnapshot(actorRef);
+ }
+
+ @Override
+ public void applySnapshot(byte [] snapshot) {
+ LOG.info("{}: applySnapshot called", persistenceId());
+ snapshotCohortDelegate.applySnapshot(snapshot);
+ }
+
+ @Override
+ protected void onStateChanged() {
+ actorDelegate.onStateChanged();
+ }
+
+ @Override
+ protected Optional<ActorRef> getRoleChangeNotifier() {
+ return Optional.fromNullable(roleChangeNotifier);
+ }
+
+ @Override public String persistenceId() {
+ return this.getId();
+ }
+
+ public static Object toObject(byte[] bs) throws ClassNotFoundException, IOException {
+ Object obj = null;
+ ByteArrayInputStream bis = null;
+ ObjectInputStream ois = null;
+ try {
+ bis = new ByteArrayInputStream(bs);
+ ois = new ObjectInputStream(bis);
+ obj = ois.readObject();
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (ois != null) {
+ ois.close();
+ }
+ }
+ return obj;
+ }
+
+ public ReplicatedLog getReplicatedLog(){
+ return this.getRaftActorContext().getReplicatedLog();
+ }
+}
\ No newline at end of file
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
+import akka.japi.Procedure;
import com.google.common.base.Preconditions;
+import com.google.common.base.Supplier;
import com.google.protobuf.GeneratedMessage;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.test.MockPayloadMessages;
private Map<String, String> peerAddresses = new HashMap<>();
private ConfigParams configParams;
private boolean snapshotCaptureInitiated;
+ private SnapshotManager snapshotManager;
+ private DataPersistenceProvider persistenceProvider = new NonPersistentDataProvider();
public MockRaftActorContext(){
electionTerm = new ElectionTerm() {
}
@Override
- public void setSnapshotCaptureInitiated(boolean snapshotCaptureInitiated) {
- this.snapshotCaptureInitiated = snapshotCaptureInitiated;
+ public SnapshotManager getSnapshotManager() {
+ if(this.snapshotManager == null){
+ this.snapshotManager = new SnapshotManager(this, getLogger());
+ this.snapshotManager.setCreateSnapshotCallable(NoopProcedure.<Void>instance());
+ }
+ return this.snapshotManager;
+ }
+
+ public void setConfigParams(ConfigParams configParams) {
+ this.configParams = configParams;
}
@Override
- public boolean isSnapshotCaptureInitiated() {
- return snapshotCaptureInitiated;
+ public long getTotalMemory() {
+ return Runtime.getRuntime().totalMemory();
}
- public void setConfigParams(ConfigParams configParams) {
- this.configParams = configParams;
+ @Override
+ public void setTotalMemoryRetriever(Supplier<Long> retriever) {
+ }
+
+ @Override
+ public boolean hasFollowers() {
+ return getPeerAddresses().keySet().size() > 0;
+ }
+
+ @Override
+ public DataPersistenceProvider getPersistenceProvider() {
+ return persistenceProvider;
+ }
+
+ public void setPersistenceProvider(DataPersistenceProvider persistenceProvider) {
+ this.persistenceProvider = persistenceProvider;
}
public static class SimpleReplicatedLog extends AbstractReplicatedLogImpl {
@Override public void removeFromAndPersist(long index) {
removeFrom(index);
}
+
+ @Override
+ public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
+ append(replicatedLogEntry);
+
+ if(callback != null) {
+ try {
+ callback.apply(replicatedLogEntry);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
}
public static class MockPayload extends Payload implements Serializable {
private static final long serialVersionUID = 3121380393130864247L;
private String value = "";
+ private int size;
- public MockPayload(){
-
+ public MockPayload() {
}
public MockPayload(String s) {
this.value = s;
+ size = value.length();
+ }
+
+ public MockPayload(String s, int size) {
+ this(s);
+ this.size = size;
}
- @Override public Map<GeneratedMessage.GeneratedExtension, String> encode() {
- Map<GeneratedMessage.GeneratedExtension, String> map = new HashMap<GeneratedMessage.GeneratedExtension, String>();
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, String> encode() {
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, String> map = new HashMap<>();
map.put(MockPayloadMessages.value, value);
return map;
}
@Override
public int size() {
- return value.length();
+ return size;
}
@Override public String getClientPayloadClassName() {
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotOffer;
+import java.util.Arrays;
+import java.util.Collections;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for RaftActorRecoverySupport.
+ *
+ * @author Thomas Pantelis
+ */
+public class RaftActorRecoverySupportTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
+
+ @Mock
+ private DataPersistenceProvider mockPersistence;
+
+ @Mock
+ private RaftActorBehavior mockBehavior;
+
+ @Mock
+ private RaftActorRecoveryCohort mockCohort;
+
+ private RaftActorRecoverySupport support;
+
+ private RaftActorContext context;
+ private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+
+ context = new RaftActorContextImpl(null, null, "test", new ElectionTermImpl(mockPersistence, "test", LOG),
+ -1, -1, Collections.<String,String>emptyMap(), configParams, mockPersistence, LOG);
+
+ support = new RaftActorRecoverySupport(context, mockBehavior , mockCohort);
+
+ doReturn(true).when(mockPersistence).isRecoveryApplicable();
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockBehavior));
+ }
+
+ private void sendMessageToSupport(Object message) {
+ sendMessageToSupport(message, false);
+ }
+
+ private void sendMessageToSupport(Object message, boolean expComplete) {
+ boolean complete = support.handleRecoveryMessage(message);
+ assertEquals("complete", expComplete, complete);
+ }
+
+ @Test
+ public void testOnReplicatedLogEntry() {
+ MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1", 5));
+
+ sendMessageToSupport(logEntry);
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Journal data size", 5, context.getReplicatedLog().dataSize());
+ assertEquals("Last index", 1, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", -1, context.getLastApplied());
+ assertEquals("Commit index", -1, context.getCommitIndex());
+ assertEquals("Snapshot term", -1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", -1, context.getReplicatedLog().getSnapshotIndex());
+ }
+
+ @Test
+ public void testOnApplyJournalEntries() {
+ configParams.setJournalRecoveryLogBatchSize(5);
+
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 3, new MockRaftActorContext.MockPayload("3")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 4, new MockRaftActorContext.MockPayload("4")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 5, new MockRaftActorContext.MockPayload("5")));
+
+ sendMessageToSupport(new ApplyJournalEntries(2));
+
+ assertEquals("Last applied", 2, context.getLastApplied());
+ assertEquals("Commit index", 2, context.getCommitIndex());
+
+ sendMessageToSupport(new ApplyJournalEntries(4));
+
+ assertEquals("Last applied", 4, context.getLastApplied());
+ assertEquals("Last applied", 4, context.getLastApplied());
+
+ sendMessageToSupport(new ApplyJournalEntries(5));
+
+ assertEquals("Last index", 5, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", 5, context.getLastApplied());
+ assertEquals("Commit index", 5, context.getCommitIndex());
+ assertEquals("Snapshot term", -1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", -1, context.getReplicatedLog().getSnapshotIndex());
+
+ InOrder inOrder = Mockito.inOrder(mockCohort);
+ inOrder.verify(mockCohort).startLogRecoveryBatch(5);
+
+ for(int i = 0; i < replicatedLog.size() - 1; i++) {
+ inOrder.verify(mockCohort).appendRecoveredLogEntry(replicatedLog.get(i).getData());
+ }
+
+ inOrder.verify(mockCohort).applyCurrentLogRecoveryBatch();
+ inOrder.verify(mockCohort).startLogRecoveryBatch(5);
+ inOrder.verify(mockCohort).appendRecoveredLogEntry(replicatedLog.get(replicatedLog.size() - 1).getData());
+
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOnApplyLogEntries() {
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+
+ sendMessageToSupport(new ApplyLogEntries(0));
+
+ assertEquals("Last applied", 0, context.getLastApplied());
+ assertEquals("Commit index", 0, context.getCommitIndex());
+ }
+
+ @Test
+ public void testOnSnapshotOffer() {
+
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 3, new MockRaftActorContext.MockPayload("3")));
+
+ byte[] snapshotBytes = {1,2,3,4,5};
+
+ ReplicatedLogEntry unAppliedEntry1 = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 4, new MockRaftActorContext.MockPayload("4", 4));
+
+ ReplicatedLogEntry unAppliedEntry2 = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 5, new MockRaftActorContext.MockPayload("5", 5));
+
+ int lastAppliedDuringSnapshotCapture = 3;
+ int lastIndexDuringSnapshotCapture = 5;
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes, Arrays.asList(unAppliedEntry1, unAppliedEntry2),
+ lastIndexDuringSnapshotCapture, 1, lastAppliedDuringSnapshotCapture, 1);
+
+ SnapshotMetadata metadata = new SnapshotMetadata("test", 6, 12345);
+ SnapshotOffer snapshotOffer = new SnapshotOffer(metadata , snapshot);
+
+ sendMessageToSupport(snapshotOffer);
+
+ assertEquals("Journal log size", 2, context.getReplicatedLog().size());
+ assertEquals("Journal data size", 9, context.getReplicatedLog().dataSize());
+ assertEquals("Last index", lastIndexDuringSnapshotCapture, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", lastAppliedDuringSnapshotCapture, context.getLastApplied());
+ assertEquals("Commit index", lastAppliedDuringSnapshotCapture, context.getCommitIndex());
+ assertEquals("Snapshot term", 1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", lastAppliedDuringSnapshotCapture, context.getReplicatedLog().getSnapshotIndex());
+
+ verify(mockCohort).applyRecoverySnapshot(snapshotBytes);
+ }
+
+ @Test
+ public void testOnRecoveryCompletedWithRemainingBatch() {
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+
+ sendMessageToSupport(new ApplyJournalEntries(1));
+
+ sendMessageToSupport(RecoveryCompleted.getInstance(), true);
+
+ assertEquals("Last applied", 1, context.getLastApplied());
+ assertEquals("Commit index", 1, context.getCommitIndex());
+
+ InOrder inOrder = Mockito.inOrder(mockCohort);
+ inOrder.verify(mockCohort).startLogRecoveryBatch(anyInt());
+
+ for(int i = 0; i < replicatedLog.size(); i++) {
+ inOrder.verify(mockCohort).appendRecoveredLogEntry(replicatedLog.get(i).getData());
+ }
+
+ inOrder.verify(mockCohort).applyCurrentLogRecoveryBatch();
+
+ inOrder.verifyNoMoreInteractions();
+ }
+
+ @Test
+ public void testOnRecoveryCompletedWithNoRemainingBatch() {
+ sendMessageToSupport(RecoveryCompleted.getInstance(), true);
+
+ verifyNoMoreInteractions(mockCohort);
+ }
+
+ @Test
+ public void testOnDeprecatedDeleteEntries() {
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+
+ sendMessageToSupport(new org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries(1));
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Last index", 0, context.getReplicatedLog().lastIndex());
+ }
+
+ @Test
+ public void testOnDeleteEntries() {
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 0, new MockRaftActorContext.MockPayload("0")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1")));
+ replicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 2, new MockRaftActorContext.MockPayload("2")));
+
+ sendMessageToSupport(new DeleteEntries(1));
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Last index", 0, context.getReplicatedLog().lastIndex());
+ }
+
+ @Test
+ public void testUpdateElectionTerm() {
+
+ sendMessageToSupport(new UpdateElectionTerm(5, "member2"));
+
+ assertEquals("Current term", 5, context.getTermInformation().getCurrentTerm());
+ assertEquals("Voted For", "member2", context.getTermInformation().getVotedFor());
+ }
+
+ @Test
+ public void testRecoveryWithPersistenceDisabled() {
+ doReturn(false).when(mockPersistence).isRecoveryApplicable();
+
+ Snapshot snapshot = Snapshot.create(new byte[]{1}, Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1);
+ SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
+
+ sendMessageToSupport(snapshotOffer);
+
+ sendMessageToSupport(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 4, new MockRaftActorContext.MockPayload("4")));
+ sendMessageToSupport(new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 5, new MockRaftActorContext.MockPayload("5")));
+
+ sendMessageToSupport(new ApplyJournalEntries(4));
+
+ sendMessageToSupport(new DeleteEntries(5));
+
+ sendMessageToSupport(new org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries(5));
+
+ assertEquals("Journal log size", 0, context.getReplicatedLog().size());
+ assertEquals("Last index", -1, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", -1, context.getLastApplied());
+ assertEquals("Commit index", -1, context.getCommitIndex());
+ assertEquals("Snapshot term", -1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", -1, context.getReplicatedLog().getSnapshotIndex());
+
+ sendMessageToSupport(new UpdateElectionTerm(5, "member2"));
+
+ assertEquals("Current term", 0, context.getTermInformation().getCurrentTerm());
+ assertEquals("Voted For", null, context.getTermInformation().getVotedFor());
+
+ sendMessageToSupport(RecoveryCompleted.getInstance(), true);
+
+ verifyNoMoreInteractions(mockCohort);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+import akka.actor.ActorRef;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import akka.persistence.SnapshotMetadata;
+import java.util.Arrays;
+import java.util.Collections;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for RaftActorSnapshotMessageSupport.
+ *
+ * @author Thomas Pantelis
+ */
+public class RaftActorSnapshotMessageSupportTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
+
+ @Mock
+ private DataPersistenceProvider mockPersistence;
+
+ @Mock
+ private RaftActorBehavior mockBehavior;
+
+ @Mock
+ private RaftActorSnapshotCohort mockCohort;
+
+ @Mock
+ private SnapshotManager mockSnapshotManager;
+
+ @Mock
+ ActorRef mockRaftActorRef;
+
+ private RaftActorSnapshotMessageSupport support;
+
+ private RaftActorContext context;
+ private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+
+ context = new RaftActorContextImpl(mockRaftActorRef, null, "test",
+ new ElectionTermImpl(mockPersistence, "test", LOG),
+ -1, -1, Collections.<String,String>emptyMap(), configParams, mockPersistence, LOG) {
+ @Override
+ public SnapshotManager getSnapshotManager() {
+ return mockSnapshotManager;
+ }
+ };
+
+ support = new RaftActorSnapshotMessageSupport(context, mockBehavior, mockCohort);
+
+ doReturn(true).when(mockPersistence).isRecoveryApplicable();
+
+ context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, mockBehavior));
+ }
+
+ private void sendMessageToSupport(Object message) {
+ sendMessageToSupport(message, true);
+ }
+
+ private void sendMessageToSupport(Object message, boolean expHandled) {
+ boolean handled = support.handleSnapshotMessage(message);
+ assertEquals("complete", expHandled, handled);
+ }
+
+ @Test
+ public void testOnApplySnapshot() {
+
+ ReplicatedLog replicatedLog = context.getReplicatedLog();
+ replicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("1")));
+
+ byte[] snapshotBytes = {1,2,3,4,5};
+
+ ReplicatedLogEntry unAppliedEntry = new MockReplicatedLogEntry(1, 2, new MockPayload("2"));
+
+ long lastAppliedDuringSnapshotCapture = 1;
+ long lastIndexDuringSnapshotCapture = 2;
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes, Arrays.asList(unAppliedEntry),
+ lastIndexDuringSnapshotCapture, 1, lastAppliedDuringSnapshotCapture, 1);
+
+ sendMessageToSupport(new ApplySnapshot(snapshot));
+
+ assertEquals("Journal log size", 1, context.getReplicatedLog().size());
+ assertEquals("Last index", lastIndexDuringSnapshotCapture, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", lastAppliedDuringSnapshotCapture, context.getLastApplied());
+ assertEquals("Commit index", -1, context.getCommitIndex());
+ assertEquals("Snapshot term", 1, context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Snapshot index", lastAppliedDuringSnapshotCapture, context.getReplicatedLog().getSnapshotIndex());
+
+ verify(mockCohort).applySnapshot(snapshotBytes);
+ }
+
+ @Test
+ public void testOnCaptureSnapshotReply() {
+
+ byte[] snapshot = {1,2,3,4,5};
+ sendMessageToSupport(new CaptureSnapshotReply(snapshot));
+
+ verify(mockSnapshotManager).persist(same(snapshot), same(mockBehavior), anyLong());
+ }
+
+ @Test
+ public void testOnSaveSnapshotSuccess() {
+
+ long sequenceNumber = 100;
+ sendMessageToSupport(new SaveSnapshotSuccess(new SnapshotMetadata("foo", sequenceNumber, 1234L)));
+
+ verify(mockSnapshotManager).commit(sequenceNumber);
+ }
+
+ @Test
+ public void testOnSaveSnapshotFailure() {
+
+ sendMessageToSupport(new SaveSnapshotFailure(new SnapshotMetadata("foo", 100, 1234L),
+ new Throwable("mock")));
+
+ verify(mockSnapshotManager).rollback();
+ }
+
+ @Test
+ public void testOnCommitSnapshot() {
+
+ sendMessageToSupport(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
+
+ verify(mockSnapshotManager).commit(-1);
+ }
+
+ @Test
+ public void testUnhandledMessage() {
+
+ sendMessageToSupport("unhandled", false);
+ }
+}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.same;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Terminated;
-import akka.japi.Creator;
import akka.japi.Procedure;
-import akka.pattern.Patterns;
-import akka.persistence.RecoveryCompleted;
import akka.persistence.SaveSnapshotFailure;
import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotMetadata;
import akka.persistence.SnapshotOffer;
-import akka.persistence.SnapshotSelectionCriteria;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import akka.util.Timeout;
import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
+import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
-import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
-import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
-import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;
public class RaftActorTest extends AbstractActorTest {
+ static final Logger TEST_LOG = LoggerFactory.getLogger(RaftActorTest.class);
+
private TestActorFactory factory;
@Before
@After
public void tearDown() throws Exception {
factory.close();
- MockAkkaJournal.clearJournal();
- MockSnapshotStore.setMockSnapshot(null);
- }
-
- public static class MockRaftActor extends RaftActor {
-
- private final DataPersistenceProvider dataPersistenceProvider;
- private final RaftActor delegate;
- private final CountDownLatch recoveryComplete = new CountDownLatch(1);
- private final List<Object> state;
- private ActorRef roleChangeNotifier;
- private final CountDownLatch initializeBehaviorComplete = new CountDownLatch(1);
-
- public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
- private static final long serialVersionUID = 1L;
- private final Map<String, String> peerAddresses;
- private final String id;
- private final Optional<ConfigParams> config;
- private final DataPersistenceProvider dataPersistenceProvider;
- private final ActorRef roleChangeNotifier;
-
- private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
- Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider,
- ActorRef roleChangeNotifier) {
- this.peerAddresses = peerAddresses;
- this.id = id;
- this.config = config;
- this.dataPersistenceProvider = dataPersistenceProvider;
- this.roleChangeNotifier = roleChangeNotifier;
- }
-
- @Override
- public MockRaftActor create() throws Exception {
- MockRaftActor mockRaftActor = new MockRaftActor(id, peerAddresses, config,
- dataPersistenceProvider);
- mockRaftActor.roleChangeNotifier = this.roleChangeNotifier;
- return mockRaftActor;
- }
- }
-
- public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
- DataPersistenceProvider dataPersistenceProvider) {
- super(id, peerAddresses, config);
- state = new ArrayList<>();
- this.delegate = mock(RaftActor.class);
- if(dataPersistenceProvider == null){
- this.dataPersistenceProvider = new PersistentDataProvider();
- } else {
- this.dataPersistenceProvider = dataPersistenceProvider;
- }
- }
-
- public void waitForRecoveryComplete() {
- try {
- assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
- public void waitForInitializeBehaviorComplete() {
- try {
- assertEquals("Behavior initialized", true, initializeBehaviorComplete.await(5, TimeUnit.SECONDS));
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
- public List<Object> getState() {
- return state;
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, null));
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider, null));
- }
-
- public static Props props(final String id, final Map<String, String> peerAddresses,
- Optional<ConfigParams> config, ActorRef roleChangeNotifier){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null, roleChangeNotifier));
- }
-
- @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
- delegate.applyState(clientActor, identifier, data);
- LOG.info("applyState called");
- }
-
- @Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
- }
-
- @Override
- protected void appendRecoveredLogEntry(Payload data) {
- state.add(data);
- }
-
- @Override
- protected void applyCurrentLogRecoveryBatch() {
- }
-
- @Override
- protected void onRecoveryComplete() {
- delegate.onRecoveryComplete();
- recoveryComplete.countDown();
- }
-
- @Override
- protected void initializeBehavior() {
- super.initializeBehavior();
- initializeBehaviorComplete.countDown();
- }
-
- @Override
- protected void applyRecoverySnapshot(byte[] bytes) {
- delegate.applyRecoverySnapshot(bytes);
- try {
- Object data = toObject(bytes);
- if (data instanceof List) {
- state.addAll((List<?>) data);
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- @Override protected void createSnapshot() {
- delegate.createSnapshot();
- }
-
- @Override protected void applySnapshot(byte [] snapshot) {
- delegate.applySnapshot(snapshot);
- }
-
- @Override protected void onStateChanged() {
- delegate.onStateChanged();
- }
-
- @Override
- protected DataPersistenceProvider persistence() {
- return this.dataPersistenceProvider;
- }
-
- @Override
- protected Optional<ActorRef> getRoleChangeNotifier() {
- return Optional.fromNullable(roleChangeNotifier);
- }
-
- @Override public String persistenceId() {
- return this.getId();
- }
-
- private Object toObject(byte[] bs) throws ClassNotFoundException, IOException {
- Object obj = null;
- ByteArrayInputStream bis = null;
- ObjectInputStream ois = null;
- try {
- bis = new ByteArrayInputStream(bs);
- ois = new ObjectInputStream(bis);
- obj = ois.readObject();
- } finally {
- if (bis != null) {
- bis.close();
- }
- if (ois != null) {
- ois.close();
- }
- }
- return obj;
- }
-
- public ReplicatedLog getReplicatedLog(){
- return this.getRaftActorContext().getReplicatedLog();
- }
-
+ InMemoryJournal.clear();
+ InMemorySnapshotStore.clear();
}
-
- private static class RaftActorTestKit extends JavaTestKit {
- private final ActorRef raftActor;
-
- public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
- super(actorSystem);
-
- raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>absent()), actorName);
-
- }
-
-
- public ActorRef getRaftActor() {
- return raftActor;
- }
-
- public boolean waitForLogMessage(final Class<?> logEventClass, String message){
- // Wait for a specific log message to show up
- return
- new JavaTestKit.EventFilter<Boolean>(logEventClass
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(raftActor.path().toString())
- .message(message)
- .occurrences(1).exec();
-
-
- }
-
- protected void waitUntilLeader(){
- waitUntilLeader(raftActor);
- }
-
- protected void waitUntilLeader(ActorRef actorRef) {
- FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
- for(int i = 0; i < 20 * 5; i++) {
- Future<Object> future = Patterns.ask(actorRef, new FindLeader(), new Timeout(duration));
- try {
- FindLeaderReply resp = (FindLeaderReply) Await.result(future, duration);
- if(resp.getLeaderActor() != null) {
- return;
- }
- } catch(TimeoutException e) {
- } catch(Exception e) {
- System.err.println("FindLeader threw ex");
- e.printStackTrace();
- }
-
-
- Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
- }
-
- Assert.fail("Leader not found for actorRef " + actorRef.path());
- }
-
- }
-
-
@Test
public void testConstruction() {
new RaftActorTestKit(getSystem(), "testConstruction").waitUntilLeader();
}
@Test
- public void testRaftActorRecovery() throws Exception {
+ public void testRaftActorRecoveryWithPersistenceEnabled() throws Exception {
+ TEST_LOG.info("testRaftActorRecoveryWithPersistenceEnabled starting");
+
new JavaTestKit(getSystem()) {{
String persistenceId = factory.generateActorId("follower-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
// Set the heartbeat interval high to essentially disable election otherwise the test
// may fail if the actor is switched to Leader and the commitIndex is set to the last
// log entry.
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ ImmutableMap<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
+ peerAddresses, Optional.<ConfigParams>of(config)), persistenceId);
watch(followerActor);
Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1,
lastAppliedDuringSnapshotCapture, 1);
- MockSnapshotStore.setMockSnapshot(snapshot);
- MockSnapshotStore.setPersistenceId(persistenceId);
+ InMemorySnapshotStore.addSnapshot(persistenceId, snapshot);
// add more entries after snapshot is taken
List<ReplicatedLogEntry> entries = new ArrayList<>();
ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
- new MockRaftActorContext.MockPayload("F"));
+ new MockRaftActorContext.MockPayload("F", 2));
ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
- new MockRaftActorContext.MockPayload("G"));
+ new MockRaftActorContext.MockPayload("G", 3));
ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
- new MockRaftActorContext.MockPayload("H"));
+ new MockRaftActorContext.MockPayload("H", 4));
entries.add(entry2);
entries.add(entry3);
entries.add(entry4);
int lastAppliedToState = 5;
int lastIndex = 7;
- MockAkkaJournal.addToJournal(5, entry2);
+ InMemoryJournal.addEntry(persistenceId, 5, entry2);
// 2 entries are applied to state besides the 4 entries in snapshot
- MockAkkaJournal.addToJournal(6, new ApplyJournalEntries(lastAppliedToState));
- MockAkkaJournal.addToJournal(7, entry3);
- MockAkkaJournal.addToJournal(8, entry4);
-
+ InMemoryJournal.addEntry(persistenceId, 6, new ApplyJournalEntries(lastAppliedToState));
+ InMemoryJournal.addEntry(persistenceId, 7, entry3);
+ InMemoryJournal.addEntry(persistenceId, 8, entry4);
// kill the actor
followerActor.tell(PoisonPill.getInstance(), null);
//reinstate the actor
TestActorRef<MockRaftActor> ref = factory.createTestActor(
- MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
- Optional.<ConfigParams>of(config)));
+ MockRaftActor.props(persistenceId, peerAddresses, Optional.<ConfigParams>of(config)));
- ref.underlyingActor().waitForRecoveryComplete();
+ MockRaftActor mockRaftActor = ref.underlyingActor();
- RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+ mockRaftActor.waitForRecoveryComplete();
+
+ RaftActorContext context = mockRaftActor.getRaftActorContext();
assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
context.getReplicatedLog().size());
+ assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize());
assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
- assertEquals("Recovered state size", 6, ref.underlyingActor().getState().size());
+ assertEquals("Recovered state size", 6, mockRaftActor.getState().size());
+
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
+ assertEquals("getRaftState", RaftState.Follower, mockRaftActor.getRaftState());
}};
+
+ TEST_LOG.info("testRaftActorRecoveryWithPersistenceEnabled ending");
}
@Test
- public void testRaftActorRecoveryWithPreLithuimApplyLogEntries() throws Exception {
+ public void testRaftActorRecoveryWithPersistenceDisabled() throws Exception {
new JavaTestKit(getSystem()) {{
- String persistenceId = factory.generateActorId("leader-");
+ String persistenceId = factory.generateActorId("follower-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- // Setup the persisted journal with some entries
- ReplicatedLogEntry entry0 = new MockRaftActorContext.MockReplicatedLogEntry(1, 0,
- new MockRaftActorContext.MockPayload("zero"));
- ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 1,
- new MockRaftActorContext.MockPayload("oen"));
- ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
- new MockRaftActorContext.MockPayload("two"));
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- long seqNr = 1;
- MockAkkaJournal.addToJournal(seqNr++, entry0);
- MockAkkaJournal.addToJournal(seqNr++, entry1);
- MockAkkaJournal.addToJournal(seqNr++, new ApplyLogEntries(1));
- MockAkkaJournal.addToJournal(seqNr++, entry2);
+ TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
+ ImmutableMap.<String, String>builder().put("member1", "address").build(),
+ Optional.<ConfigParams>of(config), new NonPersistentDataProvider()), persistenceId);
- int lastAppliedToState = 1;
- int lastIndex = 2;
+ MockRaftActor mockRaftActor = ref.underlyingActor();
- //reinstate the actor
- TestActorRef<MockRaftActor> leaderActor = factory.createTestActor(
- MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
- Optional.<ConfigParams>of(config)));
+ mockRaftActor.waitForRecoveryComplete();
- leaderActor.underlyingActor().waitForRecoveryComplete();
+ mockRaftActor.waitForInitializeBehaviorComplete();
- RaftActorContext context = leaderActor.underlyingActor().getRaftActorContext();
- assertEquals("Journal log size", 3, context.getReplicatedLog().size());
- assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
- assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
- assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
+ assertEquals("getRaftState", RaftState.Follower, mockRaftActor.getRaftState());
}};
}
- /**
- * This test verifies that when recovery is applicable (typically when persistence is true) the RaftActor does
- * process recovery messages
- *
- * @throws Exception
- */
-
@Test
- public void testHandleRecoveryWhenDataPersistenceRecoveryApplicable() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- // Wait for akka's recovery to complete so it doesn't interfere.
- mockRaftActor.waitForRecoveryComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- Lists.<ReplicatedLogEntry>newArrayList(), 3, 1, 3, 1);
-
- mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
-
- verify(mockRaftActor.delegate).applyRecoverySnapshot(eq(snapshotBytes.toByteArray()));
-
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
-
- ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
-
- assertEquals("add replicated log entry", 1, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
-
- assertEquals("add replicated log entry", 2, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new ApplyJournalEntries(1));
-
- assertEquals("commit index 1", 1, mockRaftActor.getRaftActorContext().getCommitIndex());
-
- // The snapshot had 4 items + we added 2 more items during the test
- // We start removing from 5 and we should get 1 item in the replicated log
- mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(5));
-
- assertEquals("remove log entries", 1, replicatedLog.size());
-
- mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
-
- assertEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
- assertEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
-
- mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
-
- }};
- }
-
- /**
- * This test verifies that when recovery is not applicable (typically when persistence is false) the RaftActor does
- * not process recovery messages
- *
- * @throws Exception
- */
- @Test
- public void testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ public void testRaftActorForwardsToRaftActorRecoverySupport() {
+ String persistenceId = factory.generateActorId("leader-");
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), new DataPersistenceProviderMonitor()), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- // Wait for akka's recovery to complete so it doesn't interfere.
- mockRaftActor.waitForRecoveryComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- Lists.<ReplicatedLogEntry>newArrayList(), 3, 1, 3, 1);
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
- verify(mockRaftActor.delegate, times(0)).applyRecoverySnapshot(any(byte[].class));
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
+ // Wait for akka's recovery to complete so it doesn't interfere.
+ mockRaftActor.waitForRecoveryComplete();
- ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
+ RaftActorRecoverySupport mockSupport = mock(RaftActorRecoverySupport.class);
+ mockRaftActor.setRaftActorRecoverySupport(mockSupport );
- assertEquals("add replicated log entry", 0, replicatedLog.size());
+ Snapshot snapshot = Snapshot.create(new byte[]{1}, Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1);
+ SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
+ mockRaftActor.handleRecover(snapshotOffer);
- mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
+ MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(1,
+ 1, new MockRaftActorContext.MockPayload("1", 5));
+ mockRaftActor.handleRecover(logEntry);
- assertEquals("add replicated log entry", 0, replicatedLog.size());
+ ApplyJournalEntries applyJournalEntries = new ApplyJournalEntries(2);
+ mockRaftActor.handleRecover(applyJournalEntries);
- mockRaftActor.onReceiveRecover(new ApplyJournalEntries(1));
+ ApplyLogEntries applyLogEntries = new ApplyLogEntries(0);
+ mockRaftActor.handleRecover(applyLogEntries);
- assertEquals("commit index -1", -1, mockRaftActor.getRaftActorContext().getCommitIndex());
+ DeleteEntries deleteEntries = new DeleteEntries(1);
+ mockRaftActor.handleRecover(deleteEntries);
- mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(2));
+ org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries deprecatedDeleteEntries =
+ new org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries(1);
+ mockRaftActor.handleRecover(deprecatedDeleteEntries);
- assertEquals("remove log entries", 0, replicatedLog.size());
+ UpdateElectionTerm updateElectionTerm = new UpdateElectionTerm(5, "member2");
+ mockRaftActor.handleRecover(updateElectionTerm);
- mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
-
- assertNotEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
- assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
-
- mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
- }};
+ verify(mockSupport).handleRecoveryMessage(same(snapshotOffer));
+ verify(mockSupport).handleRecoveryMessage(same(logEntry));
+ verify(mockSupport).handleRecoveryMessage(same(applyJournalEntries));
+ verify(mockSupport).handleRecoveryMessage(same(applyLogEntries));
+ verify(mockSupport).handleRecoveryMessage(same(deleteEntries));
+ verify(mockSupport).handleRecoveryMessage(same(deprecatedDeleteEntries));
+ verify(mockSupport).handleRecoveryMessage(same(updateElectionTerm));
}
-
@Test
- public void testUpdatingElectionTermCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
+ public void testRaftActorForwardsToRaftActorSnapshotMessageSupport() {
+ String persistenceId = factory.generateActorId("leader-");
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- CountDownLatch persistLatch = new CountDownLatch(1);
- DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
- dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+ RaftActorSnapshotMessageSupport mockSupport = mock(RaftActorSnapshotMessageSupport.class);
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), mockSupport), persistenceId);
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- mockRaftActor.waitForInitializeBehaviorComplete();
+ // Wait for akka's recovery to complete so it doesn't interfere.
+ mockRaftActor.waitForRecoveryComplete();
- mockRaftActor.getRaftActorContext().getTermInformation().updateAndPersist(10, "foobar");
+ ApplySnapshot applySnapshot = new ApplySnapshot(mock(Snapshot.class));
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(applySnapshot));
+ mockRaftActor.handleCommand(applySnapshot);
- assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
- }
- };
- }
-
- @Test
- public void testAddingReplicatedLogEntryCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ CaptureSnapshot captureSnapshot = new CaptureSnapshot(1, 1, 1, 1, 0, 1, null);
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshot));
+ mockRaftActor.handleCommand(captureSnapshot);
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+ CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(new byte[0]);
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshotReply));
+ mockRaftActor.handleCommand(captureSnapshotReply);
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ SaveSnapshotSuccess saveSnapshotSuccess = new SaveSnapshotSuccess(mock(SnapshotMetadata.class));
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess));
+ mockRaftActor.handleCommand(saveSnapshotSuccess);
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ SaveSnapshotFailure saveSnapshotFailure = new SaveSnapshotFailure(mock(SnapshotMetadata.class), new Throwable());
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure));
+ mockRaftActor.handleCommand(saveSnapshotFailure);
- mockRaftActor.waitForInitializeBehaviorComplete();
+ doReturn(true).when(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT));
+ mockRaftActor.handleCommand(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
- MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(10, 10, mock(Payload.class));
-
- mockRaftActor.getRaftActorContext().getReplicatedLog().appendAndPersist(logEntry);
-
- verify(dataPersistenceProvider).persist(eq(logEntry), any(Procedure.class));
- }
- };
- }
-
- @Test
- public void testRemovingReplicatedLogEntryCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- mockRaftActor.getReplicatedLog().appendAndPersist(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
-
- mockRaftActor.getRaftActorContext().getReplicatedLog().removeFromAndPersist(0);
-
- verify(dataPersistenceProvider, times(2)).persist(anyObject(), any(Procedure.class));
- }
- };
+ verify(mockSupport).handleSnapshotMessage(same(applySnapshot));
+ verify(mockSupport).handleSnapshotMessage(same(captureSnapshot));
+ verify(mockSupport).handleSnapshotMessage(same(captureSnapshotReply));
+ verify(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess));
+ verify(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure));
+ verify(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT));
}
@Test
mockRaftActor.waitForInitializeBehaviorComplete();
- mockRaftActor.onReceiveCommand(new ApplyJournalEntries(10));
-
- verify(dataPersistenceProvider, times(1)).persist(anyObject(), any(Procedure.class));
-
- }
-
- };
- }
-
- @Test
- public void testCaptureSnapshotReplyCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
- MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
- Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1,-1, 1, -1, 1));
-
- RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
-
- mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
+ mockRaftActor.waitUntilLeader();
- mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ mockRaftActor.onReceiveCommand(new ApplyJournalEntries(10));
- verify(dataPersistenceProvider).saveSnapshot(anyObject());
+ verify(dataPersistenceProvider, times(2)).persist(anyObject(), any(Procedure.class));
}
- };
- }
-
- @Test
- public void testSaveSnapshotSuccessCallsDataPersistence() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 1, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 2, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 3, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 4, mock(Payload.class)));
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
- mockRaftActor.setCurrentBehavior(new Follower(raftActorContext));
-
- long replicatedToAllIndex = 1;
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, 2, 1, replicatedToAllIndex, 1));
-
- verify(mockRaftActor.delegate).createSnapshot();
-
- mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
-
- mockRaftActor.onReceiveCommand(new SaveSnapshotSuccess(new SnapshotMetadata("foo", 100, 100)));
-
- verify(dataPersistenceProvider).deleteSnapshots(any(SnapshotSelectionCriteria.class));
-
- verify(dataPersistenceProvider).deleteMessages(100);
-
- assertEquals(3, mockRaftActor.getReplicatedLog().size());
- assertEquals(1, mockRaftActor.getCurrentBehavior().getReplicatedToAllIndex());
-
- assertNotNull(mockRaftActor.getReplicatedLog().get(2));
- assertNotNull(mockRaftActor.getReplicatedLog().get(3));
- assertNotNull(mockRaftActor.getReplicatedLog().get(4));
-
- // Index 2 will not be in the log because it was removed due to snapshotting
- assertNull(mockRaftActor.getReplicatedLog().get(1));
- assertNull(mockRaftActor.getReplicatedLog().get(0));
-
- }
};
}
mockRaftActor.onReceiveCommand(new ApplyState(mockActorRef, "apply-state", entry));
- verify(mockRaftActor.delegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
+ verify(mockRaftActor.actorDelegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
}
};
}
@Test
- public void testApplySnapshot() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
-
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
-
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-
- DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
-
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
-
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
-
- mockRaftActor.waitForInitializeBehaviorComplete();
-
- ReplicatedLog oldReplicatedLog = mockRaftActor.getReplicatedLog();
-
- oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
- oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1, 1, mock(Payload.class)));
- oldReplicatedLog.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
- mock(Payload.class)));
-
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
-
- Snapshot snapshot = mock(Snapshot.class);
-
- doReturn(snapshotBytes.toByteArray()).when(snapshot).getState();
-
- doReturn(3L).when(snapshot).getLastAppliedIndex();
-
- mockRaftActor.onReceiveCommand(new ApplySnapshot(snapshot));
-
- verify(mockRaftActor.delegate).applySnapshot(eq(snapshot.getState()));
-
- assertTrue("The replicatedLog should have changed",
- oldReplicatedLog != mockRaftActor.getReplicatedLog());
+ public void testRaftRoleChangeNotifierWhenRaftActorHasNoPeers() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ TestActorRef<MessageCollectorActor> notifierActor = factory.createTestActor(
+ Props.create(MessageCollectorActor.class));
+ MessageCollectorActor.waitUntilReady(notifierActor);
- assertEquals("lastApplied should be same as in the snapshot",
- (Long) 3L, mockRaftActor.getLastApplied());
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ long heartBeatInterval = 100;
+ config.setHeartBeatInterval(FiniteDuration.create(heartBeatInterval, TimeUnit.MILLISECONDS));
+ config.setElectionTimeoutFactor(20);
- assertEquals(0, mockRaftActor.getReplicatedLog().size());
+ String persistenceId = factory.generateActorId("notifier-");
- }
- };
- }
+ TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), notifierActor,
+ new NonPersistentDataProvider()), persistenceId);
- @Test
- public void testSaveSnapshotFailure() throws Exception {
- new JavaTestKit(getSystem()) {
- {
- String persistenceId = factory.generateActorId("leader-");
+ List<RoleChanged> matches = MessageCollectorActor.expectMatching(notifierActor, RoleChanged.class, 3);
- DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ // check if the notifier got a role change from null to Follower
+ RoleChanged raftRoleChanged = matches.get(0);
+ assertEquals(persistenceId, raftRoleChanged.getMemberId());
+ assertNull(raftRoleChanged.getOldRole());
+ assertEquals(RaftState.Follower.name(), raftRoleChanged.getNewRole());
- DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ // check if the notifier got a role change from Follower to Candidate
+ raftRoleChanged = matches.get(1);
+ assertEquals(persistenceId, raftRoleChanged.getMemberId());
+ assertEquals(RaftState.Follower.name(), raftRoleChanged.getOldRole());
+ assertEquals(RaftState.Candidate.name(), raftRoleChanged.getNewRole());
- TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+ // check if the notifier got a role change from Candidate to Leader
+ raftRoleChanged = matches.get(2);
+ assertEquals(persistenceId, raftRoleChanged.getMemberId());
+ assertEquals(RaftState.Candidate.name(), raftRoleChanged.getOldRole());
+ assertEquals(RaftState.Leader.name(), raftRoleChanged.getNewRole());
- MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ LeaderStateChanged leaderStateChange = MessageCollectorActor.expectFirstMatching(
+ notifierActor, LeaderStateChanged.class);
- mockRaftActor.waitForInitializeBehaviorComplete();
+ assertEquals(raftRoleChanged.getMemberId(), leaderStateChange.getLeaderId());
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
+ notifierActor.underlyingActor().clear();
- RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
+ MockRaftActor raftActor = raftActorRef.underlyingActor();
+ final String newLeaderId = "new-leader";
+ Follower follower = new Follower(raftActor.getRaftActorContext()) {
+ @Override
+ public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+ leaderId = newLeaderId;
+ return this;
+ }
+ };
- mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
+ raftActor.changeCurrentBehavior(follower);
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, -1, 1, -1, 1));
+ leaderStateChange = MessageCollectorActor.expectFirstMatching(notifierActor, LeaderStateChanged.class);
+ assertEquals(persistenceId, leaderStateChange.getMemberId());
+ assertEquals(null, leaderStateChange.getLeaderId());
- mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ raftRoleChanged = MessageCollectorActor.expectFirstMatching(notifierActor, RoleChanged.class);
+ assertEquals(RaftState.Leader.name(), raftRoleChanged.getOldRole());
+ assertEquals(RaftState.Follower.name(), raftRoleChanged.getNewRole());
- mockRaftActor.onReceiveCommand(new SaveSnapshotFailure(new SnapshotMetadata("foobar", 10L, 1234L),
- new Exception()));
+ notifierActor.underlyingActor().clear();
- assertEquals("Snapshot index should not have advanced because save snapshot failed", -1,
- mockRaftActor.getReplicatedLog().getSnapshotIndex());
+ raftActor.handleCommand("any");
- }
- };
+ leaderStateChange = MessageCollectorActor.expectFirstMatching(notifierActor, LeaderStateChanged.class);
+ assertEquals(persistenceId, leaderStateChange.getMemberId());
+ assertEquals(newLeaderId, leaderStateChange.getLeaderId());
+ }};
}
@Test
- public void testRaftRoleChangeNotifier() throws Exception {
+ public void testRaftRoleChangeNotifierWhenRaftActorHasPeers() throws Exception {
new JavaTestKit(getSystem()) {{
ActorRef notifierActor = factory.createActor(Props.create(MessageCollectorActor.class));
MessageCollectorActor.waitUntilReady(notifierActor);
String persistenceId = factory.generateActorId("notifier-");
- factory.createTestActor(MockRaftActor.props(persistenceId,
- Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), notifierActor), persistenceId);
+ factory.createActor(MockRaftActor.props(persistenceId,
+ ImmutableMap.of("leader", "fake/path"), Optional.<ConfigParams>of(config), notifierActor), persistenceId);
List<RoleChanged> matches = null;
for(int i = 0; i < 5000 / heartBeatInterval; i++) {
Uninterruptibles.sleepUninterruptibly(heartBeatInterval, TimeUnit.MILLISECONDS);
}
- assertEquals(3, matches.size());
+ assertNotNull(matches);
+ assertEquals(2, matches.size());
// check if the notifier got a role change from null to Follower
RoleChanged raftRoleChanged = matches.get(0);
assertEquals(RaftState.Follower.name(), raftRoleChanged.getOldRole());
assertEquals(RaftState.Candidate.name(), raftRoleChanged.getNewRole());
- // check if the notifier got a role change from Candidate to Leader
- raftRoleChanged = matches.get(2);
- assertEquals(persistenceId, raftRoleChanged.getMemberId());
- assertEquals(RaftState.Candidate.name(), raftRoleChanged.getOldRole());
- assertEquals(RaftState.Leader.name(), raftRoleChanged.getNewRole());
}};
}
assertEquals(8, leaderActor.getReplicatedLog().size());
- leaderActor.onReceiveCommand(new CaptureSnapshot(6, 1, 4, 1, 4, 1));
+ leaderActor.getRaftActorContext().getSnapshotManager()
+ .capture(new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
+ new MockRaftActorContext.MockPayload("x")), 4);
- leaderActor.getRaftActorContext().setSnapshotCaptureInitiated(true);
- verify(leaderActor.delegate).createSnapshot();
+ verify(leaderActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
assertEquals(8, leaderActor.getReplicatedLog().size());
new MockRaftActorContext.MockPayload("foo-2"),
new MockRaftActorContext.MockPayload("foo-3"),
new MockRaftActorContext.MockPayload("foo-4")));
- leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
- assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+
+ leaderActor.getRaftActorContext().getSnapshotManager().persist(snapshotBytes.toByteArray(),
+ leader, Runtime.getRuntime().totalMemory());
+
+ assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
+
+ // The commit is needed to complete the snapshot creation process
+ leaderActor.getRaftActorContext().getSnapshotManager().commit(-1);
// capture snapshot reply should remove the snapshotted entries only
assertEquals(3, leaderActor.getReplicatedLog().size());
assertEquals(6, followerActor.getReplicatedLog().size());
//snapshot on 4
- followerActor.onReceiveCommand(new CaptureSnapshot(5, 1, 4, 1, 4, 1));
+ followerActor.getRaftActorContext().getSnapshotManager().capture(
+ new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
+ new MockRaftActorContext.MockPayload("D")), 4);
- followerActor.getRaftActorContext().setSnapshotCaptureInitiated(true);
- verify(followerActor.delegate).createSnapshot();
+ verify(followerActor.snapshotCohortDelegate).createSnapshot(any(ActorRef.class));
assertEquals(6, followerActor.getReplicatedLog().size());
new MockRaftActorContext.MockPayload("foo-3"),
new MockRaftActorContext.MockPayload("foo-4")));
followerActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
- assertFalse(followerActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertFalse(followerActor.getRaftActorContext().getSnapshotManager().isCapturing());
+
+ // The commit is needed to complete the snapshot creation process
+ followerActor.getRaftActorContext().getSnapshotManager().commit(-1);
// capture snapshot reply should remove the snapshotted entries only till replicatedToAllIndex
assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
new MockRaftActorContext.MockPayload("foo-3"),
new MockRaftActorContext.MockPayload("foo-4")));
leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
- assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
assertEquals("Real snapshot didn't clear the log till replicatedToAllIndex", 0, leaderActor.getReplicatedLog().size());
};
}
-
- private static class NonPersistentProvider implements DataPersistenceProvider {
- @Override
- public boolean isRecoveryApplicable() {
- return false;
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- try {
- procedure.apply(o);
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- @Override
- public void saveSnapshot(Object o) {
-
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
-
- }
- }
-
@Test
public void testRealSnapshotWhenReplicatedToAllIndexMinusOne() throws Exception {
new JavaTestKit(getSystem()) {{
config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
config.setSnapshotBatchCount(5);
- DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentDataProvider();
Map<String, String> peerAddresses = new HashMap<>();
// Trimming log in this scenario is a no-op
assertEquals(-1, leaderActor.getReplicatedLog().getSnapshotIndex());
- assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
assertEquals(-1, leader.getReplicatedToAllIndex());
}};
config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
config.setSnapshotBatchCount(5);
- DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentDataProvider();
Map<String, String> peerAddresses = new HashMap<>();
// Trimming log in this scenario is a no-op
assertEquals(3, leaderActor.getReplicatedLog().getSnapshotIndex());
- assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
assertEquals(3, leader.getReplicatedToAllIndex());
}};
}
- private ByteString fromObject(Object snapshot) throws Exception {
+ public static ByteString fromObject(Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
try {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.pattern.Patterns;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+public class RaftActorTestKit extends JavaTestKit {
+ private final ActorRef raftActor;
+
+ public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
+ super(actorSystem);
+
+ raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>absent()), actorName);
+
+ }
+
+
+ public ActorRef getRaftActor() {
+ return raftActor;
+ }
+
+ public boolean waitForLogMessage(final Class<?> logEventClass, String message){
+ // Wait for a specific log message to show up
+ return
+ new JavaTestKit.EventFilter<Boolean>(logEventClass
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from(raftActor.path().toString())
+ .message(message)
+ .occurrences(1).exec();
+
+
+ }
+
+ protected void waitUntilLeader(){
+ waitUntilLeader(raftActor);
+ }
+
+ public static void waitUntilLeader(ActorRef actorRef) {
+ FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
+ for(int i = 0; i < 20 * 5; i++) {
+ Future<Object> future = Patterns.ask(actorRef, new FindLeader(), new Timeout(duration));
+ try {
+ FindLeaderReply resp = (FindLeaderReply) Await.result(future, duration);
+ if(resp.getLeaderActor() != null) {
+ return;
+ }
+ } catch(TimeoutException e) {
+ } catch(Exception e) {
+ System.err.println("FindLeader threw ex");
+ e.printStackTrace();
+ }
+
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Leader not found for actorRef " + actorRef.path());
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import akka.persistence.SaveSnapshotSuccess;
+import com.google.common.collect.ImmutableMap;
+import java.util.Arrays;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+
+/**
+ * Tests raft actor persistence recovery end-to-end using real RaftActors and behavior communication.
+ *
+ * @author Thomas Pantelis
+ */
+public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
+
+ private MockPayload payload0;
+ private MockPayload payload1;
+
+ @Before
+ public void setup() {
+ follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+ newFollowerConfigParams());
+
+ peerAddresses = ImmutableMap.<String, String>builder().
+ put(follower1Id, follower1Actor.path().toString()).build();
+
+ leaderConfigParams = newLeaderConfigParams();
+ leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
+
+ follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
+ leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
+
+ leaderContext = leaderActor.underlyingActor().getRaftActorContext();
+ }
+
+ @Test
+ public void testStatePersistedBetweenSnapshotCaptureAndPersist() {
+
+ send2InitialPayloads();
+
+ // Block these messages initially so we can control the sequence.
+ leaderActor.underlyingActor().startDropMessages(CaptureSnapshotReply.class);
+ follower1Actor.underlyingActor().startDropMessages(AppendEntries.class);
+
+ MockPayload payload2 = sendPayloadData(leaderActor, "two");
+
+ // This should trigger a snapshot.
+ MockPayload payload3 = sendPayloadData(leaderActor, "three");
+
+ MessageCollectorActor.expectMatching(follower1CollectorActor, AppendEntries.class, 3);
+
+ // Send another payload.
+ MockPayload payload4 = sendPayloadData(leaderActor, "four");
+
+ // Now deliver the AppendEntries to the follower
+ follower1Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+
+ MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyJournalEntries.class, 3);
+
+ // Now deliver the CaptureSnapshotReply to the leader.
+ CaptureSnapshotReply captureSnapshotReply = MessageCollectorActor.expectFirstMatching(
+ leaderCollectorActor, CaptureSnapshotReply.class);
+ leaderActor.underlyingActor().stopDropMessages(CaptureSnapshotReply.class);
+ leaderActor.tell(captureSnapshotReply, leaderActor);
+
+ // Wait for snapshot complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ reinstateLeaderActor();
+
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 3, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 4, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
+
+ assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+ leaderActor.underlyingActor().getState());
+ }
+
+ @Test
+ public void testStatePersistedAfterSnapshotPersisted() {
+
+ send2InitialPayloads();
+
+ // Block these messages initially so we can control the sequence.
+ follower1Actor.underlyingActor().startDropMessages(AppendEntries.class);
+
+ MockPayload payload2 = sendPayloadData(leaderActor, "two");
+
+ // This should trigger a snapshot.
+ MockPayload payload3 = sendPayloadData(leaderActor, "three");
+
+ // Send another payload.
+ MockPayload payload4 = sendPayloadData(leaderActor, "four");
+
+ MessageCollectorActor.expectMatching(follower1CollectorActor, AppendEntries.class, 3);
+
+ // Wait for snapshot complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ // Now deliver the AppendEntries to the follower
+ follower1Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+
+ MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyJournalEntries.class, 3);
+
+ reinstateLeaderActor();
+
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 3, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 4, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
+
+ assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+ leaderActor.underlyingActor().getState());
+ }
+
+ private void reinstateLeaderActor() {
+ killActor(leaderActor);
+
+ leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
+
+ leaderActor.underlyingActor().waitForRecoveryComplete();
+
+ leaderContext = leaderActor.underlyingActor().getRaftActorContext();
+ }
+
+ private void send2InitialPayloads() {
+ waitUntilLeader(leaderActor);
+ currentTerm = leaderContext.getTermInformation().getCurrentTerm();
+
+ payload0 = sendPayloadData(leaderActor, "zero");
+ payload1 = sendPayloadData(leaderActor, "one");
+
+ // Verify the leader applies the states.
+ MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyJournalEntries.class, 2);
+
+ assertEquals("Leader last applied", 1, leaderContext.getLastApplied());
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import akka.japi.Procedure;
+import com.google.common.base.Supplier;
+import java.util.Collections;
+import org.hamcrest.BaseMatcher;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.mockito.internal.matchers.Same;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for ReplicatedLogImpl.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReplicatedLogImplTest {
+ private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
+
+ @Mock
+ private DataPersistenceProvider mockPersistence;
+
+ @Mock
+ private RaftActorBehavior mockBehavior;
+
+ @Mock
+ private SnapshotManager mockSnapshotManager;
+
+ private RaftActorContext context;
+ private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+
+ context = new RaftActorContextImpl(null, null, "test",
+ new ElectionTermImpl(mockPersistence, "test", LOG),
+ -1, -1, Collections.<String,String>emptyMap(), configParams, mockPersistence, LOG) {
+ @Override
+ public SnapshotManager getSnapshotManager() {
+ return mockSnapshotManager;
+ }
+ };
+ }
+
+ private void verifyPersist(Object message) throws Exception {
+ verifyPersist(message, new Same(message));
+ }
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ private void verifyPersist(Object message, Matcher<?> matcher) throws Exception {
+ ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
+ verify(mockPersistence).persist(Matchers.argThat(matcher), procedure.capture());
+
+ procedure.getValue().apply(message);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testAppendAndPersistExpectingNoCapture() throws Exception {
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
+
+ MockReplicatedLogEntry logEntry = new MockReplicatedLogEntry(1, 1, new MockPayload("1"));
+
+ log.appendAndPersist(logEntry);
+
+ verifyPersist(logEntry);
+
+ assertEquals("size", 1, log.size());
+
+ reset(mockPersistence);
+
+ Procedure<ReplicatedLogEntry> mockCallback = Mockito.mock(Procedure.class);
+ log.appendAndPersist(logEntry, mockCallback);
+
+ verifyPersist(logEntry);
+
+ verify(mockCallback).apply(same(logEntry));
+ verifyNoMoreInteractions(mockSnapshotManager);
+
+ assertEquals("size", 2, log.size());
+ }
+
+ @Test
+ public void testAppendAndPersistExpectingCaptureDueToJournalCount() throws Exception {
+ configParams.setSnapshotBatchCount(2);
+
+ doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
+
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
+
+ MockReplicatedLogEntry logEntry1 = new MockReplicatedLogEntry(1, 2, new MockPayload("2"));
+ MockReplicatedLogEntry logEntry2 = new MockReplicatedLogEntry(1, 3, new MockPayload("3"));
+
+ log.appendAndPersist(logEntry1);
+ verifyPersist(logEntry1);
+
+ verifyNoMoreInteractions(mockSnapshotManager);
+ reset(mockPersistence);
+
+ log.appendAndPersist(logEntry2);
+ verifyPersist(logEntry2);
+
+ verify(mockSnapshotManager).capture(same(logEntry2), eq(1L));
+
+ assertEquals("size", 2, log.size());
+ }
+
+ @Test
+ public void testAppendAndPersistExpectingCaptureDueToDataSize() throws Exception {
+ doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
+
+ context.setTotalMemoryRetriever(new Supplier<Long>() {
+ @Override
+ public Long get() {
+ return 100L;
+ }
+ });
+
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
+
+ int dataSize = 600;
+ MockReplicatedLogEntry logEntry = new MockReplicatedLogEntry(1, 2, new MockPayload("2", dataSize));
+
+ doReturn(true).when(mockSnapshotManager).capture(same(logEntry), eq(1L));
+
+ log.appendAndPersist(logEntry);
+ verifyPersist(logEntry);
+
+ verify(mockSnapshotManager).capture(same(logEntry), eq(1L));
+
+ reset(mockPersistence, mockSnapshotManager);
+
+ logEntry = new MockReplicatedLogEntry(1, 3, new MockPayload("3", 5));
+
+ log.appendAndPersist(logEntry);
+ verifyPersist(logEntry);
+
+ verifyNoMoreInteractions(mockSnapshotManager);
+
+ assertEquals("size", 2, log.size());
+ }
+
+ @Test
+ public void testRemoveFromAndPersist() throws Exception {
+
+ ReplicatedLog log = ReplicatedLogImpl.newInstance(context, mockBehavior);
+
+ log.append(new MockReplicatedLogEntry(1, 0, new MockPayload("0")));
+ log.append(new MockReplicatedLogEntry(1, 1, new MockPayload("1")));
+ log.append(new MockReplicatedLogEntry(1, 2, new MockPayload("2")));
+
+ log.removeFromAndPersist(1);
+
+ DeleteEntries deleteEntries = new DeleteEntries(1);
+ verifyPersist(deleteEntries, match(deleteEntries));
+
+ assertEquals("size", 1, log.size());
+
+ reset(mockPersistence);
+
+ log.removeFromAndPersist(1);
+
+ verifyNoMoreInteractions(mockPersistence);
+ }
+
+ public Matcher<DeleteEntries> match(final DeleteEntries actual){
+ return new BaseMatcher<DeleteEntries>() {
+ @Override
+ public boolean matches(Object o) {
+ DeleteEntries other = (DeleteEntries) o;
+ return actual.getFromIndex() == other.getFromIndex();
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("DeleteEntries: fromIndex: " + actual.getFromIndex());
+ }
+ };
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import akka.persistence.SaveSnapshotSuccess;
+import com.google.common.collect.ImmutableMap;
+import java.util.List;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+
+/**
+ * Tests replication and snapshots end-to-end using real RaftActors and behavior communication.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReplicationAndSnapshotsIntegrationTest extends AbstractRaftActorIntegrationTest {
+
+ private List<ReplicatedLogImplEntry> origLeaderJournal;
+
+ private MockPayload recoveredPayload0;
+ private MockPayload recoveredPayload1;
+ private MockPayload recoveredPayload2;
+ private MockPayload payload3;
+ private MockPayload payload4;
+ private MockPayload payload5;
+ private MockPayload payload6;
+ private MockPayload payload7;
+
+ @Test
+ public void runTest() throws Exception {
+ testLog.info("testReplicationAndSnapshots starting");
+
+ // Setup the persistent journal for the leader. We'll start up with 3 journal log entries (one less
+ // than the snapshotBatchCount).
+ long seqId = 1;
+ InMemoryJournal.addEntry(leaderId, seqId++, new UpdateElectionTerm(initialTerm, leaderId));
+ recoveredPayload0 = new MockPayload("zero");
+ InMemoryJournal.addEntry(leaderId, seqId++, new ReplicatedLogImplEntry(0, initialTerm, recoveredPayload0));
+ recoveredPayload1 = new MockPayload("one");
+ InMemoryJournal.addEntry(leaderId, seqId++, new ReplicatedLogImplEntry(1, initialTerm, recoveredPayload1));
+ recoveredPayload2 = new MockPayload("two");
+ InMemoryJournal.addEntry(leaderId, seqId++, new ReplicatedLogImplEntry(2, initialTerm, recoveredPayload2));
+ InMemoryJournal.addEntry(leaderId, seqId++, new ApplyJournalEntries(2));
+
+ origLeaderJournal = InMemoryJournal.get(leaderId, ReplicatedLogImplEntry.class);
+
+ // Create the leader and 2 follower actors and verify initial syncing of the followers after leader
+ // persistence recovery.
+
+ follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower2Id, testActorPath(follower2Id)), newFollowerConfigParams());
+
+ follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
+
+ peerAddresses = ImmutableMap.<String, String>builder().
+ put(follower1Id, follower1Actor.path().toString()).
+ put(follower2Id, follower2Actor.path().toString()).build();
+
+ leaderConfigParams = newLeaderConfigParams();
+ leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
+
+ follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
+ follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
+ leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
+
+ leaderContext = leaderActor.underlyingActor().getRaftActorContext();
+
+ verifyLeaderRecoveryAndInitialization();
+
+ testFirstSnapshot();
+
+ testSubsequentReplications();
+
+ testSecondSnapshot();
+
+ testLeaderReinstatement();
+
+ testLog.info("testReplicationAndSnapshots ending");
+ }
+
+ /**
+ * Verify the expected leader is elected as the leader and verify initial syncing of the followers
+ * from the leader's persistence recovery.
+ */
+ void verifyLeaderRecoveryAndInitialization() {
+ testLog.info("verifyLeaderRecoveryAndInitialization starting");
+
+ waitUntilLeader(leaderActor);
+
+ currentTerm = leaderContext.getTermInformation().getCurrentTerm();
+ assertEquals("Current term > " + initialTerm, true, currentTerm > initialTerm);
+
+ leader = leaderActor.underlyingActor().getCurrentBehavior();
+
+ // The followers should receive AppendEntries for each leader log entry that was recovered from
+ // persistence and apply each one.
+ List<ApplyState> applyStates = MessageCollectorActor.expectMatching(
+ follower1CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, initialTerm, 0, recoveredPayload0);
+ verifyApplyState(applyStates.get(1), null, null, initialTerm, 1, recoveredPayload1);
+ verifyApplyState(applyStates.get(2), null, null, initialTerm, 2, recoveredPayload2);
+
+ // Verify follower 1 applies a log entry for at least the last entry index.
+ verifyApplyJournalEntries(follower1CollectorActor, 2);
+
+ applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, initialTerm, 0, recoveredPayload0);
+ verifyApplyState(applyStates.get(1), null, null, initialTerm, 1, recoveredPayload1);
+ verifyApplyState(applyStates.get(2), null, null, initialTerm, 2, recoveredPayload2);
+
+ // Verify follower 1]2 applies a log entry for at least the last entry index.
+ verifyApplyJournalEntries(follower2CollectorActor, 2);
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+
+ // The leader should have performed fake snapshots due to the follower's AppendEntriesReplies and
+ // trimmed the in-memory log so that only the last entry remains.
+ assertEquals("Leader snapshot term", initialTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 2, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 2, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 2, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 1, leader.getReplicatedToAllIndex());
+
+ // Verify the follower's persisted journal log.
+ verifyPersistedJournal(follower1Id, origLeaderJournal);
+ verifyPersistedJournal(follower2Id, origLeaderJournal);
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+
+ testLog.info("verifyLeaderRecoveryAndInitialization ending");
+ }
+
+ /**
+ * Send a payload to the TestRaftActor to persist and replicate. Since snapshotBatchCount is set to
+ * 4 and we already have 3 entries in the journal log, this should initiate a snapshot. In this
+ * scenario, the follower consensus and application of state is delayed until after the snapshot
+ * completes.
+ * @throws Exception
+ */
+ private void testFirstSnapshot() throws Exception {
+ testLog.info("testFirstSnapshot starting");
+
+ expSnapshotState.add(recoveredPayload0);
+ expSnapshotState.add(recoveredPayload1);
+ expSnapshotState.add(recoveredPayload2);
+
+ // Delay the consensus by temporarily dropping the AppendEntries to both followers.
+ follower1Actor.underlyingActor().startDropMessages(AppendEntries.class);
+ follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
+
+ // Send the payload.
+ payload3 = sendPayloadData(leaderActor, "three");
+
+ // Wait for snapshot complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ // The snapshot index should not be advanced nor the log trimmed because replicatedToAllIndex
+ // is behind due the followers not being replicated yet via AppendEntries.
+ assertEquals("Leader snapshot term", initialTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 2, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 3, leaderContext.getReplicatedLog().lastIndex());
+
+ // Verify the persisted snapshot in the leader. This should reflect the advanced snapshot index as
+ // the last applied log entry (2) even though the leader hasn't yet advanced its cached snapshot index.
+ List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
+ assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
+ verifySnapshot("Persisted", persistedSnapshots.get(0), initialTerm, 2, currentTerm, 3);
+ List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
+ verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 3, payload3);
+
+ // The leader's persisted journal log should be cleared since we snapshotted.
+ List<ReplicatedLogImplEntry> persistedLeaderJournal = InMemoryJournal.get(leaderId, ReplicatedLogImplEntry.class);
+ assertEquals("Persisted journal log size", 0, persistedLeaderJournal.size());
+
+ // Allow AppendEntries to both followers to proceed. This should catch up the followers and cause a
+ // "fake" snapshot in the leader to advance the snapshot index to 2. Also the state should be applied
+ // in all members (via ApplyState).
+ follower1Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+ follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+
+ ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
+ verifyApplyState(applyState, leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
+
+ verifyApplyJournalEntries(leaderCollectorActor, 3);
+
+ assertEquals("Leader commit index", 3, leaderContext.getCommitIndex());
+
+ applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
+ verifyApplyState(applyState, null, null, currentTerm, 3, payload3);
+
+ verifyApplyJournalEntries(follower1CollectorActor, 3);
+
+ applyState = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, ApplyState.class);
+ verifyApplyState(applyState, null, null, currentTerm, 3, payload3);
+
+ verifyApplyJournalEntries(follower2CollectorActor, 3);
+
+ assertEquals("Leader snapshot term", initialTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 2, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader commit index", 3, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 3, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 2, leader.getReplicatedToAllIndex());
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+
+ testLog.info("testFirstSnapshot ending");
+ }
+
+ /**
+ * Send 3 more payload instances and verify they get applied by all members.
+ */
+ private void testSubsequentReplications() {
+ testLog.info("testSubsequentReplications starting");
+
+ payload4 = sendPayloadData(leaderActor, "four");
+ payload5 = sendPayloadData(leaderActor, "five");
+ payload6 = sendPayloadData(leaderActor, "six");
+
+ // Verify the leader applies the states.
+ List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), leaderCollectorActor, payload4.toString(), currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(1), leaderCollectorActor, payload5.toString(), currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(2), leaderCollectorActor, payload6.toString(), currentTerm, 6, payload6);
+
+ // Verify the leader applies a log entry for at least the last entry index.
+ verifyApplyJournalEntries(leaderCollectorActor, 6);
+
+ // The leader should have performed fake snapshots due to the follower's AppendEntriesReplies and
+ // trimmed the in-memory log so that only the last entry remains.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 5, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 6, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 6, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 6, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 5, leader.getReplicatedToAllIndex());
+
+ // Verify follower 1 applies the states.
+ applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);
+
+ // Verify follower 1 applies a log entry for at least the last entry index.
+ verifyApplyJournalEntries(follower1CollectorActor, 6);
+
+ // Verify follower 2 applies the states.
+ applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);
+
+ // Verify follower 2 applies a log entry for at least the last entry index.
+ verifyApplyJournalEntries(follower2CollectorActor, 6);
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+
+ testLog.info("testSubsequentReplications ending");
+ }
+
+ /**
+ * Send one more payload to trigger another snapshot. In this scenario, we delay the snapshot until
+ * consensus occurs and the leader applies the state.
+ * @throws Exception
+ */
+ private void testSecondSnapshot() throws Exception {
+ testLog.info("testSecondSnapshot starting");
+
+ expSnapshotState.add(payload3);
+ expSnapshotState.add(payload4);
+ expSnapshotState.add(payload5);
+ expSnapshotState.add(payload6);
+
+ // Delay the CaptureSnapshot message to the leader actor.
+ leaderActor.underlyingActor().startDropMessages(CaptureSnapshotReply.class);
+
+ // Send the payload.
+ payload7 = sendPayloadData(leaderActor, "seven");
+
+ // Capture the CaptureSnapshotReply message so we can send it later.
+ CaptureSnapshotReply captureSnapshotReply = MessageCollectorActor.expectFirstMatching(leaderCollectorActor,
+ CaptureSnapshotReply.class);
+
+ // Wait for the state to be applied in the leader.
+ ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
+ verifyApplyState(applyState, leaderCollectorActor, payload7.toString(), currentTerm, 7, payload7);
+
+ // At this point the leader has applied the new state but the cached snapshot index should not be
+ // advanced by a "fake" snapshot because we're in the middle of a snapshot. We'll wait for at least
+ // one more heartbeat AppendEntriesReply to ensure this does not occur.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 5, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 2, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 7, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 5, leader.getReplicatedToAllIndex());
+
+ // Now deliver the CaptureSnapshotReply.
+ leaderActor.underlyingActor().stopDropMessages(CaptureSnapshotReply.class);
+ leaderActor.tell(captureSnapshotReply, leaderActor);
+
+ // Wait for snapshot complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ // Wait for another heartbeat AppendEntriesReply. This should cause a "fake" snapshot to advance the
+ // snapshot index and trimmed the log since we're no longer in a snapshot.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 6, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
+
+ // Verify the persisted snapshot. This should reflect the snapshot index as the last applied
+ // log entry (7) and shouldn't contain any unapplied entries as we capture persisted the snapshot data
+ // when the snapshot is created (ie when the CaptureSnapshot is processed).
+ List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
+ assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
+ verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 6, currentTerm, 7);
+ List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
+ verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 7, payload7);
+
+ // The leader's persisted journal log should be cleared since we did a snapshot.
+ List<ReplicatedLogImplEntry> persistedLeaderJournal = InMemoryJournal.get(
+ leaderId, ReplicatedLogImplEntry.class);
+ assertEquals("Persisted journal log size", 0, persistedLeaderJournal.size());
+
+ // Verify the followers apply all 4 new log entries.
+ List<ApplyState> applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 4);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);
+ verifyApplyState(applyStates.get(3), null, null, currentTerm, 7, payload7);
+
+ applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 4);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 6, payload6);
+ verifyApplyState(applyStates.get(3), null, null, currentTerm, 7, payload7);
+
+ // Verify the follower's snapshot index has also advanced. (after another AppendEntries heartbeat
+ // to be safe).
+
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
+ RaftActorContext follower1Context = follower1Actor.underlyingActor().getRaftActorContext();
+ assertEquals("Follower 1 snapshot term", currentTerm, follower1Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 1 snapshot index", 6, follower1Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 1 journal log size", 1, follower1Context.getReplicatedLog().size());
+ assertEquals("Follower 1 journal last index", 7, follower1Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 1 commit index", 7, follower1Context.getCommitIndex());
+
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+ MessageCollectorActor.expectFirstMatching(follower2CollectorActor, AppendEntries.class);
+ RaftActorContext follower2Context = follower2Actor.underlyingActor().getRaftActorContext();
+ assertEquals("Follower 2 snapshot term", currentTerm, follower2Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 2 snapshot index", 6, follower2Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 2 journal log size", 1, follower2Context.getReplicatedLog().size());
+ assertEquals("Follower 2 journal last index", 7, follower2Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 2 commit index", 7, follower2Context.getCommitIndex());
+
+ expSnapshotState.add(payload7);
+
+ testLog.info("testSecondSnapshot ending");
+ }
+
+ /**
+ * Kill the leader actor, reinstate it and verify the recovered journal.
+ */
+ private void testLeaderReinstatement() {
+ testLog.info("testLeaderReinstatement starting");
+
+ killActor(leaderActor);
+
+ leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
+
+ leaderActor.underlyingActor().waitForRecoveryComplete();
+
+ leaderContext = leaderActor.underlyingActor().getRaftActorContext();
+
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 6, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 7, leaderContext.getLastApplied());
+ verifyReplicatedLogEntry(leaderContext.getReplicatedLog().last(), currentTerm, 7, payload7);
+
+ testLog.info("testLeaderReinstatement ending");
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertEquals;
+import akka.persistence.SaveSnapshotSuccess;
+import com.google.common.collect.ImmutableMap;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+
+/**
+ * Tests replication and snapshots end-to-end using real RaftActors and behavior communication with a
+ * lagging follower.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends AbstractRaftActorIntegrationTest {
+
+ private MockPayload payload9;
+ private MockPayload payload11;
+ private MockPayload payload12;
+ private MockPayload payload13;
+
+ @Test
+ public void runTest() throws Exception {
+ testLog.info("testReplicationAndSnapshotsWithLaggingFollower starting");
+
+ leaderId = factory.generateActorId("leader");
+ follower1Id = factory.generateActorId("follower");
+ follower2Id = factory.generateActorId("follower");
+
+ // Setup the persistent journal for the leader - just an election term and no journal/snapshots.
+ InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(initialTerm, leaderId));
+
+ // Create the leader and 2 follower actors.
+ follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower2Id, testActorPath(follower2Id)), newFollowerConfigParams());
+
+ follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+ follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
+
+ Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().
+ put(follower1Id, follower1Actor.path().toString()).
+ put(follower2Id, follower2Actor.path().toString()).build();
+
+ leaderConfigParams = newLeaderConfigParams();
+ leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
+
+ waitUntilLeader(leaderActor);
+
+ leaderContext = leaderActor.underlyingActor().getRaftActorContext();
+ leader = leaderActor.underlyingActor().getCurrentBehavior();
+
+ follower1Context = follower1Actor.underlyingActor().getRaftActorContext();
+ follower1 = follower1Actor.underlyingActor().getCurrentBehavior();
+
+ follower2Context = follower2Actor.underlyingActor().getRaftActorContext();
+ follower2 = follower2Actor.underlyingActor().getCurrentBehavior();
+
+ currentTerm = leaderContext.getTermInformation().getCurrentTerm();
+ assertEquals("Current term > " + initialTerm, true, currentTerm > initialTerm);
+
+ leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
+ follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
+ follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
+
+ testLog.info("Leader created and elected");
+
+ testInitialReplications();
+
+ testSubsequentReplicationsAndSnapshots();
+
+ testLeaderSnapshotTriggeredByMemoryThresholdExceeded();
+
+ testInstallSnapshotToLaggingFollower();
+
+ verifyNoSubsequentSnapshotAfterMemoryThresholdExceededSnapshot();
+
+ testFinalReplicationsAndSnapshot();
+
+ testLeaderReinstatement();
+
+ testLog.info("testReplicationAndSnapshotsWithLaggingFollower ending");
+ }
+
+ /**
+ * Send 3 payload instances with follower 2 temporarily lagging.
+ *
+ * @throws Exception
+ */
+ private void testInitialReplications() throws Exception {
+
+ testLog.info("testInitialReplications starting: sending 2 new payloads");
+
+ // Simulate lagging by dropping AppendEntries messages in follower 2.
+ follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
+
+ // Send the payloads.
+ MockPayload payload0 = sendPayloadData(leaderActor, "zero");
+ MockPayload payload1 = sendPayloadData(leaderActor, "one");
+ MockPayload payload2 = sendPayloadData(leaderActor, "two");
+
+ // Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
+ List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), leaderCollectorActor, payload0.toString(), currentTerm, 0, payload0);
+ verifyApplyState(applyStates.get(1), leaderCollectorActor, payload1.toString(), currentTerm, 1, payload1);
+ verifyApplyState(applyStates.get(2), leaderCollectorActor, payload2.toString(), currentTerm, 2, payload2);
+
+ // Verify follower 1 applies each log entry.
+ applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 0, payload0);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 1, payload1);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 2, payload2);
+
+ // Ensure there's at least 1 more heartbeat.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+
+ // The leader should not have performed fake snapshots to trim the log because the entries have not
+ // been replicated to follower 2.
+ assertEquals("Leader snapshot term", -1, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", -1, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 3, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 2, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 2, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 2, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", -1, leader.getReplicatedToAllIndex());
+
+ testLog.info("Step 3: new entries applied - re-enabling follower {}", follower2Id);
+
+ // Now stop dropping AppendEntries in follower 2.
+ follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+
+ // Verify follower 2 applies each log entry.
+ applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 0, payload0);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 1, payload1);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 2, payload2);
+
+ // Ensure there's at least 1 more heartbeat.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+
+ // The leader should now have performed fake snapshots to trim the log.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 1, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 2, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 2, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 2, leaderContext.getLastApplied());
+ // Note - replicatedToAllIndex always lags 1 behind last applied since it trims the log up to the
+ // last applied index. The next entry successfully replicated to followers woild advance it.
+ assertEquals("Leader replicatedToAllIndex", 1, leader.getReplicatedToAllIndex());
+
+ // Even though follower 2 lagged behind, the leader should not have tried to install a snapshot
+ // to catch it up because no snapshotting was done so the follower's next index was present in the log.
+ InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
+ InstallSnapshot.class);
+ Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+
+ expSnapshotState.add(payload0);
+ expSnapshotState.add(payload1);
+ expSnapshotState.add(payload2);
+
+ testLog.info("testInitialReplications complete");
+ }
+
+ /**
+ * Send 5 more payloads with follower 2 lagging. Since the snapshotBatch count is 4, this should cause
+ * 2 leader snapshots and follower 2's log will be behind by 5 entries.
+ *
+ * @throws Exception
+ */
+ private void testSubsequentReplicationsAndSnapshots() throws Exception {
+ testLog.info("testSubsequentReplicationsAndSnapshots starting: sending first payload, replicatedToAllIndex: {}",
+ leader.getReplicatedToAllIndex());
+
+ follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
+
+ // Send the first payload - this should cause the first snapshot.
+ MockPayload payload3 = sendPayloadData(leaderActor, "three");
+
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ expSnapshotState.add(payload3);
+
+ testLog.info("testSubsequentReplicationsAndSnapshots: sending 4 more payloads");
+
+ // Send the next 4. The last one should cause the second snapshot.
+ MockPayload payload4 = sendPayloadData(leaderActor, "four");
+ MockPayload payload5 = sendPayloadData(leaderActor, "five");
+ MockPayload payload6 = sendPayloadData(leaderActor, "six");
+ MockPayload payload7 = sendPayloadData(leaderActor, "seven");
+
+ // Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
+ List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 5);
+ verifyApplyState(applyStates.get(0), leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
+ verifyApplyState(applyStates.get(1), leaderCollectorActor, payload4.toString(), currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(2), leaderCollectorActor, payload5.toString(), currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(3), leaderCollectorActor, payload6.toString(), currentTerm, 6, payload6);
+ verifyApplyState(applyStates.get(4), leaderCollectorActor, payload7.toString(), currentTerm, 7, payload7);
+
+ // Verify follower 1 applies each log entry.
+ applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 5);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 3, payload3);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(3), null, null, currentTerm, 6, payload6);
+ verifyApplyState(applyStates.get(4), null, null, currentTerm, 7, payload7);
+
+ // Wait for snapshot completion.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ // The first new entry applied should have caused the leader to advanced the snapshot index to the
+ // last previously applied index (2) that was replicated to all followers.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 2, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 5, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 7, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 7, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 2, leader.getReplicatedToAllIndex());
+
+ // Now stop dropping AppendEntries in follower 2.
+ follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+
+ // Verify follower 2 applies each log entry.
+ applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 5);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 3, payload3);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 4, payload4);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 5, payload5);
+ verifyApplyState(applyStates.get(3), null, null, currentTerm, 6, payload6);
+ verifyApplyState(applyStates.get(4), null, null, currentTerm, 7, payload7);
+
+ // Ensure there's at least 1 more heartbeat.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+
+ // The leader should now have performed fake snapshots to advance the snapshot index and to trim
+ // the log. In addition replicatedToAllIndex should've advanced.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 6, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 7, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader replicatedToAllIndex", 6, leader.getReplicatedToAllIndex());
+
+ // Verify the leader's persisted snapshot.
+ List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
+ assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
+ verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 3, currentTerm, 7);
+ List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 4, unAppliedEntry.size());
+ verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 4, payload4);
+ verifyReplicatedLogEntry(unAppliedEntry.get(1), currentTerm, 5, payload5);
+ verifyReplicatedLogEntry(unAppliedEntry.get(2), currentTerm, 6, payload6);
+ verifyReplicatedLogEntry(unAppliedEntry.get(3), currentTerm, 7, payload7);
+
+ // Even though follower 2's log was behind by 5 entries and 2 snapshots were done, the leader
+ // should not have tried to install a snapshot to catch it up because replicatedToAllIndex was also
+ // behind. Instead of installing a snapshot the leader would've sent AppendEntries with the log entries.
+ InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor, InstallSnapshot.class);
+ Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+
+ // Verify follower 1's log and snapshot indexes.
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
+ assertEquals("Follower 1 snapshot term", currentTerm, follower1Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 1 snapshot index", 6, follower1Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 1 journal log size", 1, follower1Context.getReplicatedLog().size());
+ assertEquals("Follower 1 journal last index", 7, follower1Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 1 commit index", 7, follower1Context.getCommitIndex());
+ assertEquals("Follower 1 last applied", 7, follower1Context.getLastApplied());
+ assertEquals("Follower 1 replicatedToAllIndex", 6, follower1.getReplicatedToAllIndex());
+
+ // Verify follower 2's log and snapshot indexes.
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+ MessageCollectorActor.expectFirstMatching(follower2CollectorActor, AppendEntries.class);
+ assertEquals("Follower 2 snapshot term", currentTerm, follower2Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 2 snapshot index", 6, follower2Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 2 journal log size", 1, follower2Context.getReplicatedLog().size());
+ assertEquals("Follower 2 journal last index", 7, follower2Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 2 commit index", 7, follower2Context.getCommitIndex());
+ assertEquals("Follower 2 last applied", 7, follower2Context.getLastApplied());
+ assertEquals("Follower 2 replicatedToAllIndex", 6, follower2.getReplicatedToAllIndex());
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+
+ expSnapshotState.add(payload4);
+ expSnapshotState.add(payload5);
+ expSnapshotState.add(payload6);
+ expSnapshotState.add(payload7);
+
+ testLog.info("testSubsequentReplicationsAndSnapshots complete");
+ }
+
+ /**
+ * Send a couple more payloads with follower 2 lagging. The last payload will have a large enough size
+ * to trigger a leader snapshot.
+ *
+ * @throws Exception
+ */
+ private void testLeaderSnapshotTriggeredByMemoryThresholdExceeded() throws Exception {
+ testLog.info("testLeaderSnapshotTriggeredByMemoryThresholdExceeded starting: sending 3 payloads, replicatedToAllIndex: {}",
+ leader.getReplicatedToAllIndex());
+
+ leaderActor.underlyingActor().setMockTotalMemory(1000);
+
+ // We'll expect a ReplicatedLogImplEntry message and an ApplyJournalEntries message added to the journal.
+ InMemoryJournal.addWriteMessagesCompleteLatch(leaderId, 2);
+
+ follower2Actor.underlyingActor().startDropMessages(AppendEntries.class);
+
+ // Send a payload with a large relative size but not enough to trigger a snapshot.
+ MockPayload payload8 = sendPayloadData(leaderActor, "eight", 500);
+
+ // Verify the leader got consensus and applies the first log entry even though follower 2 didn't respond.
+ List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 1);
+ verifyApplyState(applyStates.get(0), leaderCollectorActor, payload8.toString(), currentTerm, 8, payload8);
+
+ // Wait for all the ReplicatedLogImplEntry and ApplyJournalEntries messages to be added to the journal
+ // before the snapshot so the snapshot sequence # will be higher to ensure the snapshot gets
+ // purged from the snapshot store after subsequent snapshots.
+ InMemoryJournal.waitForWriteMessagesComplete(leaderId);
+
+ // Verify a snapshot is not triggered.
+ CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class);
+ Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+
+ expSnapshotState.add(payload8);
+
+ // Send another payload with a large enough relative size in combination with the last payload
+ // that exceeds the memory threshold (70% * 1000 = 700) - this should do a snapshot.
+ payload9 = sendPayloadData(leaderActor, "nine", 201);
+
+ // Verify the leader applies the last log entry.
+ applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 2);
+ verifyApplyState(applyStates.get(1), leaderCollectorActor, payload9.toString(), currentTerm, 9, payload9);
+
+ // Verify follower 1 applies each log entry.
+ applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 2);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 8, payload8);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 9, payload9);
+
+ // A snapshot should've occurred - wait for it to complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ // Because the snapshot was triggered by exceeding the memory threshold the leader should've advanced
+ // the snapshot index to the last applied index and trimmed the log even though the entries weren't
+ // replicated to all followers.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 8, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 9, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 9, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 9, leaderContext.getLastApplied());
+ // Note: replicatedToAllIndex should not be advanced since log entries 8 and 9 haven't yet been
+ // replicated to follower 2.
+ assertEquals("Leader replicatedToAllIndex", 7, leader.getReplicatedToAllIndex());
+
+ // Verify the leader's persisted snapshot.
+ List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
+ assertEquals("Persisted snapshots size", 1, persistedSnapshots.size());
+ verifySnapshot("Persisted", persistedSnapshots.get(0), currentTerm, 8, currentTerm, 9);
+ List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshots.get(0).getUnAppliedEntries();
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
+ verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 9, payload9);
+
+ testLog.info("testLeaderSnapshotTriggeredByMemoryThresholdExceeded ending");
+ }
+
+ /**
+ * Send another payload to verify another snapshot is not done since the last snapshot trimmed the
+ * first log entry so the memory threshold should not be exceeded.
+ *
+ * @throws Exception
+ */
+ private void verifyNoSubsequentSnapshotAfterMemoryThresholdExceededSnapshot() throws Exception {
+ ApplyState applyState;
+ CaptureSnapshot captureSnapshot;
+
+ MockPayload payload10 = sendPayloadData(leaderActor, "ten");
+
+ // Verify the leader applies the state.
+ applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
+ verifyApplyState(applyState, leaderCollectorActor, payload10.toString(), currentTerm, 10, payload10);
+
+ captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class);
+ Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+
+ // Verify the follower 1 applies the state.
+ applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
+ verifyApplyState(applyState, null, null, currentTerm, 10, payload10);
+
+ // Verify the follower 2 applies the state.
+ applyState = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, ApplyState.class);
+ verifyApplyState(applyState, null, null, currentTerm, 10, payload10);
+
+ // Verify the leader's state.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 9, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 10, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 10, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 10, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 9, leader.getReplicatedToAllIndex());
+
+ // Verify follower 1's state.
+ assertEquals("Follower 1 snapshot term", currentTerm, follower1Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 1 snapshot index", 9, follower1Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 1 journal log size", 1, follower1Context.getReplicatedLog().size());
+ assertEquals("Follower 1 journal last index", 10, follower1Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 1 commit index", 10, follower1Context.getCommitIndex());
+ assertEquals("Follower 1 last applied", 10, follower1Context.getLastApplied());
+ assertEquals("Follower 1 replicatedToAllIndex", 9, follower1.getReplicatedToAllIndex());
+
+ // Verify follower 2's state.
+ assertEquals("Follower 2 snapshot term", currentTerm, follower2Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 2 snapshot index", 9, follower2Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 2 journal log size", 1, follower2Context.getReplicatedLog().size());
+ assertEquals("Follower 2 journal last index", 10, follower2Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 2 commit index", 10, follower2Context.getCommitIndex());
+ assertEquals("Follower 2 last applied", 10, follower2Context.getLastApplied());
+ assertEquals("Follower 2 replicatedToAllIndex", 9, follower2.getReplicatedToAllIndex());
+
+ // Revert back to JVM total memory.
+ leaderActor.underlyingActor().setMockTotalMemory(0);
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+
+ expSnapshotState.add(payload10);
+ }
+
+ /**
+ * Following a snapshot due memory threshold exceeded, resume the lagging follower and verify it receives
+ * an install snapshot from the leader.
+ *
+ * @throws Exception
+ */
+ private void testInstallSnapshotToLaggingFollower() throws Exception {
+ List<Snapshot> persistedSnapshots;
+ List<ReplicatedLogEntry> unAppliedEntry;
+ ApplyState applyState;
+ ApplySnapshot applySnapshot;
+ InstallSnapshot installSnapshot;
+ InstallSnapshotReply installSnapshotReply;
+
+ expSnapshotState.add(payload9);
+
+ // Now stop dropping AppendEntries in follower 2.
+ follower2Actor.underlyingActor().stopDropMessages(AppendEntries.class);
+
+ installSnapshot = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, InstallSnapshot.class);
+ assertEquals("InstallSnapshot getTerm", currentTerm, installSnapshot.getTerm());
+ assertEquals("InstallSnapshot getLeaderId", leaderId, installSnapshot.getLeaderId());
+ assertEquals("InstallSnapshot getChunkIndex", 1, installSnapshot.getChunkIndex());
+ assertEquals("InstallSnapshot getTotalChunks", 1, installSnapshot.getTotalChunks());
+ assertEquals("InstallSnapshot getLastIncludedTerm", currentTerm, installSnapshot.getLastIncludedTerm());
+ assertEquals("InstallSnapshot getLastIncludedIndex", 8, installSnapshot.getLastIncludedIndex());
+ //assertArrayEquals("InstallSnapshot getData", snapshot, installSnapshot.getData().toByteArray());
+
+ installSnapshotReply = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, InstallSnapshotReply.class);
+ assertEquals("InstallSnapshotReply getTerm", currentTerm, installSnapshotReply.getTerm());
+ assertEquals("InstallSnapshotReply getChunkIndex", 1, installSnapshotReply.getChunkIndex());
+ assertEquals("InstallSnapshotReply getFollowerId", follower2Id, installSnapshotReply.getFollowerId());
+ assertEquals("InstallSnapshotReply isSuccess", true, installSnapshotReply.isSuccess());
+
+ // Verify follower 2 applies the snapshot.
+ applySnapshot = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, ApplySnapshot.class);
+ verifySnapshot("Follower 2", applySnapshot.getSnapshot(), currentTerm, 8, currentTerm, 8);
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 0, applySnapshot.getSnapshot().getUnAppliedEntries().size());
+
+ // Verify follower 2 only applies the second log entry (9) as the first one (8) was in the snapshot.
+ applyState = MessageCollectorActor.expectFirstMatching(follower2CollectorActor, ApplyState.class);
+ verifyApplyState(applyState, null, null, currentTerm, 9, payload9);
+
+ // Wait for the snapshot to complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ // Ensure there's at least 1 more heartbeat.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+
+ // The leader should now have performed fake snapshots to advance the snapshot index and to trim
+ // the log. In addition replicatedToAllIndex should've advanced.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 8, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader commit index", 9, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 9, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 8, leader.getReplicatedToAllIndex());
+
+ // Verify the leader's persisted snapshot. The previous snapshot (currently) won't be deleted from
+ // the snapshot store because the second snapshot was initiated by the follower install snapshot and
+ // not because the batch count was reached so the persisted journal sequence number wasn't advanced
+ // far enough to cause the previous snapshot to be deleted. This is because
+ // RaftActor#trimPersistentData subtracts the snapshotBatchCount from the snapshot's sequence number.
+ // This is OK - the next snapshot should delete it. In production, even if the system restarted
+ // before another snapshot, they would both get applied which wouldn't hurt anything.
+ persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
+ Assert.assertTrue("Expected at least 1 persisted snapshots", persistedSnapshots.size() > 0);
+ Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1);
+ verifySnapshot("Persisted", persistedSnapshot, currentTerm, 9, currentTerm, 9);
+ unAppliedEntry = persistedSnapshot.getUnAppliedEntries();
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 0, unAppliedEntry.size());
+
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.clearMessages(follower1CollectorActor);
+ MessageCollectorActor.clearMessages(follower2CollectorActor);
+ }
+
+ /**
+ * Do another round of payloads and snapshot to verify replicatedToAllIndex gets back on track and
+ * snapshots works as expected after doing a follower snapshot. In this step we don't lag a follower.
+ * @throws Exception
+ */
+ private void testFinalReplicationsAndSnapshot() throws Exception {
+ List<ApplyState> applyStates;
+ ApplyState applyState;
+
+ testLog.info("testFinalReplicationsAndSnapshot starting: replicatedToAllIndex: {}", leader.getReplicatedToAllIndex());
+
+ // Send another payload - a snapshot should occur.
+ payload11 = sendPayloadData(leaderActor, "eleven");
+
+ // Wait for the snapshot to complete.
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+ applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
+ verifyApplyState(applyState, leaderCollectorActor, payload11.toString(), currentTerm, 11, payload11);
+
+ // Verify the leader's last persisted snapshot (previous ones may not be purged yet).
+ List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
+ Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1);
+ verifySnapshot("Persisted", persistedSnapshot, currentTerm, 10, currentTerm, 11);
+ List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshot.getUnAppliedEntries();
+ assertEquals("Persisted Snapshot getUnAppliedEntries size", 1, unAppliedEntry.size());
+ verifyReplicatedLogEntry(unAppliedEntry.get(0), currentTerm, 11, payload11);
+
+ // Send a couple more payloads.
+ payload12 = sendPayloadData(leaderActor, "twelve");
+ payload13 = sendPayloadData(leaderActor, "thirteen");
+
+ // Verify the leader applies the 2 log entries.
+ applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(1), leaderCollectorActor, payload12.toString(), currentTerm, 12, payload12);
+ verifyApplyState(applyStates.get(2), leaderCollectorActor, payload13.toString(), currentTerm, 13, payload13);
+
+ // Verify the leader applies a log entry for at least the last entry index.
+ verifyApplyJournalEntries(leaderCollectorActor, 13);
+
+ // Ensure there's at least 1 more heartbeat to trim the log.
+ MessageCollectorActor.clearMessages(leaderCollectorActor);
+ MessageCollectorActor.expectFirstMatching(leaderCollectorActor, AppendEntriesReply.class);
+
+ // Verify the leader's final snapshot index et al.
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 12, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 1, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 13, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 13, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 13, leaderContext.getLastApplied());
+ assertEquals("Leader replicatedToAllIndex", 12, leader.getReplicatedToAllIndex());
+
+ InMemoryJournal.dumpJournal(leaderId);
+
+ // Verify the leaders's persisted journal log - should only contain the last 2 ReplicatedLogEntries
+ // added after the snapshot as the persisted journal should've been purged to the snapshot
+ // sequence number.
+ verifyPersistedJournal(leaderId, Arrays.asList(new ReplicatedLogImplEntry(12, currentTerm, payload12),
+ new ReplicatedLogImplEntry(13, currentTerm, payload13)));
+
+ // Verify the leaders's persisted journal contains an ApplyJournalEntries for at least the last entry index.
+ List<ApplyJournalEntries> persistedApplyJournalEntries = InMemoryJournal.get(leaderId, ApplyJournalEntries.class);
+ boolean found = false;
+ for(ApplyJournalEntries entry: persistedApplyJournalEntries) {
+ if(entry.getToIndex() == 13) {
+ found = true;
+ break;
+ }
+ }
+
+ Assert.assertTrue(String.format("ApplyJournalEntries with index %d not found in leader's persisted journal", 13), found);
+
+ // Verify follower 1 applies the 2 log entries.
+ applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 11, payload11);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 12, payload12);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 13, payload13);
+
+ // Verify follower 1's log state.
+ assertEquals("Follower 1 snapshot term", currentTerm, follower1Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 1 snapshot index", 12, follower1Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 1 journal log size", 1, follower1Context.getReplicatedLog().size());
+ assertEquals("Follower 1 journal last index", 13, follower1Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 1 commit index", 13, follower1Context.getCommitIndex());
+ assertEquals("Follower 1 last applied", 13, follower1Context.getLastApplied());
+ assertEquals("Follower 1 replicatedToAllIndex", 12, follower1.getReplicatedToAllIndex());
+
+ // Verify follower 2 applies the 2 log entries.
+ applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, 3);
+ verifyApplyState(applyStates.get(0), null, null, currentTerm, 11, payload11);
+ verifyApplyState(applyStates.get(1), null, null, currentTerm, 12, payload12);
+ verifyApplyState(applyStates.get(2), null, null, currentTerm, 13, payload13);
+
+ // Verify follower 2's log state.
+ assertEquals("Follower 2 snapshot term", currentTerm, follower2Context.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Follower 2 snapshot index", 12, follower2Context.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Follower 2 journal log size", 1, follower2Context.getReplicatedLog().size());
+ assertEquals("Follower 2 journal last index", 13, follower2Context.getReplicatedLog().lastIndex());
+ assertEquals("Follower 2 commit index", 13, follower2Context.getCommitIndex());
+ assertEquals("Follower 2 last applied", 13, follower2Context.getLastApplied());
+ assertEquals("Follower 2 replicatedToAllIndex", 12, follower2.getReplicatedToAllIndex());
+
+ testLog.info("testFinalReplicationsAndSnapshot ending");
+ }
+
+ /**
+ * Kill the leader actor, reinstate it and verify the recovered journal.
+ */
+ private void testLeaderReinstatement() {
+ testLog.info("testLeaderReinstatement starting");
+
+ killActor(leaderActor);
+
+ leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
+
+ leaderActor.underlyingActor().startDropMessages(RequestVoteReply.class);
+
+ leaderContext = leaderActor.underlyingActor().getRaftActorContext();
+
+ leaderActor.underlyingActor().waitForRecoveryComplete();
+
+ assertEquals("Leader snapshot term", currentTerm, leaderContext.getReplicatedLog().getSnapshotTerm());
+ assertEquals("Leader snapshot index", 10, leaderContext.getReplicatedLog().getSnapshotIndex());
+ assertEquals("Leader journal log size", 3, leaderContext.getReplicatedLog().size());
+ assertEquals("Leader journal last index", 13, leaderContext.getReplicatedLog().lastIndex());
+ assertEquals("Leader commit index", 13, leaderContext.getCommitIndex());
+ assertEquals("Leader last applied", 13, leaderContext.getLastApplied());
+ verifyReplicatedLogEntry(leaderContext.getReplicatedLog().get(11), currentTerm, 11, payload11);
+ verifyReplicatedLogEntry(leaderContext.getReplicatedLog().get(12), currentTerm, 12, payload12);
+ verifyReplicatedLogEntry(leaderContext.getReplicatedLog().get(13), currentTerm, 13, payload13);
+
+ testLog.info("testLeaderReinstatement ending");
+ }
+}
--- /dev/null
+package org.opendaylight.controller.cluster.raft;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import akka.actor.ActorRef;
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.testkit.TestActorRef;
+import com.google.common.collect.ImmutableMap;
+import java.util.Arrays;
+import java.util.HashMap;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.SnapshotManager.LastAppliedTermInformationReader;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.slf4j.LoggerFactory;
+
+public class SnapshotManagerTest extends AbstractActorTest {
+
+ @Mock
+ private RaftActorContext mockRaftActorContext;
+
+ @Mock
+ private ConfigParams mockConfigParams;
+
+ @Mock
+ private ReplicatedLog mockReplicatedLog;
+
+ @Mock
+ private DataPersistenceProvider mockDataPersistenceProvider;
+
+ @Mock
+ private RaftActorBehavior mockRaftActorBehavior;
+
+ @Mock
+ private Procedure<Void> mockProcedure;
+
+ private SnapshotManager snapshotManager;
+
+ private TestActorFactory factory;
+
+ private TestActorRef<MessageCollectorActor> actorRef;
+
+ @Before
+ public void setUp(){
+ MockitoAnnotations.initMocks(this);
+
+ doReturn(new HashMap<>()).when(mockRaftActorContext).getPeerAddresses();
+ doReturn(mockConfigParams).when(mockRaftActorContext).getConfigParams();
+ doReturn(10L).when(mockConfigParams).getSnapshotBatchCount();
+ doReturn(mockReplicatedLog).when(mockRaftActorContext).getReplicatedLog();
+ doReturn("123").when(mockRaftActorContext).getId();
+ doReturn(mockDataPersistenceProvider).when(mockRaftActorContext).getPersistenceProvider();
+ doReturn("123").when(mockRaftActorBehavior).getLeaderId();
+
+ ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
+ doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
+ doReturn(5L).when(mockElectionTerm).getCurrentTerm();
+
+ snapshotManager = new SnapshotManager(mockRaftActorContext, LoggerFactory.getLogger(this.getClass()));
+ factory = new TestActorFactory(getSystem());
+
+ actorRef = factory.createTestActor(MessageCollectorActor.props(), factory.generateActorId("test-"));
+ doReturn(actorRef).when(mockRaftActorContext).getActor();
+
+ snapshotManager.setCreateSnapshotCallable(mockProcedure);
+ }
+
+ @After
+ public void tearDown(){
+ factory.close();
+ }
+
+ @Test
+ public void testConstruction(){
+ assertEquals(false, snapshotManager.isCapturing());
+ }
+
+ @Test
+ public void testCaptureToInstall() throws Exception {
+
+ // Force capturing toInstall = true
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(1, 0,
+ new MockRaftActorContext.MockPayload()), 0, "follower-1");
+
+ assertEquals(true, snapshotManager.isCapturing());
+
+ verify(mockProcedure).apply(null);
+
+ CaptureSnapshot captureSnapshot = snapshotManager.getCaptureSnapshot();
+
+ // LastIndex and LastTerm are picked up from the lastLogEntry
+ assertEquals(0L, captureSnapshot.getLastIndex());
+ assertEquals(1L, captureSnapshot.getLastTerm());
+
+ // Since the actor does not have any followers (no peer addresses) lastApplied will be from lastLogEntry
+ assertEquals(0L, captureSnapshot.getLastAppliedIndex());
+ assertEquals(1L, captureSnapshot.getLastAppliedTerm());
+
+ //
+ assertEquals(-1L, captureSnapshot.getReplicatedToAllIndex());
+ assertEquals(-1L, captureSnapshot.getReplicatedToAllTerm());
+ actorRef.underlyingActor().clear();
+ }
+
+ @Test
+ public void testCapture() throws Exception {
+ boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
+ new MockRaftActorContext.MockPayload()), 9);
+
+ assertTrue(capture);
+
+ assertEquals(true, snapshotManager.isCapturing());
+
+ verify(mockProcedure).apply(null);
+
+ CaptureSnapshot captureSnapshot = snapshotManager.getCaptureSnapshot();
+
+ // LastIndex and LastTerm are picked up from the lastLogEntry
+ assertEquals(9L, captureSnapshot.getLastIndex());
+ assertEquals(1L, captureSnapshot.getLastTerm());
+
+ // Since the actor does not have any followers (no peer addresses) lastApplied will be from lastLogEntry
+ assertEquals(9L, captureSnapshot.getLastAppliedIndex());
+ assertEquals(1L, captureSnapshot.getLastAppliedTerm());
+
+ //
+ assertEquals(-1L, captureSnapshot.getReplicatedToAllIndex());
+ assertEquals(-1L, captureSnapshot.getReplicatedToAllTerm());
+
+ actorRef.underlyingActor().clear();
+
+ }
+
+ @Test
+ public void testCaptureWithCreateProcedureError () throws Exception {
+ doThrow(new Exception("mock")).when(mockProcedure).apply(null);
+
+ boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
+ new MockRaftActorContext.MockPayload()), 9);
+
+ assertFalse(capture);
+
+ assertEquals(false, snapshotManager.isCapturing());
+
+ verify(mockProcedure).apply(null);
+ }
+
+ @Test
+ public void testIllegalCapture() throws Exception {
+ boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
+ new MockRaftActorContext.MockPayload()), 9);
+
+ assertTrue(capture);
+
+ verify(mockProcedure).apply(null);
+
+ reset(mockProcedure);
+
+ // This will not cause snapshot capture to start again
+ capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
+ new MockRaftActorContext.MockPayload()), 9);
+
+ assertFalse(capture);
+
+ verify(mockProcedure, never()).apply(null);
+ }
+
+ @Test
+ public void testPersistWhenReplicatedToAllIndexMinusOne(){
+ doReturn(7L).when(mockReplicatedLog).getSnapshotIndex();
+ doReturn(1L).when(mockReplicatedLog).getSnapshotTerm();
+
+ doReturn(ImmutableMap.builder().put("follower-1", "").build()).when(mockRaftActorContext).getPeerAddresses();
+
+ doReturn(8L).when(mockRaftActorContext).getLastApplied();
+
+ MockRaftActorContext.MockReplicatedLogEntry lastLogEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 3L, 9L, new MockRaftActorContext.MockPayload());
+
+ MockRaftActorContext.MockReplicatedLogEntry lastAppliedEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 2L, 8L, new MockRaftActorContext.MockPayload());
+
+ doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L);
+ doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
+
+ // when replicatedToAllIndex = -1
+ snapshotManager.capture(lastLogEntry, -1);
+
+ byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
+ snapshotManager.persist(bytes, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ ArgumentCaptor<Snapshot> snapshotArgumentCaptor = ArgumentCaptor.forClass(Snapshot.class);
+ verify(mockDataPersistenceProvider).saveSnapshot(snapshotArgumentCaptor.capture());
+
+ Snapshot snapshot = snapshotArgumentCaptor.getValue();
+
+ assertEquals("getLastTerm", 3L, snapshot.getLastTerm());
+ assertEquals("getLastIndex", 9L, snapshot.getLastIndex());
+ assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm());
+ assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex());
+ assertArrayEquals("getState", bytes, snapshot.getState());
+ assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries());
+
+ verify(mockReplicatedLog).snapshotPreCommit(7L, 1L);
+ }
+
+ @Test
+ public void testPersistWhenReplicatedToAllIndexNotMinus(){
+ doReturn(45L).when(mockReplicatedLog).getSnapshotIndex();
+ doReturn(6L).when(mockReplicatedLog).getSnapshotTerm();
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(replicatedLogEntry).when(mockReplicatedLog).get(9);
+ doReturn(6L).when(replicatedLogEntry).getTerm();
+ doReturn(9L).when(replicatedLogEntry).getIndex();
+
+ // when replicatedToAllIndex != -1
+ snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
+ new MockRaftActorContext.MockPayload()), 9);
+
+ byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
+ snapshotManager.persist(bytes, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ ArgumentCaptor<Snapshot> snapshotArgumentCaptor = ArgumentCaptor.forClass(Snapshot.class);
+ verify(mockDataPersistenceProvider).saveSnapshot(snapshotArgumentCaptor.capture());
+
+ Snapshot snapshot = snapshotArgumentCaptor.getValue();
+
+ assertEquals("getLastTerm", 6L, snapshot.getLastTerm());
+ assertEquals("getLastIndex", 9L, snapshot.getLastIndex());
+ assertEquals("getLastAppliedTerm", 6L, snapshot.getLastAppliedTerm());
+ assertEquals("getLastAppliedIndex", 9L, snapshot.getLastAppliedIndex());
+ assertArrayEquals("getState", bytes, snapshot.getState());
+ assertEquals("getUnAppliedEntries size", 0, snapshot.getUnAppliedEntries().size());
+
+ verify(mockReplicatedLog).snapshotPreCommit(9L, 6L);
+
+ verify(mockRaftActorBehavior).setReplicatedToAllIndex(9);
+ }
+
+
+ @Test
+ public void testPersistWhenReplicatedLogDataSizeGreaterThanThreshold(){
+ doReturn(Integer.MAX_VALUE).when(mockReplicatedLog).dataSize();
+
+ // when replicatedToAllIndex = -1
+ snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
+ new MockRaftActorContext.MockPayload()), -1);
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
+
+ verify(mockReplicatedLog).snapshotPreCommit(9L, 6L);
+ }
+
+ @Test
+ public void testPersistSendInstallSnapshot(){
+ doReturn(Integer.MAX_VALUE).when(mockReplicatedLog).dataSize();
+
+ // when replicatedToAllIndex = -1
+ boolean capture = snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ assertTrue(capture);
+
+ byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
+
+ snapshotManager.persist(bytes, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
+
+ verify(mockReplicatedLog).snapshotPreCommit(9L, 6L);
+
+ ArgumentCaptor<SendInstallSnapshot> sendInstallSnapshotArgumentCaptor
+ = ArgumentCaptor.forClass(SendInstallSnapshot.class);
+
+ verify(mockRaftActorBehavior).handleMessage(any(ActorRef.class), sendInstallSnapshotArgumentCaptor.capture());
+
+ SendInstallSnapshot sendInstallSnapshot = sendInstallSnapshotArgumentCaptor.getValue();
+
+ assertTrue(Arrays.equals(bytes, sendInstallSnapshot.getSnapshot().toByteArray()));
+ }
+
+ @Test
+ public void testCallingPersistWithoutCaptureWillDoNothing(){
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ verify(mockDataPersistenceProvider, never()).saveSnapshot(any(Snapshot.class));
+
+ verify(mockReplicatedLog, never()).snapshotPreCommit(9L, 6L);
+
+ verify(mockRaftActorBehavior, never()).handleMessage(any(ActorRef.class), any(SendInstallSnapshot.class));
+ }
+ @Test
+ public void testCallingPersistTwiceWillDoNoHarm(){
+ doReturn(Integer.MAX_VALUE).when(mockReplicatedLog).dataSize();
+
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
+
+ verify(mockReplicatedLog).snapshotPreCommit(9L, 6L);
+
+ verify(mockRaftActorBehavior).handleMessage(any(ActorRef.class), any(SendInstallSnapshot.class));
+ }
+
+ @Test
+ public void testCommit(){
+ doReturn(50L).when(mockDataPersistenceProvider).getLastSequenceNumber();
+
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ snapshotManager.commit(100L);
+
+ verify(mockReplicatedLog).snapshotCommit();
+
+ verify(mockDataPersistenceProvider).deleteMessages(50L);
+
+ ArgumentCaptor<SnapshotSelectionCriteria> criteriaCaptor = ArgumentCaptor.forClass(SnapshotSelectionCriteria.class);
+
+ verify(mockDataPersistenceProvider).deleteSnapshots(criteriaCaptor.capture());
+
+ assertEquals(90, criteriaCaptor.getValue().maxSequenceNr()); // sequenceNumber = 100
+ // config snapShotBatchCount = 10
+ // therefore maxSequenceNumber = 90
+ }
+
+ @Test
+ public void testCommitBeforePersist(){
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.commit(100L);
+
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ verify(mockDataPersistenceProvider, never()).deleteMessages(100L);
+
+ verify(mockDataPersistenceProvider, never()).deleteSnapshots(any(SnapshotSelectionCriteria.class));
+
+ }
+
+ @Test
+ public void testCommitBeforeCapture(){
+ snapshotManager.commit(100L);
+
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ verify(mockDataPersistenceProvider, never()).deleteMessages(anyLong());
+
+ verify(mockDataPersistenceProvider, never()).deleteSnapshots(any(SnapshotSelectionCriteria.class));
+
+ }
+
+ @Test
+ public void testCallingCommitMultipleTimesCausesNoHarm(){
+ doReturn(50L).when(mockDataPersistenceProvider).getLastSequenceNumber();
+
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ snapshotManager.commit(100L);
+
+ snapshotManager.commit(100L);
+
+ verify(mockReplicatedLog, times(1)).snapshotCommit();
+
+ verify(mockDataPersistenceProvider, times(1)).deleteMessages(50L);
+
+ verify(mockDataPersistenceProvider, times(1)).deleteSnapshots(any(SnapshotSelectionCriteria.class));
+ }
+
+ @Test
+ public void testRollback(){
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ snapshotManager.rollback();
+
+ verify(mockReplicatedLog).snapshotRollback();
+ }
+
+
+ @Test
+ public void testRollbackBeforePersist(){
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.rollback();
+
+ verify(mockReplicatedLog, never()).snapshotRollback();
+ }
+
+ @Test
+ public void testRollbackBeforeCapture(){
+ snapshotManager.rollback();
+
+ verify(mockReplicatedLog, never()).snapshotRollback();
+ }
+
+ @Test
+ public void testCallingRollbackMultipleTimesCausesNoHarm(){
+ // when replicatedToAllIndex = -1
+ snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(6, 9,
+ new MockRaftActorContext.MockPayload()), -1, "follower-1");
+
+ snapshotManager.persist(new byte[]{}, mockRaftActorBehavior, Runtime.getRuntime().totalMemory());
+
+ snapshotManager.rollback();
+
+ snapshotManager.rollback();
+
+ verify(mockReplicatedLog, times(1)).snapshotRollback();
+ }
+
+ @Test
+ public void testTrimLogWhenTrimIndexLessThanLastApplied() {
+ doReturn(20L).when(mockRaftActorContext).getLastApplied();
+
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(true).when(mockReplicatedLog).isPresent(10);
+ doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+ doReturn(5L).when(replicatedLogEntry).getTerm();
+
+ long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+ assertEquals("return index", 10L, retIndex);
+
+ verify(mockReplicatedLog).snapshotPreCommit(10, 5);
+ verify(mockReplicatedLog).snapshotCommit();
+
+ verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+ }
+
+ @Test
+ public void testTrimLogWhenLastAppliedNotSet() {
+ doReturn(-1L).when(mockRaftActorContext).getLastApplied();
+
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(true).when(mockReplicatedLog).isPresent(10);
+ doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+ doReturn(5L).when(replicatedLogEntry).getTerm();
+
+ long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+ assertEquals("return index", -1L, retIndex);
+
+ verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+ }
+
+ @Test
+ public void testTrimLogWhenLastAppliedZero() {
+ doReturn(0L).when(mockRaftActorContext).getLastApplied();
+
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(true).when(mockReplicatedLog).isPresent(10);
+ doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+ doReturn(5L).when(replicatedLogEntry).getTerm();
+
+ long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+ assertEquals("return index", -1L, retIndex);
+
+ verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+ }
+
+ @Test
+ public void testTrimLogWhenTrimIndexNotPresent() {
+ doReturn(20L).when(mockRaftActorContext).getLastApplied();
+
+ doReturn(false).when(mockReplicatedLog).isPresent(10);
+
+ long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+ assertEquals("return index", -1L, retIndex);
+
+ verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ // Trim index is greater than replicatedToAllIndex so should update it.
+ verify(mockRaftActorBehavior).setReplicatedToAllIndex(10L);
+ }
+
+ @Test
+ public void testTrimLogAfterCapture(){
+ boolean capture = snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
+ new MockRaftActorContext.MockPayload()), 9);
+
+ assertTrue(capture);
+
+ assertEquals(true, snapshotManager.isCapturing());
+
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(20L).when(mockRaftActorContext).getLastApplied();
+ doReturn(true).when(mockReplicatedLog).isPresent(10);
+ doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+ doReturn(5L).when(replicatedLogEntry).getTerm();
+
+ snapshotManager.trimLog(10, mockRaftActorBehavior);
+
+ verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ }
+
+ @Test
+ public void testTrimLogAfterCaptureToInstall(){
+ boolean capture = snapshotManager.captureToInstall(new MockRaftActorContext.MockReplicatedLogEntry(1,9,
+ new MockRaftActorContext.MockPayload()), 9, "follower-1");
+
+ assertTrue(capture);
+
+ assertEquals(true, snapshotManager.isCapturing());
+
+ ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+ doReturn(20L).when(mockRaftActorContext).getLastApplied();
+ doReturn(true).when(mockReplicatedLog).isPresent(10);
+ doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+ doReturn(5L).when(replicatedLogEntry).getTerm();
+
+ snapshotManager.trimLog(10, mockRaftActorBehavior);
+
+ verify(mockReplicatedLog, never()).snapshotPreCommit(10, 5);
+ verify(mockReplicatedLog, never()).snapshotCommit();
+
+ }
+
+ @Test
+ public void testLastAppliedTermInformationReader() {
+
+ LastAppliedTermInformationReader reader = new LastAppliedTermInformationReader();
+
+ doReturn(4L).when(mockReplicatedLog).getSnapshotTerm();
+ doReturn(7L).when(mockReplicatedLog).getSnapshotIndex();
+
+ ReplicatedLogEntry lastLogEntry = new MockRaftActorContext.MockReplicatedLogEntry(6L, 9L,
+ new MockRaftActorContext.MockPayload());
+
+ // No followers and valid lastLogEntry
+ reader.init(mockReplicatedLog, 1L, lastLogEntry, false);
+
+ assertEquals("getTerm", 6L, reader.getTerm());
+ assertEquals("getIndex", 9L, reader.getIndex());
+
+ // No followers and null lastLogEntry
+ reader.init(mockReplicatedLog, 1L, null, false);
+
+ assertEquals("getTerm", -1L, reader.getTerm());
+ assertEquals("getIndex", -1L, reader.getIndex());
+
+ // Followers and valid originalIndex entry
+ doReturn(new MockRaftActorContext.MockReplicatedLogEntry(5L, 8L,
+ new MockRaftActorContext.MockPayload())).when(mockReplicatedLog).get(8L);
+ reader.init(mockReplicatedLog, 8L, lastLogEntry, true);
+
+ assertEquals("getTerm", 5L, reader.getTerm());
+ assertEquals("getIndex", 8L, reader.getIndex());
+
+ // Followers and null originalIndex entry and valid snapshot index
+ reader.init(mockReplicatedLog, 7L, lastLogEntry, true);
+
+ assertEquals("getTerm", 4L, reader.getTerm());
+ assertEquals("getIndex", 7L, reader.getIndex());
+
+ // Followers and null originalIndex entry and invalid snapshot index
+ doReturn(-1L).when(mockReplicatedLog).getSnapshotIndex();
+ reader.init(mockReplicatedLog, 7L, lastLogEntry, true);
+
+ assertEquals("getTerm", -1L, reader.getTerm());
+ assertEquals("getIndex", -1L, reader.getIndex());
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Unit tests for DeleteEntries.
+ *
+ * @author Thomas Pantelis
+ */
+public class DeleteEntriesTest {
+
+ @Test
+ public void testSerialization() {
+
+ DeleteEntries deleteEntries = new DeleteEntries(11);
+
+ DeleteEntries clone = (DeleteEntries) SerializationUtils.clone(deleteEntries);
+
+ Assert.assertEquals("getFromIndex", 11, clone.getFromIndex());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.dispatch.Dispatchers;
+import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.junit.After;
+import org.junit.Before;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * Abstract base for a leader election scenario test.
+ *
+ * @author Thomas Pantelis
+ */
+public class AbstractLeaderElectionScenarioTest {
+ static final int HEARTBEAT_INTERVAL = 50;
+
+ static class MemberActor extends MessageCollectorActor {
+
+ volatile RaftActorBehavior behavior;
+ Map<Class<?>, CountDownLatch> messagesReceivedLatches = new ConcurrentHashMap<>();
+ Map<Class<?>, Boolean> dropMessagesToBehavior = new ConcurrentHashMap<>();
+ CountDownLatch behaviorStateChangeLatch;
+
+ public static Props props() {
+ return Props.create(MemberActor.class).withDispatcher(Dispatchers.DefaultDispatcherId());
+ }
+
+ @Override
+ public void onReceive(Object message) throws Exception {
+ // Ignore scheduled SendHeartBeat messages.
+ if(message instanceof SendHeartBeat) {
+ return;
+ }
+
+ try {
+ if(behavior != null && !dropMessagesToBehavior.containsKey(message.getClass())) {
+ RaftActorBehavior oldBehavior = behavior;
+ behavior = behavior.handleMessage(getSender(), message);
+ if(behavior != oldBehavior && behaviorStateChangeLatch != null) {
+ behaviorStateChangeLatch.countDown();
+ }
+ }
+ } finally {
+ super.onReceive(message);
+
+ CountDownLatch latch = messagesReceivedLatches.get(message.getClass());
+ if(latch != null) {
+ latch.countDown();
+ }
+ }
+ }
+
+ void expectBehaviorStateChange() {
+ behaviorStateChangeLatch = new CountDownLatch(1);
+ }
+
+ void waitForBehaviorStateChange() {
+ assertTrue("Expected behavior state change",
+ Uninterruptibles.awaitUninterruptibly(behaviorStateChangeLatch, 5, TimeUnit.SECONDS));
+ }
+
+ void expectMessageClass(Class<?> expClass, int expCount) {
+ messagesReceivedLatches.put(expClass, new CountDownLatch(expCount));
+ }
+
+ void waitForExpectedMessages(Class<?> expClass) {
+ CountDownLatch latch = messagesReceivedLatches.get(expClass);
+ assertNotNull("No messages received for " + expClass, latch);
+ assertTrue("Missing messages of type " + expClass,
+ Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
+ }
+
+ void dropMessagesToBehavior(Class<?> msgClass) {
+ dropMessagesToBehavior(msgClass, 1);
+ }
+
+ void dropMessagesToBehavior(Class<?> msgClass, int expCount) {
+ expectMessageClass(msgClass, expCount);
+ dropMessagesToBehavior.put(msgClass, Boolean.TRUE);
+ }
+
+ void clearDropMessagesToBehavior() {
+ dropMessagesToBehavior.clear();
+ }
+
+ @Override
+ public void clear() {
+ behaviorStateChangeLatch = null;
+ clearDropMessagesToBehavior();
+ messagesReceivedLatches.clear();
+ super.clear();
+ }
+
+ void forwardCapturedMessageToBehavior(Class<?> msgClass, ActorRef sender) throws Exception {
+ Object message = getFirstMatching(getSelf(), msgClass);
+ assertNotNull("Message of type " + msgClass + " not received", message);
+ getSelf().tell(message, sender);
+ }
+
+ void forwardCapturedMessagesToBehavior(Class<?> msgClass, ActorRef sender) throws Exception {
+ for(Object m: getAllMatching(getSelf(), msgClass)) {
+ getSelf().tell(m, sender);
+ }
+ }
+
+ <T> T getCapturedMessage(Class<T> msgClass) throws Exception {
+ Object message = getFirstMatching(getSelf(), msgClass);
+ assertNotNull("Message of type " + msgClass + " not received", message);
+ return (T) message;
+ }
+ }
+
+ protected final Logger testLog = LoggerFactory.getLogger(MockRaftActorContext.class);
+ protected final ActorSystem system = ActorSystem.create("test");
+ protected TestActorRef<MemberActor> member1ActorRef;
+ protected TestActorRef<MemberActor> member2ActorRef;
+ protected TestActorRef<MemberActor> member3ActorRef;
+ protected MemberActor member1Actor;
+ protected MemberActor member2Actor;
+ protected MemberActor member3Actor;
+ protected MockRaftActorContext member1Context;
+ protected MockRaftActorContext member2Context;
+ protected MockRaftActorContext member3Context;
+
+ @Before
+ public void setup() throws Exception {
+ member1ActorRef = newMemberActor("member1");
+ member2ActorRef = newMemberActor("member2");
+ member3ActorRef = newMemberActor("member3");
+
+ member1Actor = member1ActorRef.underlyingActor();
+ member2Actor = member2ActorRef.underlyingActor();
+ member3Actor = member3ActorRef.underlyingActor();
+ }
+
+ @After
+ public void tearDown() {
+ JavaTestKit.shutdownActorSystem(system);
+ }
+
+ DefaultConfigParamsImpl newConfigParams() {
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS));
+ configParams.setElectionTimeoutFactor(100000);
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ return configParams;
+ }
+
+ MockRaftActorContext newRaftActorContext(String id, ActorRef actor,
+ Map<String, String> peerAddresses) {
+ MockRaftActorContext context = new MockRaftActorContext(id, system, actor);
+ context.setPeerAddresses(peerAddresses);
+ context.getTermInformation().updateAndPersist(1, "");
+ return context;
+ }
+
+ void verifyBehaviorState(String name, MemberActor actor, RaftState expState) {
+ assertEquals(name + " behavior state", expState, actor.behavior.state());
+ }
+
+ void initializeLeaderBehavior(MemberActor actor, RaftActorContext context,
+ int numActiveFollowers) throws Exception {
+ // Leader sends immediate heartbeats - we don't care about it so ignore it.
+
+ actor.expectMessageClass(AppendEntriesReply.class, numActiveFollowers);
+ Leader leader = new Leader(context);
+ actor.waitForExpectedMessages(AppendEntriesReply.class);
+ actor.behavior = leader;
+
+ actor.forwardCapturedMessagesToBehavior(AppendEntriesReply.class, ActorRef.noSender());
+ actor.clear();
+ }
+
+ TestActorRef<MemberActor> newMemberActor(String name) throws Exception {
+ TestActorRef<MemberActor> actor = TestActorRef.create(system, MemberActor.props(), name);
+ MessageCollectorActor.waitUntilReady(actor);
+ return actor;
+ }
+
+ void sendHeartbeat(TestActorRef<MemberActor> leaderActor) {
+ Uninterruptibles.sleepUninterruptibly(HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS);
+ leaderActor.underlyingActor().behavior.handleMessage(leaderActor, new SendHeartBeat());
+ }
+}
package org.opendaylight.controller.cluster.raft.behaviors;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.TestActorRef;
+import com.google.common.base.Stopwatch;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
assertEquals("getTerm", 1001, reply.getTerm());
}
+ @Test
+ public void testCandidateSchedulesElectionTimeoutImmediatelyWhenItHasNoPeers(){
+ MockRaftActorContext context = createActorContext();
+
+ Stopwatch stopwatch = Stopwatch.createStarted();
+
+ candidate = createBehavior(context);
+
+ MessageCollectorActor.expectFirstMatching(candidateActor, ElectionTimeout.class);
+
+ long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
+
+ assertTrue(elapsed < context.getConfigParams().getElectionTimeOutInterval().toMillis());
+ }
@Override
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+import akka.actor.ActorRef;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+
+/**
+ * A leader election scenario test that delays various messages to behaviors to simulate network delays.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelayedMessagesElectionScenarioTest extends AbstractLeaderElectionScenarioTest {
+
+ @Test
+ public void runTest() throws Exception {
+ testLog.info("DelayedMessagesElectionScenarioTest starting");
+
+ setupInitialMemberBehaviors();
+
+ sendInitialElectionTimeoutToFollowerMember2();
+
+ forwardDelayedRequestVotesToLeaderMember1AndFollowerMember3();
+
+ sendElectionTimeoutToFollowerMember3();
+
+ forwardDelayedRequestVoteReplyFromOriginalFollowerMember3ToMember2();
+
+ testLog.info("DelayedMessagesElectionScenarioTest ending");
+ }
+
+ private void forwardDelayedRequestVoteReplyFromOriginalFollowerMember3ToMember2() throws Exception {
+ testLog.info("forwardDelayedRequestVoteReplyFromOriginalFollowerMember3ToMember2 starting");
+
+ // Now forward the original delayed RequestVoteReply from member 3 to member 2 that granted
+ // the vote. Since member 2 is now a Follower, the RequestVoteReply should be ignored.
+
+ member2Actor.clearDropMessagesToBehavior();
+ member2Actor.forwardCapturedMessageToBehavior(RequestVoteReply.class, member3ActorRef);
+
+ member2Actor.waitForExpectedMessages(RequestVoteReply.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("forwardDelayedRequestVoteReplyFromOriginalFollowerMember3ToMember2 ending");
+ }
+
+ private void sendElectionTimeoutToFollowerMember3() throws Exception {
+ testLog.info("sendElectionTimeoutToFollowerMember3 starting");
+
+ // Send ElectionTimeout to member 3 to simulate missing heartbeat from a Leader. member 3
+ // should switch to Candidate and send out RequestVote messages. member 1 should grant the
+ // vote and send a reply. After receiving the RequestVoteReply, member 3 should switch to leader.
+
+ member2Actor.expectBehaviorStateChange();
+ member3Actor.clear();
+ member3Actor.expectMessageClass(RequestVoteReply.class, 1);
+ member3Actor.expectMessageClass(AppendEntriesReply.class, 2);
+
+ member3ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member3Actor.waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member3Actor.getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ // member 2 should've switched to Follower as member 3's RequestVote term (3) was greater
+ // than member 2's term (2).
+
+ member2Actor.waitForBehaviorStateChange();
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+
+ // The switch to leader should cause an immediate AppendEntries heartbeat from member 3.
+
+ member3Actor.waitForExpectedMessages(AppendEntriesReply.class);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("sendElectionTimeoutToFollowerMember3 ending");
+ }
+
+ private void forwardDelayedRequestVotesToLeaderMember1AndFollowerMember3() throws Exception {
+ testLog.info("forwardDelayedRequestVotesToLeaderMember1AndFollowerMember3 starting");
+
+ // At this point member 1 and 3 actors have captured the RequestVote messages. First
+ // forward the RequestVote message to member 1's behavior. Since the RequestVote term
+ // is greater than member 1's term and member 1 is a Leader, member 1 should switch to Follower
+ // without replying to RequestVote and update its term to 2.
+
+ member1Actor.clearDropMessagesToBehavior();
+ member1Actor.expectBehaviorStateChange();
+ member1Actor.forwardCapturedMessageToBehavior(RequestVote.class, member2ActorRef);
+ member1Actor.waitForExpectedMessages(RequestVote.class);
+
+ member1Actor.waitForBehaviorStateChange();
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+
+ // Now forward member 3's captured RequestVote message to its behavior. Since member 3 is
+ // already a Follower, it should update its term to 2 and send a RequestVoteReply back to
+ // member 2 granting the vote b/c the RequestVote's term, lastLogTerm, and lastLogIndex
+ // should satisfy the criteria for granting the vote. However, we'll delay sending the
+ // RequestVoteReply to member 2's behavior to simulate network latency.
+
+ member2Actor.dropMessagesToBehavior(RequestVoteReply.class);
+
+ member3Actor.clearDropMessagesToBehavior();
+ member3Actor.expectMessageClass(RequestVote.class, 1);
+ member3Actor.forwardCapturedMessageToBehavior(RequestVote.class, member2ActorRef);
+ member3Actor.waitForExpectedMessages(RequestVote.class);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("forwardDelayedRequestVotesToLeaderMember1AndFollowerMember3 ending");
+ }
+
+ private void sendInitialElectionTimeoutToFollowerMember2() {
+ testLog.info("sendInitialElectionTimeoutToFollowerMember2 starting");
+
+ // Send ElectionTimeout to member 2 to simulate missing heartbeat from the Leader. member 2
+ // should switch to Candidate and send out RequestVote messages. Set member 1 and 3 actors
+ // to capture RequestVote but not to forward to the behavior just yet as we want to
+ // control the order of RequestVote messages to member 1 and 3.
+
+ member1Actor.dropMessagesToBehavior(RequestVote.class);
+
+ member2Actor.expectBehaviorStateChange();
+
+ member3Actor.dropMessagesToBehavior(RequestVote.class);
+
+ member2ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.waitForExpectedMessages(RequestVote.class);
+ member3Actor.waitForExpectedMessages(RequestVote.class);
+
+ member2Actor.waitForBehaviorStateChange();
+ verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
+
+ assertEquals("member 1 election term", 1, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 1, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("sendInitialElectionTimeoutToFollowerMember2 ending");
+ }
+
+ private void setupInitialMemberBehaviors() throws Exception {
+ testLog.info("setupInitialMemberBehaviors starting");
+
+ // Create member 2's behavior initially as Follower
+
+ member2Context = newRaftActorContext("member2", member2ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1ActorRef.path().toString()).
+ put("member3", member3ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
+ member2Context.setConfigParams(member2ConfigParams);
+
+ Follower member2Behavior = new Follower(member2Context);
+ member2Actor.behavior = member2Behavior;
+
+ // Create member 3's behavior initially as Follower
+
+ member3Context = newRaftActorContext("member3", member3ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1ActorRef.path().toString()).
+ put("member2", member2ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
+ member3Context.setConfigParams(member3ConfigParams);
+
+ Follower member3Behavior = new Follower(member3Context);
+ member3Actor.behavior = member3Behavior;
+
+ // Create member 1's behavior initially as Leader
+
+ member1Context = newRaftActorContext("member1", member1ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member2", member2ActorRef.path().toString()).
+ put("member3", member3ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
+ member1Context.setConfigParams(member1ConfigParams);
+
+ initializeLeaderBehavior(member1Actor, member1Context, 2);
+
+ member2Actor.clear();
+ member3Actor.clear();
+
+ testLog.info("setupInitialMemberBehaviors ending");
+ }
+}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.TestActorRef;
+import com.google.common.base.Stopwatch;
import com.google.protobuf.ByteString;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
follower = createBehavior(context);
- HashMap<String, String> followerSnapshot = new HashMap<>();
- followerSnapshot.put("1", "A");
- followerSnapshot.put("2", "B");
- followerSnapshot.put("3", "C");
-
- ByteString bsSnapshot = toByteString(followerSnapshot);
+ ByteString bsSnapshot = createSnapshot();
int offset = 0;
int snapshotLength = bsSnapshot.size();
int chunkSize = 50;
ApplySnapshot applySnapshot = MessageCollectorActor.expectFirstMatching(followerActor,
ApplySnapshot.class);
Snapshot snapshot = applySnapshot.getSnapshot();
+ assertNotNull(lastInstallSnapshot);
assertEquals("getLastIndex", lastInstallSnapshot.getLastIncludedIndex(), snapshot.getLastIndex());
assertEquals("getLastIncludedTerm", lastInstallSnapshot.getLastIncludedTerm(),
snapshot.getLastAppliedTerm());
assertNull("Expected null SnapshotTracker", ((Follower) follower).getSnapshotTracker());
}
+
+ /**
+ * Verify that when an AppendEntries is sent to a follower during a snapshot install
+ * the Follower short-circuits the processing of the AppendEntries message.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testReceivingAppendEntriesDuringInstallSnapshot() throws Exception {
+ logStart("testReceivingAppendEntriesDuringInstallSnapshot");
+
+ MockRaftActorContext context = createActorContext();
+
+ follower = createBehavior(context);
+
+ ByteString bsSnapshot = createSnapshot();
+ int snapshotLength = bsSnapshot.size();
+ int chunkSize = 50;
+ int totalChunks = (snapshotLength / chunkSize) + ((snapshotLength % chunkSize) > 0 ? 1 : 0);
+ int lastIncludedIndex = 1;
+
+ // Check that snapshot installation is not in progress
+ assertNull(((Follower) follower).getSnapshotTracker());
+
+ // Make sure that we have more than 1 chunk to send
+ assertTrue(totalChunks > 1);
+
+ // Send an install snapshot with the first chunk to start the process of installing a snapshot
+ ByteString chunkData = getNextChunk(bsSnapshot, 0, chunkSize);
+ follower.handleMessage(leaderActor, new InstallSnapshot(1, "leader", lastIncludedIndex, 1,
+ chunkData, 1, totalChunks));
+
+ // Check if snapshot installation is in progress now
+ assertNotNull(((Follower) follower).getSnapshotTracker());
+
+ // Send an append entry
+ AppendEntries appendEntries = mock(AppendEntries.class);
+ doReturn(context.getTermInformation().getCurrentTerm()).when(appendEntries).getTerm();
+
+ follower.handleMessage(leaderActor, appendEntries);
+
+ AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
+ assertEquals(context.getReplicatedLog().lastIndex(), reply.getLogLastIndex());
+ assertEquals(context.getReplicatedLog().lastTerm(), reply.getLogLastTerm());
+ assertEquals(context.getTermInformation().getCurrentTerm(), reply.getTerm());
+
+ // We should not hit the code that needs to look at prevLogIndex because we are short circuiting
+ verify(appendEntries, never()).getPrevLogIndex();
+
+ }
+
@Test
public void testInitialSyncUpWithHandleInstallSnapshotFollowedByAppendEntries() throws Exception {
logStart("testInitialSyncUpWithHandleInstallSnapshot");
follower = createBehavior(context);
- HashMap<String, String> followerSnapshot = new HashMap<>();
- followerSnapshot.put("1", "A");
- followerSnapshot.put("2", "B");
- followerSnapshot.put("3", "C");
-
- ByteString bsSnapshot = toByteString(followerSnapshot);
+ ByteString bsSnapshot = createSnapshot();
int offset = 0;
int snapshotLength = bsSnapshot.size();
int chunkSize = 50;
follower = createBehavior(context);
- HashMap<String, String> followerSnapshot = new HashMap<>();
- followerSnapshot.put("1", "A");
- followerSnapshot.put("2", "B");
- followerSnapshot.put("3", "C");
-
- ByteString bsSnapshot = toByteString(followerSnapshot);
+ ByteString bsSnapshot = createSnapshot();
InstallSnapshot installSnapshot = new InstallSnapshot(1, "leader", 3, 1,
getNextChunk(bsSnapshot, 10, 50), 3, 3);
assertNull("Expected null SnapshotTracker", ((Follower) follower).getSnapshotTracker());
}
+ @Test
+ public void testFollowerSchedulesElectionTimeoutImmediatelyWhenItHasNoPeers(){
+ MockRaftActorContext context = createActorContext();
+
+ Stopwatch stopwatch = Stopwatch.createStarted();
+
+ follower = createBehavior(context);
+
+ MessageCollectorActor.expectFirstMatching(followerActor, ElectionTimeout.class);
+
+ long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
+
+ assertTrue(elapsed < context.getConfigParams().getElectionTimeOutInterval().toMillis());
+ }
+
public ByteString getNextChunk (ByteString bs, int offset, int chunkSize){
int snapshotLength = bs.size();
int start = offset;
new MockRaftActorContext.MockPayload(data));
}
+ private ByteString createSnapshot(){
+ HashMap<String, String> followerSnapshot = new HashMap<>();
+ followerSnapshot.put("1", "A");
+ followerSnapshot.put("2", "B");
+ followerSnapshot.put("3", "C");
+
+ return toByteString(followerSnapshot);
+ }
+
@Override
protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(RaftActorContext actorContext,
ActorRef actorRef, RaftRPC rpc) throws Exception {
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.raft.behaviors;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Dispatchers;
-import akka.testkit.JavaTestKit;
-import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
-import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
-import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
-import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.MockRaftActorContext.SimpleReplicatedLog;
-import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
-import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
-import org.opendaylight.controller.cluster.raft.messages.RequestVote;
-import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
-import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.impl.SimpleLogger;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Tests various leader election scenarios.
- *
- * @author Thomas Pantelis
- */
-public class LeaderElectionScenariosTest {
-
- private static final int HEARTBEAT_INTERVAL = 50;
-
- public static class MemberActor extends MessageCollectorActor {
-
- volatile RaftActorBehavior behavior;
- Map<Class<?>, CountDownLatch> messagesReceivedLatches = new ConcurrentHashMap<>();
- Map<Class<?>, Boolean> dropMessagesToBehavior = new ConcurrentHashMap<>();
- CountDownLatch behaviorStateChangeLatch;
-
- public static Props props() {
- return Props.create(MemberActor.class).withDispatcher(Dispatchers.DefaultDispatcherId());
- }
-
- @Override
- public void onReceive(Object message) throws Exception {
- // Ignore scheduled SendHeartBeat messages.
- if(message instanceof SendHeartBeat) {
- return;
- }
-
- try {
- if(behavior != null && !dropMessagesToBehavior.containsKey(message.getClass())) {
- RaftActorBehavior oldBehavior = behavior;
- behavior = behavior.handleMessage(getSender(), message);
- if(behavior != oldBehavior && behaviorStateChangeLatch != null) {
- behaviorStateChangeLatch.countDown();
- }
- }
- } finally {
- super.onReceive(message);
-
- CountDownLatch latch = messagesReceivedLatches.get(message.getClass());
- if(latch != null) {
- latch.countDown();
- }
- }
- }
-
- void expectBehaviorStateChange() {
- behaviorStateChangeLatch = new CountDownLatch(1);
- }
-
- void waitForBehaviorStateChange() {
- assertTrue("Expected behavior state change",
- Uninterruptibles.awaitUninterruptibly(behaviorStateChangeLatch, 5, TimeUnit.SECONDS));
- }
-
- void expectMessageClass(Class<?> expClass, int expCount) {
- messagesReceivedLatches.put(expClass, new CountDownLatch(expCount));
- }
-
- void waitForExpectedMessages(Class<?> expClass) {
- CountDownLatch latch = messagesReceivedLatches.get(expClass);
- assertNotNull("No messages received for " + expClass, latch);
- assertTrue("Missing messages of type " + expClass,
- Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
- }
-
- void dropMessagesToBehavior(Class<?> msgClass) {
- dropMessagesToBehavior(msgClass, 1);
- }
-
- void dropMessagesToBehavior(Class<?> msgClass, int expCount) {
- expectMessageClass(msgClass, expCount);
- dropMessagesToBehavior.put(msgClass, Boolean.TRUE);
- }
-
- void clearDropMessagesToBehavior() {
- dropMessagesToBehavior.clear();
- }
-
- @Override
- public void clear() {
- behaviorStateChangeLatch = null;
- clearDropMessagesToBehavior();
- messagesReceivedLatches.clear();
- super.clear();
- }
-
- void forwardCapturedMessageToBehavior(Class<?> msgClass, ActorRef sender) throws Exception {
- Object message = getFirstMatching(getSelf(), msgClass);
- assertNotNull("Message of type " + msgClass + " not received", message);
- getSelf().tell(message, sender);
- }
-
- void forwardCapturedMessagesToBehavior(Class<?> msgClass, ActorRef sender) throws Exception {
- for(Object m: getAllMatching(getSelf(), msgClass)) {
- getSelf().tell(m, sender);
- }
- }
-
- <T> T getCapturedMessage(Class<T> msgClass) throws Exception {
- Object message = getFirstMatching(getSelf(), msgClass);
- assertNotNull("Message of type " + msgClass + " not received", message);
- return (T) message;
- }
- }
-
- static {
- System.setProperty(SimpleLogger.LOG_KEY_PREFIX + MockRaftActorContext.class.getName(), "trace");
- }
-
- private final Logger testLog = LoggerFactory.getLogger(MockRaftActorContext.class);
- private final ActorSystem system = ActorSystem.create("test");
-
- @After
- public void tearDown() {
- JavaTestKit.shutdownActorSystem(system);
- }
-
- private DefaultConfigParamsImpl newConfigParams() {
- DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
- configParams.setHeartBeatInterval(new FiniteDuration(HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS));
- configParams.setElectionTimeoutFactor(100000);
- configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
- return configParams;
- }
-
- private MockRaftActorContext newRaftActorContext(String id, ActorRef actor,
- Map<String, String> peerAddresses) {
- MockRaftActorContext context = new MockRaftActorContext(id, system, actor);
- context.setPeerAddresses(peerAddresses);
- context.getTermInformation().updateAndPersist(1, "");
- return context;
- }
-
- private void verifyBehaviorState(String name, TestActorRef<MemberActor> actor, RaftState expState) {
- assertEquals(name + " behavior state", expState, actor.underlyingActor().behavior.state());
- }
-
- private void initializeLeaderBehavior(TestActorRef<MemberActor> actor, RaftActorContext context,
- int numActiveFollowers) throws Exception {
- // Leader sends immediate heartbeats - we don't care about it so ignore it.
-
- actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, numActiveFollowers);
- Leader leader = new Leader(context);
- actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
- actor.underlyingActor().behavior = leader;
-
- actor.underlyingActor().forwardCapturedMessagesToBehavior(AppendEntriesReply.class, ActorRef.noSender());
- actor.underlyingActor().clear();
- }
-
- private TestActorRef<MemberActor> newMemberActor(String name) throws Exception {
- TestActorRef<MemberActor> actor = TestActorRef.create(system, MemberActor.props(), name);
- MessageCollectorActor.waitUntilReady(actor);
- return actor;
- }
-
- private void sendHeartbeat(TestActorRef<MemberActor> leaderActor) {
- Uninterruptibles.sleepUninterruptibly(HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS);
- leaderActor.underlyingActor().behavior.handleMessage(leaderActor, new SendHeartBeat());
- }
-
- @Test
- public void testDelayedMessagesScenario() throws Exception {
- testLog.info("Starting testDelayedMessagesScenario");
-
- TestActorRef<MemberActor> member1Actor = newMemberActor("member1");
- TestActorRef<MemberActor> member2Actor = newMemberActor("member2");
- TestActorRef<MemberActor> member3Actor = newMemberActor("member3");
-
- // Create member 2's behavior initially as Follower
-
- MockRaftActorContext member2Context = newRaftActorContext("member2", member2Actor,
- ImmutableMap.<String,String>builder().
- put("member1", member1Actor.path().toString()).
- put("member3", member3Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
- member2Context.setConfigParams(member2ConfigParams);
-
- Follower member2Behavior = new Follower(member2Context);
- member2Actor.underlyingActor().behavior = member2Behavior;
-
- // Create member 3's behavior initially as Follower
-
- MockRaftActorContext member3Context = newRaftActorContext("member3", member3Actor,
- ImmutableMap.<String,String>builder().
- put("member1", member1Actor.path().toString()).
- put("member2", member2Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
- member3Context.setConfigParams(member3ConfigParams);
-
- Follower member3Behavior = new Follower(member3Context);
- member3Actor.underlyingActor().behavior = member3Behavior;
-
- // Create member 1's behavior initially as Leader
-
- MockRaftActorContext member1Context = newRaftActorContext("member1", member1Actor,
- ImmutableMap.<String,String>builder().
- put("member2", member2Actor.path().toString()).
- put("member3", member3Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
- member1Context.setConfigParams(member1ConfigParams);
-
- initializeLeaderBehavior(member1Actor, member1Context, 2);
-
- member2Actor.underlyingActor().clear();
- member3Actor.underlyingActor().clear();
-
- // Send ElectionTimeout to member 2 to simulate missing heartbeat from the Leader. member 2
- // should switch to Candidate and send out RequestVote messages. Set member 1 and 3 actors
- // to capture RequestVote but not to forward to the behavior just yet as we want to
- // control the order of RequestVote messages to member 1 and 3.
-
- member1Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
-
- member2Actor.underlyingActor().expectBehaviorStateChange();
-
- member3Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
-
- member2Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- member2Actor.underlyingActor().waitForBehaviorStateChange();
- verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
-
- assertEquals("member 1 election term", 1, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 1, member3Context.getTermInformation().getCurrentTerm());
-
- // At this point member 1 and 3 actors have captured the RequestVote messages. First
- // forward the RequestVote message to member 1's behavior. Since the RequestVote term
- // is greater than member 1's term, member 1 should switch to Follower without replying
- // to RequestVote and update its term to 2.
-
- member1Actor.underlyingActor().clearDropMessagesToBehavior();
- member1Actor.underlyingActor().expectBehaviorStateChange();
- member1Actor.underlyingActor().forwardCapturedMessageToBehavior(RequestVote.class, member2Actor);
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- member1Actor.underlyingActor().waitForBehaviorStateChange();
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
-
- // Now forward member 3's captured RequestVote message to its behavior. Since member 3 is
- // already a Follower, it should update its term to 2 and send a RequestVoteReply back to
- // member 2 granting the vote b/c the RequestVote's term, lastLogTerm, and lastLogIndex
- // should satisfy the criteria for granting the vote. However, we'll delay sending the
- // RequestVoteReply to member 2's behavior to simulate network latency.
-
- member2Actor.underlyingActor().dropMessagesToBehavior(RequestVoteReply.class);
-
- member3Actor.underlyingActor().clearDropMessagesToBehavior();
- member3Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
- member3Actor.underlyingActor().forwardCapturedMessageToBehavior(RequestVote.class, member2Actor);
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
-
- assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
-
- // Send ElectionTimeout to member 3 to simulate missing heartbeat from a Leader. member 3
- // should switch to Candidate and send out RequestVote messages. member 1 should grant the
- // vote and send a reply. After receiving the RequestVoteReply, member 3 should switch to leader.
-
- member2Actor.underlyingActor().expectBehaviorStateChange();
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
- member3Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 2);
-
- member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
-
- RequestVoteReply requestVoteReply = member3Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
- assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
- assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
-
- verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
-
- // member 2 should've switched to Follower as member 3's RequestVote term (3) was greater
- // than member 2's term (2).
-
- member2Actor.underlyingActor().waitForBehaviorStateChange();
- verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
-
- // The switch to leader should cause an immediate AppendEntries heartbeat from member 3.
-
- member3Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
-
- assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
-
- // Now forward the original delayed RequestVoteReply from member 3 to member 2 that granted
- // the vote. Since member 2 is now a Follower, the RequestVoteReply should be ignored.
-
- member2Actor.underlyingActor().clearDropMessagesToBehavior();
- member2Actor.underlyingActor().forwardCapturedMessageToBehavior(RequestVoteReply.class, member3Actor);
-
- member2Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
- verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
- verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
-
- assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
-
- testLog.info("testDelayedMessagesScenario done");
- }
-
- @Test
- public void testPartitionedLeadersScenario() throws Exception {
- testLog.info("Starting testPartitionedLeadersScenario");
-
- TestActorRef<MemberActor> member1Actor = newMemberActor("member1");
- TestActorRef<MemberActor> member2Actor = newMemberActor("member2");
- TestActorRef<MemberActor> member3Actor = newMemberActor("member3");
-
- // Create member 2's behavior initially as Follower
-
- MockRaftActorContext member2Context = newRaftActorContext("member2", member2Actor,
- ImmutableMap.<String,String>builder().
- put("member1", member1Actor.path().toString()).
- put("member3", member3Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
- member2Context.setConfigParams(member2ConfigParams);
-
- Follower member2Behavior = new Follower(member2Context);
- member2Actor.underlyingActor().behavior = member2Behavior;
-
- // Create member 3's behavior initially as Follower
-
- MockRaftActorContext member3Context = newRaftActorContext("member3", member3Actor,
- ImmutableMap.<String,String>builder().
- put("member1", member1Actor.path().toString()).
- put("member2", member2Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
- member3Context.setConfigParams(member3ConfigParams);
-
- Follower member3Behavior = new Follower(member3Context);
- member3Actor.underlyingActor().behavior = member3Behavior;
-
- // Create member 1's behavior initially as Leader
-
- MockRaftActorContext member1Context = newRaftActorContext("member1", member1Actor,
- ImmutableMap.<String,String>builder().
- put("member2", member2Actor.path().toString()).
- put("member3", member3Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
- member1Context.setConfigParams(member1ConfigParams);
-
- initializeLeaderBehavior(member1Actor, member1Context, 2);
-
- member2Actor.underlyingActor().clear();
- member3Actor.underlyingActor().clear();
-
- // Send ElectionTimeout to member 2 to simulate no heartbeat from the Leader (member 1).
- // member 2 should switch to Candidate, start new term 2 and send out RequestVote messages.
- // member 1 will switch to Follower b/c its term is less than the RequestVote term, also it
- // won't send back a reply. member 3 will drop the message (ie won't forward it to its behavior) to
- // simulate loss of network connectivity between member 2 and 3.
-
- member1Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
-
- member2Actor.underlyingActor().expectBehaviorStateChange();
-
- member3Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
-
- member2Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- // member 1 should switch to Follower as the RequestVote term is greater than its term. It
- // won't send back a RequestVoteReply in this case.
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
-
- // member 2 should switch to Candidate since member 1 didn't reply.
-
- member2Actor.underlyingActor().waitForBehaviorStateChange();
- verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
-
- assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 1, member3Context.getTermInformation().getCurrentTerm());
-
- // Send ElectionTimeout to member 3 to simulate no heartbeat from the Leader (member 1).
- // member 2 should switch to Candidate and send out RequestVote messages. member 1 will reply and
- // grant the vote but member 2 will drop the message to simulate loss of network connectivity.
-
- member1Actor.underlyingActor().clear();
- member1Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
- member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
-
- member2Actor.underlyingActor().clear();
- member2Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
- member2Actor.underlyingActor().dropMessagesToBehavior(AppendEntries.class);
-
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
- member3Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
-
- member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
-
- RequestVoteReply requestVoteReply = member3Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
- assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
- assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
-
- // when member 3 switches to Leader it will immediately send out heartbeat AppendEntries to
- // the followers. Wait for AppendEntries to member 1 and its AppendEntriesReply. The
- // AppendEntries message to member 2 is dropped.
-
- member1Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
- member2Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
- member3Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
- verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
- verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
-
- assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
-
- // member 2 is partitioned from the Leader (member 3) and hasn't received any messages. It
- // would get another ElectionTimeout so simulate that. member 1 should send back a reply
- // granting the vote. Messages (RequestVote and AppendEntries) from member 2 to member 3
- // are dropped to simulate loss of network connectivity. Note member 2 will increment its
- // election term to 3.
-
- member1Actor.underlyingActor().clear();
- member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
-
- member2Actor.underlyingActor().clear();
- member2Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
- member2Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
-
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().dropMessagesToBehavior(AppendEntries.class);
- member3Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
-
- member2Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member2Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
-
- requestVoteReply = member2Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
- assertEquals("getTerm", member2Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
- assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
-
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- member1Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
- member3Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
- member2Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
-
- // We end up with 2 partitioned leaders both leading member 1. The term for member 1 and 3
- // is 3 and member 3's term is 2.
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
- verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
- verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
-
- assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
-
- // Re-establish connectivity between member 2 and 3, ie stop dropping messages between
- // the 2. Send heartbeats (AppendEntries) from member 3. Both member 1 and 2 should send back
- // an unsuccessful AppendEntriesReply b/c their term (3) is greater than member 3's term (2).
- // This should cause member 3 to switch to Follower.
-
- RaftActorBehavior savedMember1Behavior = member1Actor.underlyingActor().behavior;
- RaftActorBehavior savedMember2Behavior = member2Actor.underlyingActor().behavior;
- RaftActorBehavior savedMember3Behavior = member3Actor.underlyingActor().behavior;
- long savedMember3Term = member3Context.getTermInformation().getCurrentTerm();
- String savedMember3VoterFor = member3Context.getTermInformation().getVotedFor();
-
- member1Actor.underlyingActor().clear();
- member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
-
- member2Actor.underlyingActor().clear();
- member2Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
-
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
-
- sendHeartbeat(member3Actor);
-
- member3Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
-
- AppendEntriesReply appendEntriesReply = member3Actor.underlyingActor().
- getCapturedMessage(AppendEntriesReply.class);
- assertEquals("isSuccess", false, appendEntriesReply.isSuccess());
- assertEquals("getTerm", 3, appendEntriesReply.getTerm());
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
- verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
- verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
-
- assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
-
- // Revert back to the partitioned leaders state to test the other sequence where member 2
- // sends heartbeats first before member 3. member 1 should return a successful
- // AppendEntriesReply b/c his term matches member 2's. member 3 should switch to Follower
- // as his term is less than member 2's.
-
- member1Actor.underlyingActor().behavior = savedMember1Behavior;
- member2Actor.underlyingActor().behavior = savedMember2Behavior;
- member3Actor.underlyingActor().behavior = savedMember3Behavior;
-
- member3Context.getTermInformation().update(savedMember3Term, savedMember3VoterFor);
-
- member1Actor.underlyingActor().clear();
- member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
-
- member2Actor.underlyingActor().clear();
- member2Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
-
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
-
- sendHeartbeat(member2Actor);
-
- member1Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
- member3Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
-
- member2Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
- verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
- verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
-
- assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
-
- testLog.info("testPartitionedLeadersScenario done");
- }
-
- @Test
- public void testPartitionedCandidateOnStartupScenario() throws Exception {
- testLog.info("Starting testPartitionedCandidateOnStartupScenario");
-
- TestActorRef<MemberActor> member1Actor = newMemberActor("member1") ;
- TestActorRef<MemberActor> member2Actor = newMemberActor("member2");
- TestActorRef<MemberActor> member3Actor = newMemberActor("member3");
-
- // Create member 2's behavior as Follower.
-
- MockRaftActorContext member2Context = newRaftActorContext("member2", member2Actor,
- ImmutableMap.<String,String>builder().
- put("member1", member1Actor.path().toString()).
- put("member3", member3Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
- member2Context.setConfigParams(member2ConfigParams);
-
- Follower member2Behavior = new Follower(member2Context);
- member2Actor.underlyingActor().behavior = member2Behavior;
-
- // Create member 1's behavior as Leader.
-
- MockRaftActorContext member1Context = newRaftActorContext("member1", member1Actor,
- ImmutableMap.<String,String>builder().
- put("member2", member2Actor.path().toString()).
- put("member3", member3Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
- member1Context.setConfigParams(member1ConfigParams);
-
- initializeLeaderBehavior(member1Actor, member1Context, 1);
-
- member2Actor.underlyingActor().clear();
- member3Actor.underlyingActor().clear();
-
- // Initialize the ReplicatedLog and election term info for member 1 and 2. The current term
- // will be 3 and the last term will be 2.
-
- SimpleReplicatedLog replicatedLog = new SimpleReplicatedLog();
- replicatedLog.append(new MockReplicatedLogEntry(2, 1, new MockPayload("")));
- replicatedLog.append(new MockReplicatedLogEntry(3, 1, new MockPayload("")));
-
- member1Context.setReplicatedLog(replicatedLog);
- member1Context.getTermInformation().update(3, "");
-
- member2Context.setReplicatedLog(replicatedLog);
- member2Context.getTermInformation().update(3, member1Context.getId());
-
- // Create member 3's behavior initially as a Candidate.
-
- MockRaftActorContext member3Context = newRaftActorContext("member3", member3Actor,
- ImmutableMap.<String,String>builder().
- put("member1", member1Actor.path().toString()).
- put("member2", member2Actor.path().toString()).build());
-
- DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
- member3Context.setConfigParams(member3ConfigParams);
-
- // Initialize the ReplicatedLog and election term info for Candidate member 3. The current term
- // will be 2 and the last term will be 1 so it is behind the leader's log.
-
- SimpleReplicatedLog candidateReplicatedLog = new SimpleReplicatedLog();
- candidateReplicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("")));
- candidateReplicatedLog.append(new MockReplicatedLogEntry(2, 1, new MockPayload("")));
-
- member3Context.setReplicatedLog(candidateReplicatedLog);
- member3Context.getTermInformation().update(2, member1Context.getId());
-
- // The member 3 Candidate will start a new term and send RequestVotes. However it will be
- // partitioned from the cluster by having member 1 and 2 drop its RequestVote messages.
-
- int numCandidateElections = 5;
- long candidateElectionTerm = member3Context.getTermInformation().getCurrentTerm() + numCandidateElections;
-
- member1Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class, numCandidateElections);
-
- member2Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class, numCandidateElections);
-
- Candidate member3Behavior = new Candidate(member3Context);
- member3Actor.underlyingActor().behavior = member3Behavior;
-
- // Send several additional ElectionTimeouts to Candidate member 3. Each ElectionTimeout will
- // start a new term so Candidate member 3's current term will be greater than the leader's
- // current term.
-
- for(int i = 0; i < numCandidateElections - 1; i++) {
- member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
- }
-
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Leader);
- verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
- verifyBehaviorState("member 3", member3Actor, RaftState.Candidate);
-
- assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", candidateElectionTerm,
- member3Context.getTermInformation().getCurrentTerm());
-
- // Now send a couple more ElectionTimeouts to Candidate member 3 with the partition resolved.
- //
- // On the first RequestVote, Leader member 1 should switch to Follower as its term (s) is less than
- // the RequestVote's term (8) from member 3. No RequestVoteReply should be sent by member 1.
- // Follower member 2 should update its term since it less than the RequestVote's term and
- // should return a RequestVoteReply but should not grant the vote as its last term and index
- // is greater than the RequestVote's lastLogTerm and lastLogIndex, ie member 2's log is later
- // or more up to date than member 3's.
- //
- // On the second RequestVote, both member 1 and 2 are followers so they should update their
- // term and return a RequestVoteReply but should not grant the vote.
-
- candidateElectionTerm += 2;
- for(int i = 0; i < 2; i++) {
- member1Actor.underlyingActor().clear();
- member1Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
- member2Actor.underlyingActor().clear();
- member2Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
-
- member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
-
- RequestVoteReply requestVoteReply = member3Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
- assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
- assertEquals("isVoteGranted", false, requestVoteReply.isVoteGranted());
- }
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
- verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
- verifyBehaviorState("member 3", member3Actor, RaftState.Candidate);
-
- // Even though member 3 didn't get voted for, member 1 and 2 should have updated their term
- // to member 3's.
-
- assertEquals("member 1 election term", candidateElectionTerm,
- member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", candidateElectionTerm,
- member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", candidateElectionTerm,
- member3Context.getTermInformation().getCurrentTerm());
-
- // At this point we have no leader. Candidate member 3 would continue to start new elections
- // but wouldn't be granted a vote. One of the 2 followers would eventually time out from
- // not having received a heartbeat from a leader and switch to candidate and start a new
- // election. We'll simulate that here by sending an ElectionTimeout to member 1.
-
- member1Actor.underlyingActor().clear();
- member1Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
- member2Actor.underlyingActor().clear();
- member2Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
- member3Actor.underlyingActor().clear();
- member3Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
- member3Actor.underlyingActor().expectBehaviorStateChange();
-
- member1Actor.tell(new ElectionTimeout(), ActorRef.noSender());
-
- member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
- member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
-
- // The RequestVoteReply should come from Follower member 2 and the vote should be granted
- // since member 2's last term and index matches member 1's.
-
- member1Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
-
- RequestVoteReply requestVoteReply = member1Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
- assertEquals("getTerm", member1Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
- assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
-
- // Candidate member 3 should change to follower as its term should be less than the
- // RequestVote term (member 1 started a new term higher than the other member's terms).
-
- member3Actor.underlyingActor().waitForBehaviorStateChange();
-
- verifyBehaviorState("member 1", member1Actor, RaftState.Leader);
- verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
- verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
-
- // newTerm should be 10.
-
- long newTerm = candidateElectionTerm + 1;
- assertEquals("member 1 election term", newTerm, member1Context.getTermInformation().getCurrentTerm());
- assertEquals("member 2 election term", newTerm, member2Context.getTermInformation().getCurrentTerm());
- assertEquals("member 3 election term", newTerm, member3Context.getTermInformation().getCurrentTerm());
-
- testLog.info("testPartitionedCandidateOnStartupScenario done");
- }
-}
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import scala.concurrent.duration.FiniteDuration;
public class LeaderTest extends AbstractLeaderTest {
static final String FOLLOWER_ID = "follower";
+ public static final String LEADER_ID = "leader";
private final TestActorRef<ForwardMessageToBehaviorActor> leaderActor = actorFactory.createTestActor(
Props.create(ForwardMessageToBehaviorActor.class), actorFactory.generateActorId("leader"));
assertEquals("Entry getTerm", term, appendEntries.getEntries().get(0).getTerm());
}
+
+ private RaftActorBehavior sendReplicate(MockRaftActorContext actorContext, long index){
+ MockRaftActorContext.MockPayload payload = new MockRaftActorContext.MockPayload("foo");
+ MockRaftActorContext.MockReplicatedLogEntry newEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 1, index, payload);
+ actorContext.getReplicatedLog().append(newEntry);
+ return leader.handleMessage(leaderActor, new Replicate(null, null, newEntry));
+ }
+
@Test
public void testHandleReplicateMessageSendAppendEntriesToFollower() throws Exception {
logStart("testHandleReplicateMessageSendAppendEntriesToFollower");
MockRaftActorContext.MockReplicatedLogEntry newEntry = new MockRaftActorContext.MockReplicatedLogEntry(
1, lastIndex + 1, payload);
actorContext.getReplicatedLog().append(newEntry);
- RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
- new Replicate(null, null, newEntry));
+ RaftActorBehavior raftBehavior = sendReplicate(actorContext, lastIndex+1);
// State should not change
assertTrue(raftBehavior instanceof Leader);
assertEquals("Entry payload", payload, appendEntries.getEntries().get(0).getData());
}
+ @Test
+ public void testMultipleReplicateShouldNotCauseDuplicateAppendEntriesToBeSent() throws Exception {
+ logStart("testHandleReplicateMessageSendAppendEntriesToFollower");
+
+ MockRaftActorContext actorContext = createActorContextWithFollower();
+ actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+ @Override
+ public FiniteDuration getHeartBeatInterval() {
+ return FiniteDuration.apply(5, TimeUnit.SECONDS);
+ }
+ });
+
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
+ leader = new Leader(actorContext);
+
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
+
+ followerActor.underlyingActor().clear();
+
+ for(int i=0;i<5;i++) {
+ sendReplicate(actorContext, lastIndex+i+1);
+ }
+
+ List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+ // We expect only 1 message to be sent because of two reasons,
+ // - an append entries reply was not received
+ // - the heartbeat interval has not expired
+ // In this scenario if multiple messages are sent they would likely be duplicates
+ assertEquals("The number of append entries collected should be 1", 1, allMessages.size());
+ }
+
+ @Test
+ public void testMultipleReplicateWithReplyShouldResultInAppendEntries() throws Exception {
+ logStart("testMultipleReplicateWithReplyShouldResultInAppendEntries");
+
+ MockRaftActorContext actorContext = createActorContextWithFollower();
+ actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+ @Override
+ public FiniteDuration getHeartBeatInterval() {
+ return FiniteDuration.apply(5, TimeUnit.SECONDS);
+ }
+ });
+
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
+ leader = new Leader(actorContext);
+
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
+
+ followerActor.underlyingActor().clear();
+
+ for(int i=0;i<3;i++) {
+ sendReplicate(actorContext, lastIndex+i+1);
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex + i + 1, term));
+
+ }
+
+ for(int i=3;i<5;i++) {
+ sendReplicate(actorContext, lastIndex + i + 1);
+ }
+
+ List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+ // We expect 4 here because the first 3 replicate got a reply and so the 4th entry would
+ // get sent to the follower - but not the 5th
+ assertEquals("The number of append entries collected should be 4", 4, allMessages.size());
+
+ for(int i=0;i<4;i++) {
+ long expected = allMessages.get(i).getEntries().get(0).getIndex();
+ assertEquals(expected, i+2);
+ }
+ }
+
+ @Test
+ public void testDuplicateAppendEntriesWillBeSentOnHeartBeat() throws Exception {
+ logStart("testDuplicateAppendEntriesWillBeSentOnHeartBeat");
+
+ MockRaftActorContext actorContext = createActorContextWithFollower();
+ actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+ @Override
+ public FiniteDuration getHeartBeatInterval() {
+ return FiniteDuration.apply(500, TimeUnit.MILLISECONDS);
+ }
+ });
+
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
+ leader = new Leader(actorContext);
+
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
+
+ followerActor.underlyingActor().clear();
+
+ sendReplicate(actorContext, lastIndex+1);
+
+ // Wait slightly longer than heartbeat duration
+ Uninterruptibles.sleepUninterruptibly(750, TimeUnit.MILLISECONDS);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+ assertEquals("The number of append entries collected should be 2", 2, allMessages.size());
+
+ assertEquals(1, allMessages.get(0).getEntries().size());
+ assertEquals(lastIndex+1, allMessages.get(0).getEntries().get(0).getIndex());
+ assertEquals(1, allMessages.get(1).getEntries().size());
+ assertEquals(lastIndex+1, allMessages.get(0).getEntries().get(0).getIndex());
+
+ }
+
+ @Test
+ public void testHeartbeatsAreAlwaysSentIfTheHeartbeatIntervalHasElapsed() throws Exception {
+ logStart("testHeartbeatsAreAlwaysSentIfTheHeartbeatIntervalHasElapsed");
+
+ MockRaftActorContext actorContext = createActorContextWithFollower();
+ actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+ @Override
+ public FiniteDuration getHeartBeatInterval() {
+ return FiniteDuration.apply(100, TimeUnit.MILLISECONDS);
+ }
+ });
+
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
+ leader = new Leader(actorContext);
+
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
+
+ followerActor.underlyingActor().clear();
+
+ for(int i=0;i<3;i++) {
+ Uninterruptibles.sleepUninterruptibly(150, TimeUnit.MILLISECONDS);
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+ }
+
+ List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+ assertEquals("The number of append entries collected should be 3", 3, allMessages.size());
+ }
+
+ @Test
+ public void testSendingReplicateImmediatelyAfterHeartbeatDoesReplicate() throws Exception {
+ logStart("testSendingReplicateImmediatelyAfterHeartbeatDoesReplicate");
+
+ MockRaftActorContext actorContext = createActorContextWithFollower();
+ actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+ @Override
+ public FiniteDuration getHeartBeatInterval() {
+ return FiniteDuration.apply(100, TimeUnit.MILLISECONDS);
+ }
+ });
+
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
+ leader = new Leader(actorContext);
+
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
+
+ followerActor.underlyingActor().clear();
+
+ Uninterruptibles.sleepUninterruptibly(150, TimeUnit.MILLISECONDS);
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+ sendReplicate(actorContext, lastIndex+1);
+
+ List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+ assertEquals("The number of append entries collected should be 2", 2, allMessages.size());
+
+ assertEquals(0, allMessages.get(0).getEntries().size());
+ assertEquals(1, allMessages.get(1).getEntries().size());
+ }
+
+
@Test
public void testHandleReplicateMessageWhenThereAreNoFollowers() throws Exception {
logStart("testHandleReplicateMessageWhenThereAreNoFollowers");
leader.handleMessage(leaderActor, new SendHeartBeat());
- InstallSnapshotMessages.InstallSnapshot isproto = MessageCollectorActor.expectFirstMatching(followerActor,
- InstallSnapshot.SERIALIZABLE_CLASS);
-
- InstallSnapshot is = (InstallSnapshot) SerializationUtils.fromSerializable(isproto);
+ InstallSnapshot is = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(snapshotIndex, is.getLastIncludedIndex());
}
new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
new MockRaftActorContext.MockPayload("D"));
+ actorContext.getReplicatedLog().append(entry);
+
//update follower timestamp
leader.markFollowerActive(FOLLOWER_ID);
assertTrue(raftBehavior instanceof Leader);
- MessageCollectorActor.expectFirstMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
}
@Test
leader.handleMessage(leaderActor, new Replicate(null, "state-id", entry));
- CaptureSnapshot cs = MessageCollectorActor.expectFirstMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
+
+ CaptureSnapshot cs = actorContext.getSnapshotManager().getCaptureSnapshot();
assertTrue(cs.isInstallSnapshotInitiated());
assertEquals(3, cs.getLastAppliedIndex());
// if an initiate is started again when first is in progress, it shouldnt initiate Capture
leader.handleMessage(leaderActor, new Replicate(null, "state-id", entry));
- List<CaptureSnapshot> captureSnapshots = MessageCollectorActor.getAllMatching(leaderActor, CaptureSnapshot.class);
- assertEquals("CaptureSnapshot should not get invoked when initiate is in progress", 1, captureSnapshots.size());
+ Assert.assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
}
@Test
// check if installsnapshot gets called with the correct values.
- InstallSnapshot installSnapshot = (InstallSnapshot) SerializationUtils.fromSerializable(
- MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshotMessages.InstallSnapshot.class));
+ InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertNotNull(installSnapshot.getData());
assertEquals(snapshotIndex, installSnapshot.getLastIncludedIndex());
leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
FOLLOWER_ID, installSnapshot.getChunkIndex(), true));
- installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(2, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
FOLLOWER_ID, installSnapshot.getChunkIndex(), true));
- installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
// Send snapshot reply one more time and make sure that a new snapshot message should not be sent to follower
followerActor.underlyingActor().clear();
leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
FOLLOWER_ID, installSnapshot.getChunkIndex(), true));
- installSnapshot = MessageCollectorActor.getFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ installSnapshot = MessageCollectorActor.getFirstMatching(followerActor, InstallSnapshot.class);
Assert.assertNull(installSnapshot);
}
ByteString bs = toByteString(leadersSnapshot);
leader.setSnapshot(Optional.of(bs));
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
leader.handleMessage(leaderActor, new SendHeartBeat());
- installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(1, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE, installSnapshot.getLastChunkHashCode());
+ assertEquals(AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE, installSnapshot.getLastChunkHashCode().get().intValue());
int hashCode = installSnapshot.getData().hashCode();
leader.handleMessage(followerActor, new InstallSnapshotReply(installSnapshot.getTerm(),
FOLLOWER_ID, 1, true));
- installSnapshot = MessageCollectorActor.expectFirstMatching(
- followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+ installSnapshot = MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
assertEquals(2, installSnapshot.getChunkIndex());
assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(hashCode, installSnapshot.getLastChunkHashCode());
+ assertEquals(hashCode, installSnapshot.getLastChunkHashCode().get().intValue());
}
@Test
@Override
protected MockRaftActorContext createActorContext(ActorRef actorRef) {
- return createActorContext("leader", actorRef);
+ return createActorContext(LEADER_ID, actorRef);
}
private MockRaftActorContext createActorContextWithFollower() {
MockRaftActorContext leaderActorContext = createActorContext();
MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
+ followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
Follower follower = new Follower(followerActorContext);
followerActor.underlyingActor().setBehavior(follower);
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(FOLLOWER_ID, followerActor.path().toString());
+ Map<String, String> leaderPeerAddresses = new HashMap<>();
+ leaderPeerAddresses.put(FOLLOWER_ID, followerActor.path().toString());
- leaderActorContext.setPeerAddresses(peerAddresses);
+ leaderActorContext.setPeerAddresses(leaderPeerAddresses);
leaderActorContext.getReplicatedLog().removeFrom(0);
MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
followerActorContext.setConfigParams(configParams);
+ followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
Follower follower = new Follower(followerActorContext);
followerActor.underlyingActor().setBehavior(follower);
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+import akka.actor.ActorRef;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.SimpleReplicatedLog;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+
+/**
+ * A leader election scenario test that partitions a candidate when trying to join a cluster on startup.
+ *
+ * @author Thomas Pantelis
+ */
+public class PartitionedCandidateOnStartupElectionScenarioTest extends AbstractLeaderElectionScenarioTest {
+
+ private final int numCandidateElections = 5;
+ private long candidateElectionTerm;
+
+ @Test
+ public void runTest() throws Exception {
+ testLog.info("PartitionedCandidateOnStartupElectionScenarioTest starting");
+
+ setupInitialMember1AndMember2Behaviors();
+
+ setupPartitionedCandidateMember3AndSendElectionTimeouts();
+
+ resolvePartitionAndSendElectionTimeoutsToCandidateMember3();
+
+ sendElectionTimeoutToFollowerMember1();
+
+ testLog.info("PartitionedCandidateOnStartupElectionScenarioTest ending");
+ }
+
+ private void sendElectionTimeoutToFollowerMember1() throws Exception {
+ testLog.info("sendElectionTimeoutToFollowerMember1 starting");
+
+ // At this point we have no leader. Candidate member 3 would continue to start new elections
+ // but wouldn't be granted a vote. One of the 2 followers would eventually time out from
+ // not having received a heartbeat from a leader and switch to candidate and start a new
+ // election. We'll simulate that here by sending an ElectionTimeout to member 1.
+
+ member1Actor.clear();
+ member1Actor.expectMessageClass(RequestVoteReply.class, 1);
+ member2Actor.clear();
+ member2Actor.expectMessageClass(RequestVote.class, 1);
+ member3Actor.clear();
+ member3Actor.expectMessageClass(RequestVote.class, 1);
+ member3Actor.expectBehaviorStateChange();
+
+ member1ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member2Actor.waitForExpectedMessages(RequestVote.class);
+ member3Actor.waitForExpectedMessages(RequestVote.class);
+
+ // The RequestVoteReply should come from Follower member 2 and the vote should be granted
+ // since member 2's last term and index matches member 1's.
+
+ member1Actor.waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member1Actor.getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member1Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ // Candidate member 3 should change to follower as its term should be less than the
+ // RequestVote term (member 1 started a new term higher than the other member's terms).
+
+ member3Actor.waitForBehaviorStateChange();
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Leader);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ // newTerm should be 10.
+
+ long newTerm = candidateElectionTerm + 1;
+ assertEquals("member 1 election term", newTerm, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", newTerm, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", newTerm, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("sendElectionTimeoutToFollowerMember1 ending");
+ }
+
+ private void resolvePartitionAndSendElectionTimeoutsToCandidateMember3() throws Exception {
+ testLog.info("resolvePartitionAndSendElectionTimeoutsToCandidateMember3 starting");
+
+ // Now send a couple more ElectionTimeouts to Candidate member 3 with the partition resolved.
+ //
+ // On the first RequestVote, Leader member 1 should switch to Follower as its term (s) is less than
+ // the RequestVote's term (8) from member 3. No RequestVoteReply should be sent by member 1.
+ // Follower member 2 should update its term since it less than the RequestVote's term and
+ // should return a RequestVoteReply but should not grant the vote as its last term and index
+ // is greater than the RequestVote's lastLogTerm and lastLogIndex, ie member 2's log is later
+ // or more up to date than member 3's.
+ //
+ // On the second RequestVote, both member 1 and 2 are followers so they should update their
+ // term and return a RequestVoteReply but should not grant the vote.
+
+ candidateElectionTerm += 2;
+ for(int i = 0; i < 2; i++) {
+ member1Actor.clear();
+ member1Actor.expectMessageClass(RequestVote.class, 1);
+ member2Actor.clear();
+ member2Actor.expectMessageClass(RequestVote.class, 1);
+ member3Actor.clear();
+ member3Actor.expectMessageClass(RequestVoteReply.class, 1);
+
+ member3ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.waitForExpectedMessages(RequestVote.class);
+ member2Actor.waitForExpectedMessages(RequestVote.class);
+
+ member3Actor.waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member3Actor.getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", false, requestVoteReply.isVoteGranted());
+ }
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Candidate);
+
+ // Even though member 3 didn't get voted for, member 1 and 2 should have updated their term
+ // to member 3's.
+
+ assertEquals("member 1 election term", candidateElectionTerm,
+ member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", candidateElectionTerm,
+ member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", candidateElectionTerm,
+ member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("resolvePartitionAndSendElectionTimeoutsToCandidateMember3 ending");
+ }
+
+ private void setupPartitionedCandidateMember3AndSendElectionTimeouts() {
+ testLog.info("setupPartitionedCandidateMember3AndSendElectionTimeouts starting");
+
+ // Create member 3's behavior initially as a Candidate.
+
+ member3Context = newRaftActorContext("member3", member3ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1ActorRef.path().toString()).
+ put("member2", member2ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
+ member3Context.setConfigParams(member3ConfigParams);
+
+ // Initialize the ReplicatedLog and election term info for Candidate member 3. The current term
+ // will be 2 and the last term will be 1 so it is behind the leader's log.
+
+ SimpleReplicatedLog candidateReplicatedLog = new SimpleReplicatedLog();
+ candidateReplicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("")));
+ candidateReplicatedLog.append(new MockReplicatedLogEntry(2, 1, new MockPayload("")));
+
+ member3Context.setReplicatedLog(candidateReplicatedLog);
+ member3Context.getTermInformation().update(2, member1Context.getId());
+
+ // The member 3 Candidate will start a new term and send RequestVotes. However it will be
+ // partitioned from the cluster by having member 1 and 2 drop its RequestVote messages.
+
+ candidateElectionTerm = member3Context.getTermInformation().getCurrentTerm() + numCandidateElections;
+
+ member1Actor.dropMessagesToBehavior(RequestVote.class, numCandidateElections);
+
+ member2Actor.dropMessagesToBehavior(RequestVote.class, numCandidateElections);
+
+ Candidate member3Behavior = new Candidate(member3Context);
+ member3Actor.behavior = member3Behavior;
+
+ // Send several additional ElectionTimeouts to Candidate member 3. Each ElectionTimeout will
+ // start a new term so Candidate member 3's current term will be greater than the leader's
+ // current term.
+
+ for(int i = 0; i < numCandidateElections - 1; i++) {
+ member3ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+ }
+
+ member1Actor.waitForExpectedMessages(RequestVote.class);
+ member2Actor.waitForExpectedMessages(RequestVote.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Leader);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Candidate);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", candidateElectionTerm,
+ member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("setupPartitionedCandidateMember3AndSendElectionTimeouts ending");
+ }
+
+ private void setupInitialMember1AndMember2Behaviors() throws Exception {
+ testLog.info("setupInitialMember1AndMember2Behaviors starting");
+
+ // Create member 2's behavior as Follower.
+
+ member2Context = newRaftActorContext("member2", member2ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1ActorRef.path().toString()).
+ put("member3", member3ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
+ member2Context.setConfigParams(member2ConfigParams);
+
+ Follower member2Behavior = new Follower(member2Context);
+ member2Actor.behavior = member2Behavior;
+
+ // Create member 1's behavior as Leader.
+
+ member1Context = newRaftActorContext("member1", member1ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member2", member2ActorRef.path().toString()).
+ put("member3", member3ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
+ member1Context.setConfigParams(member1ConfigParams);
+
+ initializeLeaderBehavior(member1Actor, member1Context, 1);
+
+ member2Actor.clear();
+ member3Actor.clear();
+
+ // Initialize the ReplicatedLog and election term info for member 1 and 2. The current term
+ // will be 3 and the last term will be 2.
+
+ SimpleReplicatedLog replicatedLog = new SimpleReplicatedLog();
+ replicatedLog.append(new MockReplicatedLogEntry(2, 1, new MockPayload("")));
+ replicatedLog.append(new MockReplicatedLogEntry(3, 1, new MockPayload("")));
+
+ member1Context.setReplicatedLog(replicatedLog);
+ member1Context.getTermInformation().update(3, "");
+
+ member2Context.setReplicatedLog(replicatedLog);
+ member2Context.getTermInformation().update(3, member1Context.getId());
+
+ testLog.info("setupInitialMember1AndMember2Behaviors ending");
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+import akka.actor.ActorRef;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+
+/**
+ * A leader election scenario test that causes partitioned leaders by dropping messages between 2 members.
+ *
+ * @author Thomas Pantelis
+ */
+public class PartitionedLeadersElectionScenarioTest extends AbstractLeaderElectionScenarioTest {
+
+ /**
+ * This test sets up a scenario with partitioned leaders member 2 and 3 where partitioned leader 3
+ * sends a heartbeat first when connectivity is re-established.
+ */
+ @Test
+ public void runTest1() throws Exception {
+ testLog.info("PartitionedLeadersElectionScenarioTest 1 starting");
+
+ setupInitialMemberBehaviors();
+
+ sendInitialElectionTimeoutToFollowerMember2();
+
+ sendInitialElectionTimeoutToFollowerMember3();
+
+ sendElectionTimeoutToNowCandidateMember2();
+
+ resolvePartitionedLeadersWithLeaderMember3SendingHeartbeatFirst();
+
+ testLog.info("PartitionedLeadersElectionScenarioTest 1 ending");
+ }
+
+ /**
+ * This test sets up a scenario with partitioned leaders member 2 and 3 where partitioned leader 2
+ * sends a heartbeat first when connectivity is re-established.
+ */
+ @Test
+ public void runTest2() throws Exception {
+ testLog.info("PartitionedLeadersElectionScenarioTest 2 starting");
+
+ setupInitialMemberBehaviors();
+
+ sendInitialElectionTimeoutToFollowerMember2();
+
+ sendInitialElectionTimeoutToFollowerMember3();
+
+ sendElectionTimeoutToNowCandidateMember2();
+
+ resolvePartitionedLeadersWithLeaderMember2SendingHeartbeatFirst();
+
+ testLog.info("PartitionedLeadersElectionScenarioTest 2 ending");
+ }
+
+ private void resolvePartitionedLeadersWithLeaderMember2SendingHeartbeatFirst() {
+ testLog.info("resolvePartitionedLeadersWithLeaderMember2SendingHeartbeatFirst starting");
+
+ // Re-establish connectivity between member 2 and 3, ie stop dropping messages between
+ // the 2. Send heartbeats (AppendEntries) from partitioned leader member 2. Follower member 1 should
+ // return a successful AppendEntriesReply b/c its term matches member 2's. member 3 should switch to
+ // Follower as its term is less than member 2's.
+
+ member1Actor.clear();
+ member1Actor.expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.clear();
+ member2Actor.expectMessageClass(AppendEntriesReply.class, 1);
+
+ member3Actor.clear();
+ member3Actor.expectMessageClass(AppendEntries.class, 1);
+
+ sendHeartbeat(member2ActorRef);
+
+ member1Actor.waitForExpectedMessages(AppendEntries.class);
+ member3Actor.waitForExpectedMessages(AppendEntries.class);
+
+ member2Actor.waitForExpectedMessages(AppendEntriesReply.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("resolvePartitionedLeadersWithLeaderMember2SendingHeartbeatFirst ending");
+ }
+
+ private void resolvePartitionedLeadersWithLeaderMember3SendingHeartbeatFirst() throws Exception {
+ testLog.info("resolvePartitionedLeadersWithLeaderMember3SendingHeartbeatFirst starting");
+
+ // Re-establish connectivity between member 2 and 3, ie stop dropping messages between
+ // the 2. Send heartbeats (AppendEntries) from now leader member 3. Both member 1 and 2 should send
+ // back an unsuccessful AppendEntriesReply b/c their term (3) is greater than member 3's term (2).
+ // This should cause member 3 to switch to Follower.
+
+ member1Actor.clear();
+ member1Actor.expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.clear();
+ member2Actor.expectMessageClass(AppendEntries.class, 1);
+
+ member3Actor.clear();
+ member3Actor.expectMessageClass(AppendEntriesReply.class, 1);
+
+ sendHeartbeat(member3ActorRef);
+
+ member3Actor.waitForExpectedMessages(AppendEntriesReply.class);
+
+ AppendEntriesReply appendEntriesReply = member3Actor.getCapturedMessage(AppendEntriesReply.class);
+ assertEquals("isSuccess", false, appendEntriesReply.isSuccess());
+ assertEquals("getTerm", 3, appendEntriesReply.getTerm());
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("resolvePartitionedLeadersWithLeaderMember3SendingHeartbeatFirst ending");
+ }
+
+ private void sendElectionTimeoutToNowCandidateMember2() throws Exception {
+ testLog.info("sendElectionTimeoutToNowCandidateMember2 starting");
+
+ // member 2, now a candidate, is partitioned from the Leader (now member 3) and hasn't received any
+ // messages. It would get another ElectionTimeout so simulate that. member 1 should send back a reply
+ // granting the vote. Messages (RequestVote and AppendEntries) from member 2 to member 3
+ // are dropped to simulate loss of network connectivity. Note member 2 will increment its
+ // election term to 3.
+
+ member1Actor.clear();
+ member1Actor.expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.clear();
+ member2Actor.expectMessageClass(RequestVoteReply.class, 1);
+ member2Actor.expectMessageClass(AppendEntriesReply.class, 1);
+
+ member3Actor.clear();
+ member3Actor.dropMessagesToBehavior(AppendEntries.class);
+ member3Actor.dropMessagesToBehavior(RequestVote.class);
+
+ member2ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member2Actor.waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member2Actor.getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member2Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ member3Actor.waitForExpectedMessages(RequestVote.class);
+
+ member1Actor.waitForExpectedMessages(AppendEntries.class);
+ member3Actor.waitForExpectedMessages(AppendEntries.class);
+ member2Actor.waitForExpectedMessages(AppendEntriesReply.class);
+
+ // We end up with 2 partitioned leaders both leading member 1. The term for member 1 and 3
+ // is 3 and member 3's term is 2.
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("sendElectionTimeoutToNowCandidateMember2 ending");
+ }
+
+ private void sendInitialElectionTimeoutToFollowerMember3() throws Exception {
+ testLog.info("sendInitialElectionTimeoutToFollowerMember3 starting");
+
+ // Send ElectionTimeout to member 3 to simulate no heartbeat from a Leader (originally member 1).
+ // member 3 should switch to Candidate and send out RequestVote messages. member 1, now a follower,
+ // should reply and grant the vote but member 2 will drop the message to simulate loss of network
+ // connectivity between members 2 and 3. member 3 should switch to leader.
+
+ member1Actor.clear();
+ member1Actor.expectMessageClass(RequestVote.class, 1);
+ member1Actor.expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.clear();
+ member2Actor.dropMessagesToBehavior(RequestVote.class);
+ member2Actor.dropMessagesToBehavior(AppendEntries.class);
+
+ member3Actor.clear();
+ member3Actor.expectMessageClass(RequestVoteReply.class, 1);
+ member3Actor.expectMessageClass(AppendEntriesReply.class, 1);
+
+ member3ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.waitForExpectedMessages(RequestVote.class);
+ member2Actor.waitForExpectedMessages(RequestVote.class);
+ member3Actor.waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member3Actor.getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ // when member 3 switches to Leader it will immediately send out heartbeat AppendEntries to
+ // the followers. Wait for AppendEntries to member 1 and its AppendEntriesReply. The
+ // AppendEntries message to member 2 is dropped.
+
+ member1Actor.waitForExpectedMessages(AppendEntries.class);
+ member2Actor.waitForExpectedMessages(AppendEntries.class);
+ member3Actor.waitForExpectedMessages(AppendEntriesReply.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("sendInitialElectionTimeoutToFollowerMember3 ending");
+ }
+
+ private void sendInitialElectionTimeoutToFollowerMember2() {
+ testLog.info("sendInitialElectionTimeoutToFollowerMember2 starting");
+
+ // Send ElectionTimeout to member 2 to simulate no heartbeat from the Leader (member 1).
+ // member 2 should switch to Candidate, start new term 2 and send out RequestVote messages.
+ // member 1 will switch to Follower b/c its term is less than the member 2's RequestVote term, also it
+ // won't send back a reply. member 3 will drop the message (ie won't forward it to its behavior) to
+ // simulate loss of network connectivity between members 2 and 3.
+
+ member1Actor.expectMessageClass(RequestVote.class, 1);
+
+ member2Actor.expectBehaviorStateChange();
+
+ member3Actor.dropMessagesToBehavior(RequestVote.class);
+
+ member2ActorRef.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.waitForExpectedMessages(RequestVote.class);
+ member3Actor.waitForExpectedMessages(RequestVote.class);
+
+ // Original leader member 1 should switch to Follower as the RequestVote term is greater than its
+ // term. It won't send back a RequestVoteReply in this case.
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+
+ // member 2 should switch to Candidate since it didn't get a RequestVoteReply from the other 2 members.
+
+ member2Actor.waitForBehaviorStateChange();
+ verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
+
+ assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 1, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("sendInitialElectionTimeoutToFollowerMember2 ending");
+ }
+
+ private void setupInitialMemberBehaviors() throws Exception {
+ testLog.info("setupInitialMemberBehaviors starting");
+
+ // Create member 2's behavior initially as Follower
+
+ member2Context = newRaftActorContext("member2", member2ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1ActorRef.path().toString()).
+ put("member3", member3ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
+ member2Context.setConfigParams(member2ConfigParams);
+
+ Follower member2Behavior = new Follower(member2Context);
+ member2Actor.behavior = member2Behavior;
+
+ // Create member 3's behavior initially as Follower
+
+ member3Context = newRaftActorContext("member3", member3ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1ActorRef.path().toString()).
+ put("member2", member2ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
+ member3Context.setConfigParams(member3ConfigParams);
+
+ Follower member3Behavior = new Follower(member3Context);
+ member3Actor.behavior = member3Behavior;
+
+ // Create member 1's behavior initially as Leader
+
+ member1Context = newRaftActorContext("member1", member1ActorRef,
+ ImmutableMap.<String,String>builder().
+ put("member2", member2ActorRef.path().toString()).
+ put("member3", member3ActorRef.path().toString()).build());
+
+ DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
+ member1Context.setConfigParams(member1ConfigParams);
+
+ initializeLeaderBehavior(member1Actor, member1Context, 2);
+
+ member2Actor.clear();
+ member3Actor.clear();
+
+ testLog.info("setupInitialMemberBehaviors ending");
+ }
+}
/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.utils;
+package org.opendaylight.controller.cluster.raft.utils;
-import static org.junit.Assert.assertEquals;
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.Uninterruptibles;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-import akka.dispatch.Futures;
-import akka.japi.Procedure;
-import akka.persistence.PersistentConfirmation;
-import akka.persistence.PersistentId;
-import akka.persistence.PersistentImpl;
-import akka.persistence.PersistentRepr;
-import akka.persistence.journal.japi.AsyncWriteJournal;
+/**
+ * An akka AsyncWriteJournal implementation that stores data in memory. This is intended for testing.
+ *
+ * @author Thomas Pantelis
+ */
public class InMemoryJournal extends AsyncWriteJournal {
+ static final Logger LOG = LoggerFactory.getLogger(InMemoryJournal.class);
+
private static final Map<String, Map<Long, Object>> journals = new ConcurrentHashMap<>();
private static final Map<String, CountDownLatch> deleteMessagesCompleteLatches = new ConcurrentHashMap<>();
+ private static final Map<String, CountDownLatch> writeMessagesCompleteLatches = new ConcurrentHashMap<>();
+
private static final Map<String, CountDownLatch> blockReadMessagesLatches = new ConcurrentHashMap<>();
public static void addEntry(String persistenceId, long sequenceNr, Object data) {
journals.clear();
}
+ @SuppressWarnings("unchecked")
+ public static <T> List<T> get(String persistenceId, Class<T> type) {
+ Map<Long, Object> journalMap = journals.get(persistenceId);
+ if(journalMap == null) {
+ return Collections.<T>emptyList();
+ }
+
+ synchronized (journalMap) {
+ List<T> journal = new ArrayList<>(journalMap.size());
+ for(Object entry: journalMap.values()) {
+ if(type.isInstance(entry)) {
+ journal.add((T) entry);
+ }
+ }
+
+ return journal;
+ }
+ }
+
public static Map<Long, Object> get(String persistenceId) {
- Map<Long, Object> journal = journals.get(persistenceId);
- return journal != null ? journal : Collections.<Long, Object>emptyMap();
+ Map<Long, Object> journalMap = journals.get(persistenceId);
+ return journalMap != null ? journalMap : Collections.<Long, Object>emptyMap();
+ }
+
+ public static void dumpJournal(String persistenceId) {
+ StringBuilder builder = new StringBuilder(String.format("Journal log for %s:", persistenceId));
+ Map<Long, Object> journalMap = journals.get(persistenceId);
+ if(journalMap != null) {
+ synchronized (journalMap) {
+ for(Map.Entry<Long, Object> e: journalMap.entrySet()) {
+ builder.append("\n ").append(e.getKey()).append(" = ").append(e.getValue());
+ }
+ }
+ }
+
+ LOG.info(builder.toString());
}
public static void waitForDeleteMessagesComplete(String persistenceId) {
- assertEquals("Recovery complete", true, Uninterruptibles.awaitUninterruptibly(
- deleteMessagesCompleteLatches.get(persistenceId), 5, TimeUnit.SECONDS));
+ if(!Uninterruptibles.awaitUninterruptibly(deleteMessagesCompleteLatches.get(persistenceId), 5, TimeUnit.SECONDS)) {
+ throw new AssertionError("Delete messages did not complete");
+ }
+ }
+
+ public static void waitForWriteMessagesComplete(String persistenceId) {
+ if(!Uninterruptibles.awaitUninterruptibly(writeMessagesCompleteLatches.get(persistenceId), 5, TimeUnit.SECONDS)) {
+ throw new AssertionError("Journal write messages did not complete");
+ }
}
public static void addDeleteMessagesCompleteLatch(String persistenceId) {
deleteMessagesCompleteLatches.put(persistenceId, new CountDownLatch(1));
}
+ public static void addWriteMessagesCompleteLatch(String persistenceId, int count) {
+ writeMessagesCompleteLatches.put(persistenceId, new CountDownLatch(count));
+ }
+
public static void addBlockReadMessagesLatch(String persistenceId, CountDownLatch latch) {
blockReadMessagesLatches.put(persistenceId, latch);
}
@Override
public Future<Long> doAsyncReadHighestSequenceNr(String persistenceId, long fromSequenceNr) {
- return Futures.successful(-1L);
+ // Akka calls this during recovery.
+
+ Map<Long, Object> journal = journals.get(persistenceId);
+ if(journal == null) {
+ return Futures.successful(-1L);
+ }
+
+ synchronized (journal) {
+ long highest = -1;
+ for (Long seqNr : journal.keySet()) {
+ if(seqNr.longValue() >= fromSequenceNr && seqNr.longValue() > highest) {
+ highest = seqNr.longValue();
+ }
+ }
+
+ return Futures.successful(highest);
+ }
}
@Override
}
synchronized (journal) {
+ LOG.trace("doAsyncWriteMessages: id: {}: seqNr: {}, payload: {}", repr.persistenceId(),
+ repr.sequenceNr(), repr.payload());
journal.put(repr.sequenceNr(), repr.payload());
}
+
+ CountDownLatch latch = writeMessagesCompleteLatches.get(repr.persistenceId());
+ if(latch != null) {
+ latch.countDown();
+ }
}
+
return null;
}
}, context().dispatcher());
@Override
public Future<Void> doAsyncDeleteMessagesTo(String persistenceId, long toSequenceNr, boolean permanent) {
+ LOG.trace("doAsyncDeleteMessagesTo: {}", toSequenceNr);
Map<Long, Object> journal = journals.get(persistenceId);
if(journal != null) {
synchronized (journal) {
/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore.utils;
+package org.opendaylight.controller.cluster.raft.utils;
import akka.dispatch.Futures;
import akka.japi.Option;
import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.snapshot.japi.SnapshotStore;
import com.google.common.collect.Iterables;
-import scala.concurrent.Future;
+import com.google.common.collect.Lists;
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-import org.opendaylight.controller.cluster.raft.Snapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+/**
+ * An akka SnapshotStore implementation that stores data in memory. This is intended for testing.
+ *
+ * @author Thomas Pantelis
+ */
public class InMemorySnapshotStore extends SnapshotStore {
+ static final Logger LOG = LoggerFactory.getLogger(InMemorySnapshotStore.class);
+
private static Map<String, List<StoredSnapshot>> snapshots = new ConcurrentHashMap<>();
- public static void addSnapshot(String persistentId, Snapshot snapshot) {
+ public static void addSnapshot(String persistentId, Object snapshot) {
List<StoredSnapshot> snapshotList = snapshots.get(persistentId);
if(snapshotList == null) {
snapshots.put(persistentId, snapshotList);
}
- snapshotList.add(new StoredSnapshot(new SnapshotMetadata(persistentId, snapshotList.size(),
- System.currentTimeMillis()), snapshot));
+ synchronized (snapshotList) {
+ snapshotList.add(new StoredSnapshot(new SnapshotMetadata(persistentId, snapshotList.size(),
+ System.currentTimeMillis()), snapshot));
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public static <T> List<T> getSnapshots(String persistentId, Class<T> type) {
+ List<StoredSnapshot> stored = snapshots.get(persistentId);
+ if(stored == null) {
+ return Collections.emptyList();
+ }
+
+ List<T> retList;
+ synchronized (stored) {
+ retList = Lists.newArrayListWithCapacity(stored.size());
+ for(StoredSnapshot s: stored) {
+ if(type.isInstance(s.getData())) {
+ retList.add((T) s.getData());
+ }
+ }
+ }
+
+ return retList;
}
public static void clear() {
snapshotList = new ArrayList<>();
snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
}
- snapshotList.add(new StoredSnapshot(snapshotMetadata, o));
+ synchronized (snapshotList) {
+ snapshotList.add(new StoredSnapshot(snapshotMetadata, o));
+ }
return Futures.successful(null);
}
int deleteIndex = -1;
- for(int i=0;i<snapshotList.size(); i++){
- StoredSnapshot snapshot = snapshotList.get(i);
- if(snapshotMetadata.equals(snapshot.getMetadata())){
- deleteIndex = i;
- break;
+ synchronized (snapshotList) {
+ for(int i=0;i<snapshotList.size(); i++){
+ StoredSnapshot snapshot = snapshotList.get(i);
+ if(snapshotMetadata.equals(snapshot.getMetadata())){
+ deleteIndex = i;
+ break;
+ }
}
- }
- if(deleteIndex != -1){
- snapshotList.remove(deleteIndex);
+ if(deleteIndex != -1){
+ snapshotList.remove(deleteIndex);
+ }
}
-
}
@Override
- public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+ public void doDelete(String persistentId, SnapshotSelectionCriteria snapshotSelectionCriteria)
throws Exception {
- List<StoredSnapshot> snapshotList = snapshots.get(s);
+ List<StoredSnapshot> snapshotList = snapshots.get(persistentId);
if(snapshotList == null){
return;
}
- // TODO : This is a quick and dirty implementation. Do actual match later.
- snapshotList.clear();
- snapshots.remove(s);
+ synchronized (snapshotList) {
+ Iterator<StoredSnapshot> iter = snapshotList.iterator();
+ while(iter.hasNext()) {
+ StoredSnapshot s = iter.next();
+ LOG.trace("doDelete: sequenceNr: {}, maxSequenceNr: {}", s.getMetadata().sequenceNr(),
+ snapshotSelectionCriteria.maxSequenceNr());
+
+ if(s.getMetadata().sequenceNr() <= snapshotSelectionCriteria.maxSequenceNr()) {
+ iter.remove();
+ }
+ }
+ }
}
private static class StoredSnapshot {
import akka.actor.UntypedActor;
import akka.pattern.Patterns;
import akka.util.Timeout;
+import com.google.common.base.Predicate;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.Assert;
+import org.opendaylight.controller.cluster.raft.SerializationUtils;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
public class MessageCollectorActor extends UntypedActor {
private static final String ARE_YOU_READY = "ARE_YOU_READY";
+ private static final String GET_ALL_MESSAGES = "get-all-messages";
+ private static final String CLEAR_MESSAGES = "clear-messages";
private final List<Object> messages = new ArrayList<>();
return;
}
- if(message instanceof String){
- if("get-all-messages".equals(message)){
- getSender().tell(new ArrayList<>(messages), getSelf());
- }
+ if(GET_ALL_MESSAGES.equals(message)) {
+ getSender().tell(new ArrayList<>(messages), getSelf());
+ } else if(CLEAR_MESSAGES.equals(message)) {
+ messages.clear();
} else if(message != null) {
- messages.add(message);
+ messages.add(SerializationUtils.fromSerializable(message));
}
}
public static List<Object> getAllMessages(ActorRef actor) throws Exception {
FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
Timeout operationTimeout = new Timeout(operationDuration);
- Future<Object> future = Patterns.ask(actor, "get-all-messages", operationTimeout);
+ Future<Object> future = Patterns.ask(actor, GET_ALL_MESSAGES, operationTimeout);
return (List<Object>) Await.result(future, operationDuration);
}
+ public static void clearMessages(ActorRef actor) {
+ actor.tell(CLEAR_MESSAGES, ActorRef.noSender());
+ }
+
/**
* Get the first message that matches the specified class
* @param actor
return null;
}
+ public static <T> List<T> expectMatching(ActorRef actor, Class<T> clazz, int count) {
+ int timeout = 5000;
+ List<T> messages = Collections.emptyList();
+ for(int i = 0; i < timeout / 50; i++) {
+ try {
+ messages = getAllMatching(actor, clazz);
+ if(messages.size() >= count) {
+ return messages;
+ }
+ } catch (Exception e) {}
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail(String.format("Expected %d messages of type %s. Actual received was %d: %s", count, clazz,
+ messages.size(), messages));
+ return null;
+ }
+
public static <T> T expectFirstMatching(ActorRef actor, Class<T> clazz) {
return expectFirstMatching(actor, clazz, 5000);
}
return null;
}
+ public static <T> T expectFirstMatching(ActorRef actor, Class<T> clazz, Predicate<T> matcher) {
+ int timeout = 5000;
+ T lastMessage = null;
+ for(int i = 0; i < timeout / 50; i++) {
+ try {
+ List<T> messages = getAllMatching(actor, clazz);
+ for(T msg: messages) {
+ if(matcher.apply(msg)) {
+ return msg;
+ }
+
+ lastMessage = msg;
+ }
+ } catch (Exception e) {}
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail(String.format("Expected specific message of type %s. Last message received was: %s", clazz, lastMessage));
+ return null;
+ }
+
public static <T> List<T> getAllMatching(ActorRef actor, Class<T> clazz) throws Exception {
List<Object> allMessages = getAllMessages(actor);
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.raft.utils;
-
-import akka.dispatch.Futures;
-import akka.japi.Procedure;
-import akka.persistence.PersistentConfirmation;
-import akka.persistence.PersistentId;
-import akka.persistence.PersistentImpl;
-import akka.persistence.PersistentRepr;
-import akka.persistence.journal.japi.AsyncWriteJournal;
-import com.google.common.collect.Maps;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import scala.concurrent.Future;
-
-public class MockAkkaJournal extends AsyncWriteJournal {
-
- private static Map<Long, Object> journal = Maps.newLinkedHashMap();
-
- public static void addToJournal(long sequenceNr, Object message) {
- journal.put(sequenceNr, message);
- }
-
- public static void clearJournal() {
- journal.clear();
- }
-
- @Override
- public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
- long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
-
- return Futures.future(new Callable<Void>() {
- @Override
- public Void call() throws Exception {
- for (Map.Entry<Long,Object> entry : journal.entrySet()) {
- PersistentRepr persistentMessage =
- new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId, false, null, null);
- replayCallback.apply(persistentMessage);
- }
- return null;
- }
- }, context().dispatcher());
- }
-
- @Override
- public Future<Long> doAsyncReadHighestSequenceNr(String s, long l) {
- return Futures.successful(new Long(0));
- }
-
- @Override
- public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> persistentReprs) {
- return Futures.successful(null);
- }
-
- @Override
- public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> persistentConfirmations) {
- return Futures.successful(null);
- }
-
- @Override
- public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> persistentIds, boolean b) {
- return Futures.successful(null);
- }
-
- @Override
- public Future<Void> doAsyncDeleteMessagesTo(String s, long l, boolean b) {
- return Futures.successful(null);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.utils;
-
-import akka.dispatch.Futures;
-import akka.japi.Option;
-import akka.persistence.SelectedSnapshot;
-import akka.persistence.SnapshotMetadata;
-import akka.persistence.SnapshotSelectionCriteria;
-import akka.persistence.snapshot.japi.SnapshotStore;
-import org.opendaylight.controller.cluster.raft.Snapshot;
-import scala.concurrent.Future;
-
-
-public class MockSnapshotStore extends SnapshotStore {
-
- private static Snapshot mockSnapshot;
- private static String persistenceId;
-
- public static void setMockSnapshot(Snapshot s) {
- mockSnapshot = s;
- }
-
- public static void setPersistenceId(String pId) {
- persistenceId = pId;
- }
-
- @Override
- public Future<Option<SelectedSnapshot>> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) {
- if (mockSnapshot == null) {
- return Futures.successful(Option.<SelectedSnapshot>none());
- }
-
- SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345);
- SelectedSnapshot selectedSnapshot =
- new SelectedSnapshot(smd, mockSnapshot);
- return Futures.successful(Option.some(selectedSnapshot));
- }
-
- @Override
- public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
- return null;
- }
-
- @Override
- public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
-
- }
-
- @Override
- public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
-
- }
-
- @Override
- public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception {
-
- }
-}
mock-snapshot-store {
# Class name of the plugin.
- class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore"
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore"
# Dispatcher for the plugin actor.
plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
}
mock-journal {
# Class name of the plugin.
- class = "org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal"
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
# Dispatcher for the plugin actor.
plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-api</artifactId>
<packaging>bundle</packaging>
* @see AsyncDataBroker
* @see TransactionChainFactory
*/
-public interface DataBroker extends TransactionFactory, AsyncDataBroker<InstanceIdentifier<?>, DataObject, DataChangeListener>, BindingService, TransactionChainFactory<InstanceIdentifier<?>, DataObject> {
+public interface DataBroker extends AsyncDataBroker<InstanceIdentifier<?>, DataObject, DataChangeListener>,
+ TransactionChainFactory<InstanceIdentifier<?>, DataObject>, TransactionFactory, BindingService, DataTreeChangeService {
/**
* {@inheritDoc}
*/
import java.util.Collection;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Identifier;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
/**
* Represents modification of Data Object.
*
*/
-public interface DataObjectModification<T extends DataObject> extends Identifiable<PathArgument> {
+public interface DataObjectModification<T extends DataObject> extends org.opendaylight.yangtools.concepts.Identifiable<PathArgument> {
enum ModificationType {
/**
@Nonnull ModificationType getModificationType();
/**
- * Returns after state of top level container.
+ * Returns before-state of top level container. Implementations are encouraged,
+ * but not required to provide this state.
+ *
+ * @param root Class representing data container
+ * @return State of object before modification. Null if subtree was not present,
+ * or the implementation cannot provide the state.
+ */
+ @Nullable T getDataBefore();
+
+ /**
+ * Returns after-state of top level container.
*
* @param root Class representing data container
* @return State of object after modification. Null if subtree is not present.
*/
@Nonnull Collection<DataObjectModification<? extends DataObject>> getModifiedChildren();
+ /**
+ * Returns container child modification if {@code child} was modified by this
+ * modification.
+ *
+ * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
+ *
+ * @param child Type of child - must be only container
+ * @return Modification of {@code child} if {@code child} was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied {@code child} class is not valid child according
+ * to generated model.
+ */
+ @Nullable <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(@Nonnull Class<C> child);
+
+ /**
+ * Returns augmentation child modification if {@code augmentation} was modified by this
+ * modification.
+ *
+ * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
+ *
+ * @param augmentation Type of augmentation - must be only container
+ * @return Modification of {@code augmentation} if {@code augmentation} was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied {@code augmentation} class is not valid augmentation
+ * according to generated model.
+ */
+ @Nullable <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(@Nonnull Class<C> augmentation);
+
+
+ /**
+ * Returns child list item modification if {@code child} was modified by this modification.
+ *
+ * @param listItem Type of list item - must be list item with key
+ * @param listKey List item key
+ * @return Modification of {@code child} if {@code child} was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied {@code listItem} class is not valid child according
+ * to generated model.
+ */
+ <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
+ @Nonnull Class<C> listItem,@Nonnull K listKey);
+
+ /**
+ * Returns a child modification if a node identified by {@code childArgument} was modified by
+ * this modification.
+ *
+ * @param childArgument Path Argument of child node
+ * @return Modification of child identified by {@code childArgument} if {@code childArgument}
+ * was modified, null otherwise.
+ * @throws IllegalArgumentException If supplied path argument is not valid child according to
+ * generated model.
+ *
+ */
+ @Nullable DataObjectModification<? extends DataObject> getModifiedChild(PathArgument childArgument);
}
import java.util.Collection;
import java.util.EventListener;
import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.binding.DataObject;
/**
* Interface implemented by classes interested in receiving notifications about
* in that it provides a cursor-based view of the change, which has potentially
* lower overhead and allow more flexible consumption of change event.
*/
-public interface DataTreeChangeListener extends EventListener {
+public interface DataTreeChangeListener<T extends DataObject> extends EventListener {
/**
* Invoked when there was data change for the supplied path, which was used
* to register this listener.
*
* @param changes Collection of change events, may not be null or empty.
*/
- void onDataTreeChanged(@Nonnull Collection<DataTreeModification> changes);
+ void onDataTreeChanged(@Nonnull Collection<DataTreeModification<T>> changes);
}
import javax.annotation.Nonnull;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
/**
* A {@link DOMService} which allows users to register for changes to a
* your listener using {@link ListenerRegistration#close()} to stop
* delivery of change events.
*/
- @Nonnull <L extends DataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier treeId, @Nonnull L listener);
+ @Nonnull <T extends DataObject,L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier<T> treeId, @Nonnull L listener);
}
\ No newline at end of file
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yangtools.concepts.Immutable;
import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
* A unique identifier for a particular subtree. It is composed of the logical
* data store type and the instance identifier of the root node.
*/
-public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentifier>, Serializable {
+public final class DataTreeIdentifier<T extends DataObject> implements Immutable, Path<DataTreeIdentifier<?>>, Serializable {
private static final long serialVersionUID = 1L;
- private final InstanceIdentifier<?> rootIdentifier;
+ private final InstanceIdentifier<T> rootIdentifier;
private final LogicalDatastoreType datastoreType;
- public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<?> rootIdentifier) {
+ public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> rootIdentifier) {
this.datastoreType = Preconditions.checkNotNull(datastoreType);
this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
}
*
* @return Instance identifier corresponding to the root node.
*/
- public @Nonnull InstanceIdentifier<?> getRootIdentifier() {
+ public @Nonnull InstanceIdentifier<T> getRootIdentifier() {
return rootIdentifier;
}
@Override
- public boolean contains(final DataTreeIdentifier other) {
+ public boolean contains(final DataTreeIdentifier<?> other) {
return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
}
if (!(obj instanceof DataTreeIdentifier)) {
return false;
}
- DataTreeIdentifier other = (DataTreeIdentifier) obj;
+ final DataTreeIdentifier<?> other = (DataTreeIdentifier<?>) obj;
if (datastoreType != other.datastoreType) {
return false;
}
* @author Tony Tkacik <ttkacik@cisco.com>
*
*/
-public interface DataTreeModification {
+public interface DataTreeModification<T extends DataObject> {
/**
* Get the modification root path. This is the path of the root node
*
* @return absolute path of the root node
*/
- @Nonnull DataTreeIdentifier getRootPath();
+ @Nonnull DataTreeIdentifier<T> getRootPath();
/**
* Get the modification root node.
*
* @return modification root node
*/
- @Nonnull DataObjectModification<? extends DataObject> getRootNode();
+ @Nonnull DataObjectModification<T> getRootNode();
}
package org.opendaylight.controller.sal.binding.api;
import java.util.EventListener;
-
import org.opendaylight.yangtools.yang.binding.Notification;
/**
* capture of this interface.
*
* @param <T> the interested notification type
+ * @deprecated Deprecated unused API.
*/
+@Deprecated
public interface NotificationListener<T extends Notification> extends EventListener {
/**
* Invoked to deliver a notification.
import java.util.EventListener;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.md.sal.common.api.notify.NotificationPublishService;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.Notification;
* Interface for a notification service that provides publish/subscribe capabilities for YANG
* modeled notifications. This interface is a combination of the {@link NotificationService} and
* {@link NotificationPublishService} interfaces.
+ *
+ * @deprecated Please use {@link org.opendaylight.controller.md.sal.binding.api.NotificationPublishService}.
*/
+@Deprecated
public interface NotificationProviderService extends NotificationService, NotificationPublishService<Notification> {
/**
* </pre>
* The <code>onStart</code> method will be invoked when someone publishes a <code>Start</code> notification and
* the <code>onStop</code> method will be invoked when someone publishes a <code>Stop</code> notification.
+ *
+ * @deprecated Please use {@link org.opendaylight.controller.md.sal.binding.api.NotificationService} instead.
*/
+@Deprecated
public interface NotificationService extends BindingAwareService {
/**
* Registers a generic listener implementation for a specified notification type.
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-broker-impl</artifactId>
<packaging>bundle</packaging>
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.ModuleIdentifier;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
+import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
import org.opendaylight.controller.sal.core.api.Broker;
public class BindingNotificationAdapterModule extends AbstractBindingNotificationAdapterModule {
- public BindingNotificationAdapterModule(ModuleIdentifier identifier, DependencyResolver dependencyResolver) {
+ public BindingNotificationAdapterModule(final ModuleIdentifier identifier, final DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
- public BindingNotificationAdapterModule(ModuleIdentifier identifier, DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.md.sal.binding.impl.BindingNotificationAdapterModule oldModule, java.lang.AutoCloseable oldInstance) {
+ public BindingNotificationAdapterModule(final ModuleIdentifier identifier, final DependencyResolver dependencyResolver, final org.opendaylight.controller.config.yang.md.sal.binding.impl.BindingNotificationAdapterModule oldModule, final java.lang.AutoCloseable oldInstance) {
super(identifier, dependencyResolver, oldModule, oldInstance);
}
final BindingToNormalizedNodeCodec codec = getBindingMappingServiceDependency();
final Broker.ProviderSession session = getDomAsyncBrokerDependency().registerProvider(new DummyDOMProvider());
final DOMNotificationService notifService = session.getService(DOMNotificationService.class);
- return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), notifService, SingletonHolder.INVOKER_FACTORY);
+ return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), notifService);
}
}
*/
package org.opendaylight.controller.config.yang.md.sal.binding.impl;
-import com.google.common.util.concurrent.ListeningExecutorService;
+import org.opendaylight.controller.md.sal.binding.compat.HydrogenNotificationBrokerImpl;
+
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.controller.md.sal.binding.api.NotificationService;
+import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
/**
*
public final class NotificationBrokerImplModule extends
org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractNotificationBrokerImplModule {
- public NotificationBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
- org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ public NotificationBrokerImplModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+ final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
- public NotificationBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
- org.opendaylight.controller.config.api.DependencyResolver dependencyResolver,
- NotificationBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ public NotificationBrokerImplModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+ final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver,
+ final NotificationBrokerImplModule oldModule, final java.lang.AutoCloseable oldInstance) {
super(identifier, dependencyResolver, oldModule, oldInstance);
}
@Override
public java.lang.AutoCloseable createInstance() {
+ final NotificationPublishService notificationPublishService = getNotificationPublishAdapterDependency();
+ final NotificationService notificationService = getNotificationAdapterDependency();
+
+ if(notificationPublishService != null & notificationService != null) {
+ return new HeliumNotificationProviderServiceAdapter(notificationPublishService, notificationService);
+ }
+
/*
* FIXME: Switch to new broker (which has different threading model)
* once this change is communicated with downstream users or
* we will have adapter implementation which will honor Helium
* threading model for notifications.
*/
- ListeningExecutorService listeningExecutor = SingletonHolder.getDefaultNotificationExecutor();
- NotificationBrokerImpl broker = new NotificationBrokerImpl(listeningExecutor);
- return broker;
+
+ return new HydrogenNotificationBrokerImpl(SingletonHolder.getDefaultNotificationExecutor());
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.yangtools.yang.binding.Notification;
-import com.google.common.base.Preconditions;
-
/**
* An aggregated listener registration. This is a result of registering an invoker which can handle multiple
* interfaces at the same time. In order to support correct delivery, we need to maintain per-type registrations
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;
-
import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.ListenerRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-
-public class NotificationBrokerImpl implements NotificationProviderService, AutoCloseable {
- private static final Logger LOG = LoggerFactory.getLogger(NotificationBrokerImpl.class);
+@Deprecated
+public class HydrogenNotificationBrokerImpl implements NotificationProviderService, AutoCloseable {
+ private static final Logger LOG = LoggerFactory.getLogger(HydrogenNotificationBrokerImpl.class);
private final ListenerRegistry<NotificationInterestListener> interestListeners =
ListenerRegistry.create();
private final AtomicReference<ListenerMapGeneration> listeners = new AtomicReference<>(new ListenerMapGeneration());
private final ExecutorService executor;
- public NotificationBrokerImpl(final ExecutorService executor) {
+ public HydrogenNotificationBrokerImpl(final ExecutorService executor) {
this.executor = Preconditions.checkNotNull(executor);
}
@Override
public void publish(final Notification notification, final ExecutorService service) {
- for (NotificationListenerRegistration<?> r : listeners.get().listenersFor(notification)) {
+ for (final NotificationListenerRegistration<?> r : listeners.get().listenersFor(notification)) {
service.submit(new NotifyTask(r, notification));
}
}
synchronized (this) {
final Multimap<Class<? extends Notification>, NotificationListenerRegistration<?>> newListeners =
mutableListeners();
- for (NotificationListenerRegistration<?> reg : registrations) {
+ for (final NotificationListenerRegistration<?> reg : registrations) {
newListeners.put(reg.getType(), reg);
}
}
// Notifications are dispatched out of lock...
- for (NotificationListenerRegistration<?> reg : registrations) {
+ for (final NotificationListenerRegistration<?> reg : registrations) {
announceNotificationSubscription(reg.getType());
}
}
final Multimap<Class<? extends Notification>, NotificationListenerRegistration<?>> newListeners =
mutableListeners();
- for (NotificationListenerRegistration<?> reg : registrations) {
+ for (final NotificationListenerRegistration<?> reg : registrations) {
newListeners.remove(reg.getType(), reg);
}
for (final ListenerRegistration<NotificationInterestListener> listener : interestListeners) {
try {
listener.getInstance().onNotificationSubscribtion(notification);
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.warn("Listener {} reported unexpected error on notification {}",
listener.getInstance(), notification, e);
}
@Override
public ListenerRegistration<org.opendaylight.yangtools.yang.binding.NotificationListener> registerNotificationListener(final org.opendaylight.yangtools.yang.binding.NotificationListener listener) {
- final NotificationInvoker invoker = SingletonHolder.INVOKER_FACTORY.invokerFor(listener);
+ final NotificationInvoker invoker = NotificationInvoker.invokerFor(listener);
final Set<Class<? extends Notification>> types = invoker.getSupportedNotifications();
final NotificationListenerRegistration<?>[] regs = new NotificationListenerRegistration<?>[types.size()];
// Populate the registrations...
int i = 0;
- for (Class<? extends Notification> type : types) {
- regs[i] = new AggregatedNotificationListenerRegistration<Notification, Object>(type, invoker.getInvocationProxy(), regs) {
+ for (final Class<? extends Notification> type : types) {
+ regs[i] = new AggregatedNotificationListenerRegistration<Notification, Object>(type, invoker, regs) {
@Override
protected void removeRegistration() {
// Nothing to do, will be cleaned up by parent (below)
@Override
protected void removeRegistration() {
removeRegistrations(regs);
- for (ListenerRegistration<?> reg : regs) {
+ for (final ListenerRegistration<?> reg : regs) {
reg.close();
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import java.util.Arrays;
import java.util.Collection;
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.compat;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.reflect.TypeToken;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.yangtools.yang.binding.Notification;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.binding.util.NotificationListenerInvoker;
+import org.opendaylight.yangtools.yang.common.QName;
+
+final class NotificationInvoker implements org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> {
+
+ private final NotificationListener delegate;
+ private final Map<Class<? extends Notification>,InvokerContext> invokers;
+
+
+ private NotificationInvoker(final NotificationListener listener) {
+ delegate = listener;
+ final Map<Class<? extends Notification>, InvokerContext> builder = new HashMap<>();
+ for(final TypeToken<?> ifaceToken : TypeToken.of(listener.getClass()).getTypes().interfaces()) {
+ Class<?> iface = ifaceToken.getRawType();
+ if(NotificationListener.class.isAssignableFrom(iface) && BindingReflections.isBindingClass(iface)) {
+ @SuppressWarnings("unchecked")
+ final Class<? extends NotificationListener> listenerType = (Class<? extends NotificationListener>) iface;
+ final NotificationListenerInvoker invoker = NotificationListenerInvoker.from(listenerType);
+ for(final Class<? extends Notification> type : getNotificationTypes(listenerType)) {
+ builder.put(type, new InvokerContext(BindingReflections.findQName(type) , invoker));
+ }
+ }
+ }
+ invokers = ImmutableMap.copyOf(builder);
+ }
+
+ public static NotificationInvoker invokerFor(final NotificationListener listener) {
+ return new NotificationInvoker(listener);
+ }
+
+ public Set<Class<? extends Notification>> getSupportedNotifications() {
+ return invokers.keySet();
+ }
+
+ @Override
+ public void onNotification(final Notification notification) {
+ getContext(notification.getImplementedInterface()).invoke(notification);
+ };
+
+ private InvokerContext getContext(final Class<?> type) {
+ return invokers.get(type);
+ }
+
+ @SuppressWarnings("unchecked")
+ private static Set<Class<? extends Notification>> getNotificationTypes(final Class<? extends org.opendaylight.yangtools.yang.binding.NotificationListener> type) {
+ // TODO: Investigate possibility and performance impact if we cache this or expose
+ // it from NotificationListenerInvoker
+ final Set<Class<? extends Notification>> ret = new HashSet<>();
+ for(final Method method : type.getMethods()) {
+ if(BindingReflections.isNotificationCallback(method)) {
+ final Class<? extends Notification> notification = (Class<? extends Notification>) method.getParameterTypes()[0];
+ ret.add(notification);
+ }
+ }
+ return ret;
+ }
+
+ private class InvokerContext {
+
+ private final QName name;
+ private final NotificationListenerInvoker invoker;
+
+ private InvokerContext(final QName name, final NotificationListenerInvoker invoker) {
+ this.name = name;
+ this.invoker = invoker;
+ }
+
+ public void invoke(final Notification notification) {
+ invoker.invokeNotification(delegate, name, notification);
+ }
+
+ }
+
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.sal.binding.impl;
+package org.opendaylight.controller.md.sal.binding.compat;
import org.opendaylight.yangtools.yang.binding.Notification;
import org.slf4j.Logger;
import java.util.Set;
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
/**
* The DataBrokerImpl simply defers to the DOMDataBroker for all its operations.
*
*/
-public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker implements DataBroker {
+public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker implements DataBroker, DataTreeChangeService {
static final Factory<DataBroker> BUILDER_FACTORY = new BindingDOMAdapterBuilder.Factory<DataBroker>() {
}
};
+ private final DataTreeChangeService treeChangeService;
public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec) {
super(domDataBroker, codec);
- }
-
- @Deprecated
- public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec, final SchemaService schemaService) {
- super(domDataBroker, codec,schemaService);
+ final DOMDataTreeChangeService domTreeChange = (DOMDataTreeChangeService) domDataBroker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
+ if(domTreeChange != null) {
+ treeChangeService = BindingDOMDataTreeChangeServiceAdapter.create(codec, domTreeChange);
+ } else {
+ treeChangeService = null;
+ }
}
@Override
}
@Override
- protected DataBroker createInstance(BindingToNormalizedNodeCodec codec,
- ClassToInstanceMap<DOMService> delegates) {
- DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
+ protected DataBroker createInstance(final BindingToNormalizedNodeCodec codec,
+ final ClassToInstanceMap<DOMService> delegates) {
+ final DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
return new BindingDOMDataBrokerAdapter(domDataBroker, codec);
}
+ }
-
+ @Override
+ public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(
+ final DataTreeIdentifier<T> treeId, final L listener) {
+ if(treeChangeService == null) {
+ throw new UnsupportedOperationException("Underlying data broker does not expose DOMDataTreeChangeService.");
+ }
+ return treeChangeService.registerDataTreeChangeListener(treeId, listener);
}
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Adapter wrapping Binding {@link DataTreeChangeListener} and exposing
+ * it as {@link DOMDataTreeChangeListener} and translated DOM events
+ * to their Binding equivalent.
+ *
+ */
+final class BindingDOMDataTreeChangeListenerAdapter<T extends DataObject> implements DOMDataTreeChangeListener {
+
+ private final BindingToNormalizedNodeCodec codec;
+ private final DataTreeChangeListener<T> listener;
+ private final LogicalDatastoreType store;
+
+ BindingDOMDataTreeChangeListenerAdapter(final BindingToNormalizedNodeCodec codec, final DataTreeChangeListener<T> listener,
+ final LogicalDatastoreType store) {
+ this.codec = Preconditions.checkNotNull(codec);
+ this.listener = Preconditions.checkNotNull(listener);
+ this.store = Preconditions.checkNotNull(store);
+ }
+
+ @Override
+ public void onDataTreeChanged(final Collection<DataTreeCandidate> domChanges) {
+ final Collection<DataTreeModification<T>> bindingChanges = LazyDataTreeModification.from(codec, domChanges, store);
+ listener.onDataTreeChanged(bindingChanges);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+
+/**
+ *
+ * Adapter exposing Binding {@link DataTreeChangeService} and wrapping
+ * {@link DOMDataTreeChangeService} and is responsible for translation
+ * and instantiation of {@link BindingDOMDataTreeChangeListenerAdapter}
+ * adapters.
+ *
+ * Each registered {@link DataTreeChangeListener} is wrapped using
+ * adapter and registered directly to DOM service.
+ */
+final class BindingDOMDataTreeChangeServiceAdapter implements DataTreeChangeService {
+
+ private final BindingToNormalizedNodeCodec codec;
+ private final DOMDataTreeChangeService dataTreeChangeService;
+
+ private BindingDOMDataTreeChangeServiceAdapter(final BindingToNormalizedNodeCodec codec,
+ final DOMDataTreeChangeService dataTreeChangeService) {
+ this.codec = Preconditions.checkNotNull(codec);
+ this.dataTreeChangeService = Preconditions.checkNotNull(dataTreeChangeService);
+ }
+
+ static DataTreeChangeService create(final BindingToNormalizedNodeCodec codec,
+ final DOMDataTreeChangeService dataTreeChangeService) {
+ return new BindingDOMDataTreeChangeServiceAdapter(codec, dataTreeChangeService);
+ }
+
+ @Override
+ public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(
+ final DataTreeIdentifier<T> treeId, final L listener) {
+ final DOMDataTreeIdentifier domIdentifier = toDomTreeIdentifier(treeId);
+ final BindingDOMDataTreeChangeListenerAdapter<T> domListener = new BindingDOMDataTreeChangeListenerAdapter<>(codec,listener, treeId.getDatastoreType());
+ final ListenerRegistration<BindingDOMDataTreeChangeListenerAdapter<T>> domReg = dataTreeChangeService.registerDataTreeChangeListener(domIdentifier, domListener);
+ return new BindingDataTreeChangeListenerRegistration<>(listener,domReg);
+ }
+
+ private DOMDataTreeIdentifier toDomTreeIdentifier(final DataTreeIdentifier<?> treeId) {
+ final YangInstanceIdentifier domPath = codec.toYangInstanceIdentifier(treeId.getRootIdentifier());
+ return new DOMDataTreeIdentifier(treeId.getDatastoreType(), domPath);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.reflect.TypeToken;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.yangtools.yang.binding.Notification;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.binding.util.NotificationListenerInvoker;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+class BindingDOMNotificationListenerAdapter implements DOMNotificationListener {
+
+ private final BindingNormalizedNodeSerializer codec;
+ private final NotificationListener delegate;
+ private final Map<SchemaPath,NotificationListenerInvoker> invokers;
+
+ public BindingDOMNotificationListenerAdapter(final BindingNormalizedNodeSerializer codec, final NotificationListener delegate) {
+ this.codec = codec;
+ this.delegate = delegate;
+ this.invokers = createInvokerMapFor(delegate.getClass());
+ }
+
+ @Override
+ public void onNotification(@Nonnull final DOMNotification notification) {
+ final Notification baNotification = deserialize(notification);
+ final QName notificationQName = notification.getType().getLastComponent();
+ getInvoker(notification.getType()).invokeNotification(delegate, notificationQName, baNotification);
+ }
+
+ private Notification deserialize(final DOMNotification notification) {
+ if(notification instanceof LazySerializedDOMNotification) {
+ return ((LazySerializedDOMNotification) notification).getBindingData();
+ }
+ return codec.fromNormalizedNodeNotification(notification.getType(), notification.getBody());
+ }
+
+ private NotificationListenerInvoker getInvoker(final SchemaPath type) {
+ return invokers.get(type);
+ }
+
+ protected Set<SchemaPath> getSupportedNotifications() {
+ return invokers.keySet();
+ }
+
+ public static Map<SchemaPath, NotificationListenerInvoker> createInvokerMapFor(final Class<? extends NotificationListener> implClz) {
+ final Map<SchemaPath, NotificationListenerInvoker> builder = new HashMap<>();
+ for(final TypeToken<?> ifaceToken : TypeToken.of(implClz).getTypes().interfaces()) {
+ Class<?> iface = ifaceToken.getRawType();
+ if(NotificationListener.class.isAssignableFrom(iface) && BindingReflections.isBindingClass(iface)) {
+ @SuppressWarnings("unchecked")
+ final Class<? extends NotificationListener> listenerType = (Class<? extends NotificationListener>) iface;
+ final NotificationListenerInvoker invoker = NotificationListenerInvoker.from(listenerType);
+ for(final SchemaPath path : getNotificationTypes(listenerType)) {
+ builder.put(path, invoker);
+ }
+ }
+ }
+ return ImmutableMap.copyOf(builder);
+ }
+
+ private static Set<SchemaPath> getNotificationTypes(final Class<? extends NotificationListener> type) {
+ // TODO: Investigate possibility and performance impact if we cache this or expose
+ // it from NotificationListenerInvoker
+ final Set<SchemaPath> ret = new HashSet<>();
+ for(final Method method : type.getMethods()) {
+ if(BindingReflections.isNotificationCallback(method)) {
+ final Class<?> notification = method.getParameterTypes()[0];
+ final QName name = BindingReflections.findQName(notification);
+ ret.add(SchemaPath.create(true, name));
+ }
+ }
+ return ret;
+ }
+}
\ No newline at end of file
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Set;
import java.util.concurrent.TimeUnit;
-import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
import org.opendaylight.controller.md.sal.dom.api.DOMService;
import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
public class BindingDOMNotificationPublishServiceAdapter implements NotificationPublishService, AutoCloseable {
private final BindingNormalizedNodeSerializer codecRegistry;
private final DOMNotificationPublishService domPublishService;
- public BindingDOMNotificationPublishServiceAdapter(BindingNormalizedNodeSerializer codecRegistry, DOMNotificationPublishService domPublishService) {
+ public BindingDOMNotificationPublishServiceAdapter(final BindingNormalizedNodeSerializer codecRegistry, final DOMNotificationPublishService domPublishService) {
this.codecRegistry = codecRegistry;
this.domPublishService = domPublishService;
}
}
private DOMNotification toDomNotification(final Notification notification) {
- final ContainerNode domNotification = codecRegistry.toNormalizedNodeNotification(notification);
- return new DOMNotificationImpl(domNotification);
+ return LazySerializedDOMNotification.create(codecRegistry, notification);
}
@Override
}
- private static class DOMNotificationImpl implements DOMNotification {
-
- private final SchemaPath type;
- private final ContainerNode body;
-
- public DOMNotificationImpl(final ContainerNode body) {
- this.type = SchemaPath.create(true, body.getIdentifier().getNodeType());
- this.body = body;
- }
-
- @Nonnull
- @Override
- public SchemaPath getType() {
- return this.type;
- }
-
- @Nonnull
- @Override
- public ContainerNode getBody() {
- return this.body;
- }
- }
-
protected static class Builder extends BindingDOMAdapterBuilder<NotificationPublishService> {
@Override
}
@Override
- protected NotificationPublishService createInstance(BindingToNormalizedNodeCodec codec,
- ClassToInstanceMap<DOMService> delegates) {
- BindingNormalizedNodeSerializer codecReg = codec.getCodecRegistry();
- DOMNotificationPublishService domPublish = delegates.getInstance(DOMNotificationPublishService.class);
+ protected NotificationPublishService createInstance(final BindingToNormalizedNodeCodec codec,
+ final ClassToInstanceMap<DOMService> delegates) {
+ final BindingNormalizedNodeSerializer codecReg = codec.getCodecRegistry();
+ final DOMNotificationPublishService domPublish = delegates.getInstance(DOMNotificationPublishService.class);
return new BindingDOMNotificationPublishServiceAdapter(codecReg, domPublish);
}
import com.google.common.collect.ClassToInstanceMap;
import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
import java.util.Set;
-import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.NotificationService;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
public class BindingDOMNotificationServiceAdapter implements NotificationService, AutoCloseable {
};
private final BindingNormalizedNodeSerializer codec;
private final DOMNotificationService domNotifService;
- private final NotificationInvokerFactory notificationInvokerFactory;
- public BindingDOMNotificationServiceAdapter(BindingNormalizedNodeSerializer codec, DOMNotificationService domNotifService, NotificationInvokerFactory notificationInvokerFactory) {
+ public BindingDOMNotificationServiceAdapter(final BindingNormalizedNodeSerializer codec, final DOMNotificationService domNotifService) {
this.codec = codec;
this.domNotifService = domNotifService;
- this.notificationInvokerFactory = notificationInvokerFactory;
}
@Override
- public <T extends NotificationListener> ListenerRegistration<T> registerNotificationListener(T listener) {
- final NotificationInvokerFactory.NotificationInvoker invoker = notificationInvokerFactory.invokerFor(listener);
- final DOMNotificationListener domListener = new NotificationInvokerImpl(invoker);
- final Collection<SchemaPath> schemaPaths = convertNotifTypesToSchemaPath(invoker.getSupportedNotifications());
- final ListenerRegistration<DOMNotificationListener> domRegistration =
- domNotifService.registerNotificationListener(domListener, schemaPaths);
+ public <T extends NotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener) {
+ final BindingDOMNotificationListenerAdapter domListener = new BindingDOMNotificationListenerAdapter(codec, listener);
+ final ListenerRegistration<BindingDOMNotificationListenerAdapter> domRegistration =
+ domNotifService.registerNotificationListener(domListener, domListener.getSupportedNotifications());
return new ListenerRegistrationImpl<>(listener, domRegistration);
}
-
-
- private Collection<SchemaPath> convertNotifTypesToSchemaPath(Set<Class<? extends Notification>> notificationTypes) {
- final List<SchemaPath> schemaPaths = new ArrayList<>();
- for (Class<? extends Notification> notificationType : notificationTypes) {
- schemaPaths.add(SchemaPath.create(true, BindingReflections.findQName(notificationType)));
- }
- return schemaPaths;
- }
-
@Override
public void close() throws Exception {
private static class ListenerRegistrationImpl<T extends NotificationListener> extends AbstractListenerRegistration<T> {
private final ListenerRegistration<?> listenerRegistration;
- public ListenerRegistrationImpl(T listener, ListenerRegistration<?> listenerRegistration) {
+ public ListenerRegistrationImpl(final T listener, final ListenerRegistration<?> listenerRegistration) {
super(listener);
this.listenerRegistration = listenerRegistration;
}
}
}
- private class NotificationInvokerImpl implements DOMNotificationListener {
- private final NotificationInvokerFactory.NotificationInvoker invoker;
-
- public NotificationInvokerImpl(NotificationInvokerFactory.NotificationInvoker invoker) {
- this.invoker = invoker;
- }
-
- @Override
- public void onNotification(@Nonnull DOMNotification notification) {
- final Notification baNotification =
- codec.fromNormalizedNodeNotification(notification.getType(), notification.getBody());
- invoker.getInvocationProxy().onNotification(baNotification);
-
- }
- }
-
private static class Builder extends BindingDOMAdapterBuilder<NotificationService> {
-
@Override
- protected NotificationService createInstance(BindingToNormalizedNodeCodec codec,
- ClassToInstanceMap<DOMService> delegates) {
- DOMNotificationService domNotification = delegates.getInstance(DOMNotificationService.class);
- NotificationInvokerFactory invokerFactory = SingletonHolder.INVOKER_FACTORY;
- return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), domNotification, invokerFactory);
+ protected NotificationService createInstance(final BindingToNormalizedNodeCodec codec,
+ final ClassToInstanceMap<DOMService> delegates) {
+ final DOMNotificationService domNotification = delegates.getInstance(DOMNotificationService.class);
+ return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), domNotification);
}
@Override
public Set<? extends Class<? extends DOMService>> getRequiredDelegates() {
return ImmutableSet.of(DOMNotificationService.class);
}
-
-
-
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+class BindingDataTreeChangeListenerRegistration<L extends DataTreeChangeListener<?>> extends AbstractListenerRegistration<L> {
+
+ private final ListenerRegistration<?> domReg;
+
+ BindingDataTreeChangeListenerRegistration(final L listener, final ListenerRegistration<?> domReg) {
+ super(listener);
+ this.domReg = Preconditions.checkNotNull(domReg);
+ }
+
+ @Override
+ protected void removeRegistration() {
+ domReg.close();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Optional;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+
+/**
+ *
+ * Defines structural mapping of Normalized Node to Binding data
+ * addressable by Instance Identifier.
+ *
+ * Not all binding data are addressable by instance identifier
+ * and there are some differences.
+ *
+ * See {@link #NOT_ADDRESSABLE},{@link #INVISIBLE_CONTAINER},{@link #VISIBLE_CONTAINER}
+ * for more details.
+ *
+ *
+ */
+enum BindingStructuralType {
+
+ /**
+ * DOM Item is not addressable in Binding Instance Identifier,
+ * data is not lost, but are available only via parent object.
+ *
+ * Such types of data are leaf-lists, leafs, list without keys
+ * or anyxml.
+ *
+ */
+ NOT_ADDRESSABLE,
+ /**
+ * Data container is addressable in NormalizedNode format,
+ * but in Binding it is not represented in Instance Identifier.
+ *
+ * This are choice / case nodes.
+ *
+ * This data is still accessible using parent object and their
+ * children are addressable.
+ *
+ */
+ INVISIBLE_CONTAINER,
+ /**
+ * Data container is addressable in NormalizedNode format,
+ * but in Binding it is not represented in Instance Identifier.
+ *
+ * This are list nodes.
+ *
+ * This data is still accessible using parent object and their
+ * children are addressable.
+ *
+ */
+ INVISIBLE_LIST,
+ /**
+ * Data container is addressable in Binding Instance Identifier format
+ * and also YangInstanceIdentifier format.
+ *
+ */
+ VISIBLE_CONTAINER,
+ /**
+ * Mapping algorithm was unable to detect type or was not updated after introduction
+ * of new NormalizedNode type.
+ */
+ UNKNOWN;
+
+ static BindingStructuralType from(final DataTreeCandidateNode domChildNode) {
+ final Optional<NormalizedNode<?, ?>> dataBased = domChildNode.getDataAfter().or(domChildNode.getDataBefore());
+ if(dataBased.isPresent()) {
+ return from(dataBased.get());
+ }
+ return from(domChildNode.getIdentifier());
+ }
+
+ private static BindingStructuralType from(final PathArgument identifier) {
+ if(identifier instanceof NodeIdentifierWithPredicates || identifier instanceof AugmentationIdentifier) {
+ return VISIBLE_CONTAINER;
+ }
+ if(identifier instanceof NodeWithValue) {
+ return NOT_ADDRESSABLE;
+ }
+ return UNKNOWN;
+ }
+
+ static BindingStructuralType from(final NormalizedNode<?, ?> data) {
+ if(isNotAddressable(data)) {
+ return NOT_ADDRESSABLE;
+ }
+ if(data instanceof MapNode) {
+ return INVISIBLE_LIST;
+ }
+ if(data instanceof ChoiceNode) {
+ return INVISIBLE_CONTAINER;
+ }
+ if(isVisibleContainer(data)) {
+ return VISIBLE_CONTAINER;
+ }
+ return UNKNOWN;
+ }
+
+ private static boolean isVisibleContainer(final NormalizedNode<?, ?> data) {
+ return data instanceof MapEntryNode || data instanceof ContainerNode || data instanceof AugmentationNode;
+ }
+
+ private static boolean isNotAddressable(final NormalizedNode<?, ?> d) {
+ return d instanceof LeafNode
+ || d instanceof AnyXmlNode
+ || d instanceof LeafSetNode
+ || d instanceof LeafSetEntryNode;
+ }
+
+}
import com.google.common.base.Function;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableBiMap;
import java.lang.reflect.Method;
+import java.util.AbstractMap.SimpleEntry;
import java.util.Iterator;
+import java.util.Map;
import java.util.Map.Entry;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTree;
import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeFactory;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
import org.opendaylight.yangtools.sal.binding.generator.util.BindingRuntimeContext;
import org.opendaylight.yangtools.yang.binding.BindingMapping;
+import org.opendaylight.yangtools.yang.binding.DataContainer;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.Notification;
import org.opendaylight.yangtools.yang.binding.RpcService;
import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
import org.opendaylight.yangtools.yang.common.QNameModule;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, SchemaContextListener, AutoCloseable {
+public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, BindingNormalizedNodeSerializer, SchemaContextListener, AutoCloseable {
private final BindingNormalizedNodeCodecRegistry codecRegistry;
private DataNormalizer legacyToNormalized;
return codecRegistry.toYangInstanceIdentifier(binding);
}
- @SuppressWarnings({ "unchecked", "rawtypes" })
- public Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
- final InstanceIdentifier<? extends DataObject> bindingPath, final DataObject bindingObject) {
- return codecRegistry.toNormalizedNode((InstanceIdentifier) bindingPath, bindingObject);
+ @Override
+ public YangInstanceIdentifier toYangInstanceIdentifier(final InstanceIdentifier<?> binding) {
+ return codecRegistry.toYangInstanceIdentifier(binding);
+ }
+ @Override
+ public <T extends DataObject> Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
+ final InstanceIdentifier<T> path, final T data) {
+ return codecRegistry.toNormalizedNode(path, data);
}
+ @SuppressWarnings({"unchecked", "rawtypes"})
public Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
final Entry<InstanceIdentifier<? extends DataObject>, DataObject> binding) {
- return toNormalizedNode(binding.getKey(),binding.getValue());
+ return toNormalizedNode((InstanceIdentifier) binding.getKey(),binding.getValue());
+ }
+
+ @Override
+ public Entry<InstanceIdentifier<?>, DataObject> fromNormalizedNode(final YangInstanceIdentifier path,
+ final NormalizedNode<?, ?> data) {
+ return codecRegistry.fromNormalizedNode(path, data);
+ }
+
+ @Override
+ public Notification fromNormalizedNodeNotification(final SchemaPath path, final ContainerNode data) {
+ return codecRegistry.fromNormalizedNodeNotification(path, data);
+ }
+
+ @Override
+ public DataObject fromNormalizedNodeRpcData(final SchemaPath path, final ContainerNode data) {
+ return codecRegistry.fromNormalizedNodeRpcData(path, data);
+ }
+
+ @Override
+ public InstanceIdentifier<?> fromYangInstanceIdentifier(final YangInstanceIdentifier dom) {
+ return codecRegistry.fromYangInstanceIdentifier(dom);
+ }
+
+ @Override
+ public ContainerNode toNormalizedNodeNotification(final Notification data) {
+ return codecRegistry.toNormalizedNodeNotification(data);
+ }
+
+ @Override
+ public ContainerNode toNormalizedNodeRpcData(final DataContainer data) {
+ return codecRegistry.toNormalizedNodeRpcData(data);
}
/**
}
@Override
- public BindingCodecTree create(BindingRuntimeContext context) {
+ public BindingCodecTree create(final BindingRuntimeContext context) {
return codecRegistry.create(context);
}
@Override
- public BindingCodecTree create(SchemaContext context, Class<?>... bindingClasses) {
+ public BindingCodecTree create(final SchemaContext context, final Class<?>... bindingClasses) {
return codecRegistry.create(context, bindingClasses);
}
+ @Nonnull protected Map.Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> getSubtreeCodec(
+ final YangInstanceIdentifier domIdentifier) {
+
+ final BindingCodecTree currentCodecTree = codecRegistry.getCodecContext();
+ final InstanceIdentifier<?> bindingPath = codecRegistry.fromYangInstanceIdentifier(domIdentifier);
+ Preconditions.checkArgument(bindingPath != null);
+ /**
+ * If we are able to deserialize YANG instance identifier, getSubtreeCodec must
+ * return non-null value.
+ */
+ final BindingCodecTreeNode<?> codecContext = currentCodecTree.getSubtreeCodec(bindingPath);
+ return new SimpleEntry<InstanceIdentifier<?>, BindingCodecTreeNode<?>>(bindingPath, codecContext);
+ }
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Identifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Lazily translated {@link DataObjectModification} based on {@link DataTreeCandidateNode}.
+ *
+ * {@link LazyDataObjectModification} represents Data tree change event,
+ * but whole tree is not translated or resolved eagerly, but only child nodes
+ * which are directly accessed by user of data object modification.
+ *
+ * @param <T> Type of Binding Data Object
+ */
+final class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
+
+ private final static Logger LOG = LoggerFactory.getLogger(LazyDataObjectModification.class);
+
+ private final BindingCodecTreeNode<T> codec;
+ private final DataTreeCandidateNode domData;
+ private final PathArgument identifier;
+ private Collection<DataObjectModification<? extends DataObject>> childNodesCache;
+
+ private LazyDataObjectModification(final BindingCodecTreeNode<T> codec, final DataTreeCandidateNode domData) {
+ this.codec = Preconditions.checkNotNull(codec);
+ this.domData = Preconditions.checkNotNull(domData);
+ this.identifier = codec.deserializePathArgument(domData.getIdentifier());
+ }
+
+ static <T extends DataObject> DataObjectModification<T> create(final BindingCodecTreeNode<T> codec,
+ final DataTreeCandidateNode domData) {
+ return new LazyDataObjectModification<>(codec,domData);
+ }
+
+ private static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
+ final Collection<DataTreeCandidateNode> domChildNodes) {
+ final ArrayList<DataObjectModification<? extends DataObject>> result = new ArrayList<>(domChildNodes.size());
+ populateList(result, parentCodec, domChildNodes);
+ return result;
+ }
+
+ private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
+ final BindingCodecTreeNode<?> parentCodec, final Collection<DataTreeCandidateNode> domChildNodes) {
+ for (final DataTreeCandidateNode domChildNode : domChildNodes) {
+ final BindingStructuralType type = BindingStructuralType.from(domChildNode);
+ if (type != BindingStructuralType.NOT_ADDRESSABLE) {
+ /*
+ * Even if type is UNKNOWN, from perspective of BindingStructuralType
+ * we try to load codec for it. We will use that type to further specify
+ * debug log.
+ */
+ try {
+ final BindingCodecTreeNode<?> childCodec =
+ parentCodec.yangPathArgumentChild(domChildNode.getIdentifier());
+ populateList(result,type, childCodec, domChildNode);
+ } catch (final IllegalArgumentException e) {
+ if (type == BindingStructuralType.UNKNOWN) {
+ LOG.debug("Unable to deserialize unknown DOM node {}",domChildNode,e);
+ } else {
+ LOG.debug("Binding representation for DOM node {} was not found",domChildNode,e);
+ }
+ }
+ }
+ }
+ }
+
+ private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
+ final BindingStructuralType type, final BindingCodecTreeNode<?> childCodec,
+ final DataTreeCandidateNode domChildNode) {
+ switch (type) {
+ case INVISIBLE_LIST:
+ // We use parent codec intentionally.
+ populateListWithSingleCodec(result, childCodec, domChildNode.getChildNodes());
+ break;
+ case INVISIBLE_CONTAINER:
+ populateList(result, childCodec, domChildNode.getChildNodes());
+ break;
+ case UNKNOWN:
+ case VISIBLE_CONTAINER:
+ result.add(create(childCodec, domChildNode));
+ default:
+ break;
+ }
+ }
+
+ private static void populateListWithSingleCodec(final List<DataObjectModification<? extends DataObject>> result,
+ final BindingCodecTreeNode<?> codec, final Collection<DataTreeCandidateNode> childNodes) {
+ for (final DataTreeCandidateNode child : childNodes) {
+ result.add(create(codec, child));
+ }
+ }
+
+ @Override
+ public T getDataBefore() {
+ return deserialize(domData.getDataBefore());
+ }
+
+ @Override
+ public T getDataAfter() {
+ return deserialize(domData.getDataAfter());
+ }
+
+ @Override
+ public Class<T> getDataType() {
+ return codec.getBindingClass();
+ }
+
+ @Override
+ public PathArgument getIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ public DataObjectModification.ModificationType getModificationType() {
+ switch(domData.getModificationType()) {
+ case WRITE:
+ return DataObjectModification.ModificationType.WRITE;
+ case SUBTREE_MODIFIED:
+ return DataObjectModification.ModificationType.SUBTREE_MODIFIED;
+ case DELETE:
+ return DataObjectModification.ModificationType.DELETE;
+
+ default:
+ // TODO: Should we lie about modification type instead of exception?
+ throw new IllegalStateException("Unsupported DOM Modification type " + domData.getModificationType());
+ }
+ }
+
+ @Override
+ public Collection<DataObjectModification<? extends DataObject>> getModifiedChildren() {
+ if (childNodesCache == null) {
+ childNodesCache = from(codec, domData.getChildNodes());
+ }
+ return childNodesCache;
+ }
+
+ @Override
+ public DataObjectModification<? extends DataObject> getModifiedChild(final PathArgument arg) {
+ final List<YangInstanceIdentifier.PathArgument> domArgumentList = new ArrayList<>();
+ final BindingCodecTreeNode<?> childCodec = codec.bindingPathArgumentChild(arg, domArgumentList);
+ final Iterator<YangInstanceIdentifier.PathArgument> toEnter = domArgumentList.iterator();
+ DataTreeCandidateNode current = domData;
+ while (toEnter.hasNext() && current != null) {
+ current = current.getModifiedChild(toEnter.next());
+ }
+ if (current != null) {
+ return create(childCodec, current);
+ }
+ return null;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
+ final Class<C> listItem, final K listKey) {
+ return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.IdentifiableItem<>(listItem, listKey));
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(final Class<C> arg) {
+ return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.Item<>(arg));
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(
+ final Class<C> augmentation) {
+ return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.Item<>(augmentation));
+ }
+
+ private T deserialize(final Optional<NormalizedNode<?, ?>> dataAfter) {
+ if (dataAfter.isPresent()) {
+ return codec.deserialize(dataAfter.get());
+ }
+ return null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map.Entry;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Lazily translated {@link DataTreeModification} based on {@link DataTreeCandidate}.
+ *
+ * {@link DataTreeModification} represents Data tree change event,
+ * but whole tree is not translated or resolved eagerly, but only child nodes
+ * which are directly accessed by user of data object modification.
+ *
+ */
+class LazyDataTreeModification<T extends DataObject> implements DataTreeModification<T> {
+
+ private final DataTreeIdentifier<T> path;
+ private final DataObjectModification<T> rootNode;
+
+ LazyDataTreeModification(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> path, final BindingCodecTreeNode<T> codec, final DataTreeCandidate domChange) {
+ this.path = new DataTreeIdentifier<>(datastoreType, path);
+ this.rootNode = LazyDataObjectModification.create(codec, domChange.getRootNode());
+ }
+
+ @Override
+ public DataObjectModification<T> getRootNode() {
+ return rootNode;
+ }
+
+ @Override
+ public DataTreeIdentifier<T> getRootPath() {
+ return path;
+ }
+
+ @SuppressWarnings({"unchecked", "rawtypes"})
+ static <T extends DataObject> DataTreeModification<T> create(final BindingToNormalizedNodeCodec codec, final DataTreeCandidate domChange,
+ final LogicalDatastoreType datastoreType) {
+ final Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> codecCtx =
+ codec.getSubtreeCodec(domChange.getRootPath());
+ return (DataTreeModification<T>) new LazyDataTreeModification(datastoreType, codecCtx.getKey(), codecCtx.getValue(), domChange);
+ }
+
+ static <T extends DataObject> Collection<DataTreeModification<T>> from(final BindingToNormalizedNodeCodec codec,
+ final Collection<DataTreeCandidate> domChanges, final LogicalDatastoreType datastoreType) {
+ final List<DataTreeModification<T>> result = new ArrayList<>(domChanges.size());
+ for (final DataTreeCandidate domChange : domChanges) {
+ result.add(LazyDataTreeModification.<T>create(codec, domChange, datastoreType));
+ }
+ return result;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.yangtools.yang.binding.Notification;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Lazy serialized implementation of DOM Notification.
+ *
+ * This implementation performs serialization of data, only if receiver
+ * of notification actually accessed data from notification.
+ *
+ */
+class LazySerializedDOMNotification implements DOMNotification {
+
+ private final BindingNormalizedNodeSerializer codec;
+ private final Notification data;
+ private final SchemaPath type;
+
+ private ContainerNode domBody;
+
+ private LazySerializedDOMNotification(final BindingNormalizedNodeSerializer codec, final Notification data, final SchemaPath type) {
+ super();
+ this.codec = codec;
+ this.data = data;
+ this.type = type;
+ }
+
+ static DOMNotification create(final BindingNormalizedNodeSerializer codec, final Notification data) {
+ final SchemaPath type = SchemaPath.create(true, BindingReflections.findQName(data.getImplementedInterface()));
+ return new LazySerializedDOMNotification(codec, data, type);
+ }
+
+ @Override
+ public SchemaPath getType() {
+ return type;
+ }
+
+ @Override
+ public ContainerNode getBody() {
+ if (domBody == null) {
+ domBody = codec.toNormalizedNodeNotification(data);
+ }
+ return domBody;
+ }
+
+ protected Notification getBindingData() {
+ return data;
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Exception is raised when supplied Bidning Aware
- * RPCService class is not routed and was used in context
- * where routed RPCs should only be used.
- *
- */
-public class RpcIsNotRoutedException extends IllegalStateException {
-
- private static final long serialVersionUID = 1L;
-
- public RpcIsNotRoutedException(final String message, final Throwable cause) {
- super(Preconditions.checkNotNull(message), cause);
- }
-
- public RpcIsNotRoutedException(final String message) {
- super(Preconditions.checkNotNull(message));
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public interface RuntimeCodeGenerator {
-
- /**
- * Returns an instance of provided RpcService type which delegates all calls
- * to the delegate.
- *
- * <p>
- * Returned instance:
- * <ul>
- * <li>implements provided subclass of RpcService type and
- * {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy} interface.
- * <li>
- * <p>
- * delegates all invocations of methods, which are defined in RpcService
- * subtype to delegate which is defined by
- * {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy#setDelegate(Object)}.
- * <p>
- * If delegate is not defined (<code>getDelegate() == null</code>)
- * implementation throws {@link IllegalStateException}
- * <li>{@link org.opendaylight.controller.sal.binding.spi.DelegateProxy#getDelegate()} - returns the delegate to which
- * all calls are delegated.
- * <li>{@link org.opendaylight.controller.sal.binding.spi.DelegateProxy#setDelegate(Object)} - sets the delegate for
- * particular instance
- *
- * </ul>
- *
- * @param serviceType
- * - Subclass of RpcService for which direct proxy is to be
- * generated.
- * @return Instance of RpcService of provided serviceType which implements
- * and {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
- * @throws IllegalArgumentException
- *
- */
- <T extends RpcService> T getDirectProxyFor(Class<T> serviceType) throws IllegalArgumentException;
-
- /**
- * Returns an instance of provided RpcService type which routes all calls to
- * other instances selected on particular input field.
- *
- * <p>
- * Returned instance:
- * <ul>
- * <li>Implements:
- * <ul>
- * <li>{@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
- * <li>{@link RpcRouter}
- * </ul>
- * <li>
- * routes all invocations of methods, which are defined in RpcService
- * subtype based on method arguments and routing information defined in the
- * RpcRoutingTables for this instance
- * {@link RpcRouter#getRoutingTable(Class)}.
- * <ul>
- * <li>
- * Implementation uses
- * {@link RpcRouter#getService(Class, org.opendaylight.yangtools.yang.binding.InstanceIdentifier)} method to
- * retrieve particular instance to which call will be routed.
- * <li>
- * Instance of {@link org.opendaylight.yangtools.yang.binding.InstanceIdentifier} is determined by first argument of
- * method and is retrieved via method which is annotated with
- * {@link org.opendaylight.yangtools.yang.binding.annotations.RoutingContext}.
- * Class representing Routing Context Identifier is retrieved by a
- * {@link org.opendaylight.yangtools.yang.binding.annotations.RoutingContext}.
- * <li>If first argument is not defined / {@link org.opendaylight.yangtools.yang.binding.annotations.RoutingContext} annotation
- * is not present on any field invocation will be delegated to default
- * service {@link RpcRouter#getDefaultService()}.
- * </ul>
- *
- * @param serviceType
- * - Subclass of RpcService for which Router is to be generated.
- * @return Instance of RpcService of provided serviceType which implements
- * also {@link RpcRouter}<T> and {@link org.opendaylight.controller.sal.binding.spi.DelegateProxy}
- * @throws RpcIsNotRoutedException
- */
- <T extends RpcService> RpcRouter<T> getRouterFor(Class<T> serviceType,String name) throws IllegalArgumentException, RpcIsNotRoutedException;
-
- NotificationInvokerFactory getInvokerFactory();
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import java.lang.reflect.Field;
-import java.util.Map;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public final class RuntimeCodeHelper {
- private RuntimeCodeHelper() {
- throw new UnsupportedOperationException("Utility class should never be instantiated");
- }
-
- private static Field getField(final Class<?> cls, final String name) {
- try {
- return cls.getField(name);
- } catch (NoSuchFieldException e) {
- throw new IllegalArgumentException(
- String.format("Class %s is missing field %s", cls, name), e);
- } catch (SecurityException e) {
- throw new IllegalStateException(String.format("Failed to examine class %s", cls), e);
- }
- }
-
- private static Field getDelegateField(final Class<?> cls) {
- return getField(cls, RuntimeCodeSpecification.DELEGATE_FIELD);
- }
-
- private static Object getFieldValue(final Field field, final Object obj) {
- try {
- return field.get(obj);
- } catch (IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to get field %s of object %s", field, obj), e);
- }
- }
-
- private static void setFieldValue(final Field field, final Object obj, final Object value) {
- try {
- field.set(obj, value);
- } catch (IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to set field %s to %s", field, value), e);
- }
- }
-
- /**
- * Helper method to return delegate from ManagedDirectedProxy with use of reflection.
- *
- * Note: This method uses reflection, but access to delegate field should be
- * avoided and called only if necessary.
- */
- @SuppressWarnings("unchecked")
- public static <T extends RpcService> T getDelegate(final RpcService proxy) {
- return (T)getFieldValue(getDelegateField(proxy.getClass()), proxy);
- }
-
- /**
- * Helper method to set delegate to ManagedDirectedProxy with use of reflection.
- *
- * Note: This method uses reflection, but setting delegate field should not occur too much
- * to introduce any significant performance hits.
- */
- public static void setDelegate(final Object proxy, final Object delegate) {
- final Field field = getDelegateField(proxy.getClass());
-
- if (delegate != null) {
- final Class<?> ft = field.getType();
- if (!ft.isAssignableFrom(delegate.getClass())) {
- throw new IllegalArgumentException(
- String.format("Field %s type %s is not compatible with delegate type %s",
- field, ft, delegate.getClass()));
- }
- }
-
- setFieldValue(field, proxy, delegate);
- }
-
- @SuppressWarnings("unchecked")
- public static Map<InstanceIdentifier<? extends Object>,? extends RpcService> getRoutingTable(final RpcService target, final Class<? extends BaseIdentity> tableClass) {
- final Field field = getField(target.getClass(), RuntimeCodeSpecification.getRoutingTableField(tableClass));
- return (Map<InstanceIdentifier<? extends Object>,? extends RpcService>) getFieldValue(field, target);
- }
-
- public static void setRoutingTable(final RpcService target, final Class<? extends BaseIdentity> tableClass, final Map<InstanceIdentifier<? extends Object>,? extends RpcService> routingTable) {
- final Field field = getField(target.getClass(), RuntimeCodeSpecification.getRoutingTableField(tableClass));
- setFieldValue(field, target, routingTable);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public final class RuntimeCodeSpecification {
- public final static String DIRECT_PROXY_SUFFIX = "DirectProxy";
- public final static String INVOKER_SUFFIX = "ListenerInvoker";
- public final static String ROUTER_SUFFIX = "Router";
-
- public final static String DELEGATE_FIELD = "_delegate";
- public final static String ROUTING_TABLE_FIELD_PREFIX = "_routes_";
-
- private RuntimeCodeSpecification() {
- throw new UnsupportedOperationException("Utility class");
- }
-
- /**
- * Returns a name for generated interface
- */
- private static String getGeneratedName(final Class<? extends Object> cls, final String suffix) {
- return cls.getName() + "$$Broker$" + suffix;
- }
-
- public static String getInvokerName(final Class<? extends NotificationListener> listener) {
- return getGeneratedName(listener, RuntimeCodeSpecification.INVOKER_SUFFIX);
- }
-
- /**
- * Returns a name for DirectProxy implementation
- */
- public static String getDirectProxyName(final Class<? extends RpcService> base) {
- return getGeneratedName(base, RuntimeCodeSpecification.DIRECT_PROXY_SUFFIX);
- }
-
- /**
- * Returns a name for Router implementation
- */
- public static String getRouterName(final Class<? extends RpcService> base) {
- return getGeneratedName(base, RuntimeCodeSpecification.ROUTER_SUFFIX);
- }
-
- /**
- * Returns a field name for specified routing context
- */
- public static String getRoutingTableField(final Class<? extends BaseIdentity> routingContext) {
- return "_routes_" + routingContext.getSimpleName();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.Iterables;
-import java.util.Map;
-import java.util.WeakHashMap;
-import javassist.ClassPool;
-import javassist.CtClass;
-import javassist.CtMethod;
-import javassist.NotFoundException;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
-import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
-import org.opendaylight.yangtools.util.ClassLoaderUtils;
-import org.opendaylight.yangtools.yang.binding.BindingMapping;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
-
-abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator, NotificationInvokerFactory {
- @GuardedBy("this")
- private final Map<Class<? extends NotificationListener>, RuntimeGeneratedInvokerPrototype> invokerClasses = new WeakHashMap<>();
- private final CtClass brokerNotificationListener;
- protected final JavassistUtils utils;
-
- protected AbstractRuntimeCodeGenerator(final ClassPool pool) {
- utils = JavassistUtils.forClassPool(pool);
-
- /*
- * Make sure Javassist ClassPool sees the classloader of RpcService
- */
- utils.ensureClassLoader(RpcService.class);
-
- brokerNotificationListener = utils.asCtClass(org.opendaylight.controller.sal.binding.api.NotificationListener.class);
- }
-
- protected final CtClass getBrokerNotificationListener() {
- return brokerNotificationListener;
- }
-
- protected abstract RuntimeGeneratedInvokerPrototype generateListenerInvoker(Class<? extends NotificationListener> cls);
- protected abstract <T extends RpcService> Supplier<T> directProxySupplier(final Class<T> serviceType);
- protected abstract <T extends RpcService> Supplier<T> routerSupplier(final Class<T> serviceType, RpcServiceMetadata metadata);
-
- private RpcServiceMetadata getRpcMetadata(final CtClass iface) throws ClassNotFoundException, NotFoundException, RpcIsNotRoutedException {
- final RpcServiceMetadata metadata = new RpcServiceMetadata();
-
- for (CtMethod method : iface.getMethods()) {
- if (isRpcMethodWithInput(iface, method)) {
- final RpcMetadata routingPair = getRpcMetadata(method);
- if (routingPair != null) {
- metadata.addContext(routingPair.getContext());
- metadata.addRpcMethod(method.getName(), routingPair);
-
- /*
- * Force-load the RPC class representing the "input" of this RPC.
- *
- * FIXME: this is pre-existing side-effect of the original code, which
- * kept a reference to the loaded class, but it did not use it.
- *
- * There was no explanation as to why forcing this load was
- * necessary. As far as I can tell now is that it forces the
- * resolution of method arguments, which would (according to
- * my reading of JLS) occur only when the method is invoked via
- * binding-aware class action, not when coming from
- * binding-independent world. Whether that makes sense or not,
- * remains to be investigated.
- */
- Thread.currentThread().getContextClassLoader().loadClass(routingPair.getInputType().getName());
- } else {
- throw new RpcIsNotRoutedException(String.format("RPC %s from %s is not routed", method.getName(), iface.getName()));
- }
- }
- }
-
- return metadata;
- }
-
-
- private boolean isRpcMethodWithInput(final CtClass iface, final CtMethod method) throws NotFoundException {
- if(iface.equals(method.getDeclaringClass())
- && method.getParameterTypes().length == 1) {
- final CtClass onlyArg = method.getParameterTypes()[0];
- if(onlyArg.isInterface() && onlyArg.getName().endsWith(BindingMapping.RPC_INPUT_SUFFIX)) {
- return true;
- }
- }
- return false;
- }
-
- private RpcMetadata getRpcMetadata(final CtMethod method) throws NotFoundException {
- final CtClass inputClass = method.getParameterTypes()[0];
- return rpcMethodMetadata(inputClass, inputClass, method.getName());
- }
-
- private RpcMetadata rpcMethodMetadata(final CtClass dataClass, final CtClass inputClass, final String rpcMethod) throws NotFoundException {
- for (CtMethod method : dataClass.getMethods()) {
- if (method.getName().startsWith("get") && method.getParameterTypes().length == 0) {
- for (Object annotation : method.getAvailableAnnotations()) {
- if (annotation instanceof RoutingContext) {
- boolean encapsulated = !method.getReturnType().equals(utils.asCtClass(InstanceIdentifier.class));
- return new RpcMetadata(rpcMethod, ((RoutingContext)annotation).value(), method, encapsulated, inputClass);
- }
- }
- }
- }
-
- for (CtClass iface : dataClass.getInterfaces()) {
- final RpcMetadata ret = rpcMethodMetadata(iface, inputClass, rpcMethod);
- if(ret != null) {
- return ret;
- }
- }
- return null;
- }
-
- private synchronized RuntimeGeneratedInvokerPrototype resolveInvokerClass(final Class<? extends NotificationListener> cls) {
- RuntimeGeneratedInvokerPrototype invoker = invokerClasses.get(cls);
- if (invoker != null) {
- return invoker;
- }
-
- synchronized (utils) {
- invoker = ClassLoaderUtils.withClassLoader(cls.getClassLoader(), new Supplier<RuntimeGeneratedInvokerPrototype>() {
- @Override
- public RuntimeGeneratedInvokerPrototype get() {
- return generateListenerInvoker(cls);
- }
- });
- }
-
- invokerClasses.put(cls, invoker);
- return invoker;
- }
-
- @Override
- public final NotificationInvokerFactory getInvokerFactory() {
- return this;
- }
-
- @Override
- public final <T extends RpcService> T getDirectProxyFor(final Class<T> serviceType) {
- synchronized (utils) {
- return ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), directProxySupplier(serviceType));
- }
- }
-
- @Override
- public final <T extends RpcService> RpcRouter<T> getRouterFor(final Class<T> serviceType, final String name) throws RpcIsNotRoutedException {
- final RpcServiceMetadata metadata = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), new Supplier<RpcServiceMetadata>() {
- @Override
- public RpcServiceMetadata get() {
- try {
- return getRpcMetadata(utils.asCtClass(serviceType));
- } catch (ClassNotFoundException | NotFoundException e) {
- throw new IllegalStateException(String.format("Failed to load metadata for class %s", serviceType), e);
- }
- }
- });
-
- if (Iterables.isEmpty(metadata.getContexts())) {
- throw new RpcIsNotRoutedException("Service doesn't have routing context associated.");
- }
-
- synchronized (utils) {
- final T instance = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), routerSupplier(serviceType, metadata));
- return new RpcRouterCodegenInstance<T>(name, serviceType, instance, metadata.getContexts());
- }
- }
-
- @Override
- public NotificationInvoker invokerFor(final NotificationListener instance) {
- final Class<? extends NotificationListener> cls = instance.getClass();
- final RuntimeGeneratedInvokerPrototype prototype = resolveInvokerClass(cls);
-
- try {
- return RuntimeGeneratedInvoker.create(instance, prototype);
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to create invoker for %s", instance), e);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
-import java.lang.reflect.Method;
-import java.util.Map;
-import javassist.CannotCompileException;
-import javassist.ClassPool;
-import javassist.CtClass;
-import javassist.CtMethod;
-import javassist.NotFoundException;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeSpecification;
-import org.opendaylight.yangtools.sal.binding.generator.util.ClassGenerator;
-import org.opendaylight.yangtools.sal.binding.generator.util.MethodGenerator;
-import org.opendaylight.yangtools.util.ClassLoaderUtils;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.RpcImplementation;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
-
-final class DefaultRuntimeCodeGenerator extends AbstractRuntimeCodeGenerator {
-
- DefaultRuntimeCodeGenerator(final ClassPool pool) {
- super(pool);
- }
-
- @Override
- protected <T extends RpcService> Supplier<T> directProxySupplier(final Class<T> serviceType) {
- return new Supplier<T>() {
- @SuppressWarnings("unchecked")
- @Override
- public T get() {
- final String proxyName = RuntimeCodeSpecification.getDirectProxyName(serviceType);
-
- final Class<?> potentialClass = ClassLoaderUtils.tryToLoadClassWithTCCL(proxyName);
- if (potentialClass != null) {
- try {
- return (T)potentialClass.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException("Failed to instantiate class " + potentialClass.getName(), e);
- }
- }
-
- final CtClass supertype = utils.asCtClass(serviceType);
- final String directProxyName = RuntimeCodeSpecification.getDirectProxyName(serviceType);
-
- final CtClass createdCls;
- try {
- createdCls = utils.createClass(directProxyName, supertype, new ClassGenerator() {
- @Override
- public void process(final CtClass cls) throws CannotCompileException {
- utils.field(cls, RuntimeCodeSpecification.DELEGATE_FIELD, serviceType);
- utils.implementsType(cls, utils.asCtClass(RpcImplementation.class));
- utils.implementMethodsFrom(cls, supertype, new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
- sb.append("{\n");
- sb.append(" if (").append(RuntimeCodeSpecification.DELEGATE_FIELD).append(" == null) {\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No default provider is available\");\n");
- sb.append(" }\n");
- sb.append(" return ($r) ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append('.').append(method.getName()).append("($$);\n");
- sb.append("}\n");
- method.setBody(sb.toString());
- }
- });
-
- // FIXME: copy this one...
- utils.implementMethodsFrom(cls, utils.asCtClass(RpcImplementation.class), new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
- sb.append("{\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No provider is processing supplied message\");\n");
- sb.append(" return ($r) null;\n");
- sb.append("}\n");
- method.setBody(sb.toString());
- }
- });
- }
- });
- } catch (CannotCompileException e) {
- throw new IllegalStateException("Failed to create class " + directProxyName, e);
- }
-
- final Class<?> c;
- try {
- c = createdCls.toClass(serviceType.getClassLoader(), serviceType.getProtectionDomain());
- } catch (CannotCompileException e) {
- throw new IllegalStateException(String.format("Failed to create class %s", createdCls), e);
- }
-
- try {
- return (T) c.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to instantiated class %s", c), e);
- }
- }
- };
- }
-
- @Override
- protected <T extends RpcService> Supplier<T> routerSupplier(final Class<T> serviceType, final RpcServiceMetadata metadata) {
- return new Supplier<T>() {
- @SuppressWarnings("unchecked")
- @Override
- public T get() {
- final CtClass supertype = utils.asCtClass(serviceType);
- final String routerName = RuntimeCodeSpecification.getRouterName(serviceType);
- final Class<?> potentialClass = ClassLoaderUtils.tryToLoadClassWithTCCL(routerName);
- if (potentialClass != null) {
- try {
- return (T)potentialClass.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException("Failed to instantiate class", e);
- }
- }
-
- final CtClass targetCls;
- try {
- targetCls = utils.createClass(routerName, supertype, new ClassGenerator() {
- @Override
- public void process(final CtClass cls) throws CannotCompileException {
- utils.field(cls, RuntimeCodeSpecification.DELEGATE_FIELD, serviceType);
- //utils.field(cls, REMOTE_INVOKER_FIELD,iface);
- utils.implementsType(cls, utils.asCtClass(RpcImplementation.class));
-
- for (final Class<? extends BaseIdentity> ctx : metadata.getContexts()) {
- utils.field(cls, RuntimeCodeSpecification.getRoutingTableField(ctx), Map.class);
- }
-
- utils.implementMethodsFrom(cls, supertype, new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final int ptl;
- try {
- ptl = method.getParameterTypes().length;
- } catch (NotFoundException e) {
- throw new CannotCompileException(e);
- }
- final StringBuilder sb = new StringBuilder();
-
- switch (ptl) {
- case 0:
- sb.append("return ($r) ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append('.').append(method.getName()).append("($$);");
- break;
- case 1:
- final RpcMetadata rpcMeta = metadata.getRpcMethod(method.getName());
- final String rtGetter = rpcMeta.getInputRouteGetter().getName();
- final String stName = supertype.getName();
-
- sb.append('\n');
- sb.append("{\n");
- sb.append(" if ($1 == null) {\n");
- sb.append(" throw new IllegalArgumentException(\"RPC input must not be null and must contain a value for field ").append(rtGetter).append("\");\n");
- sb.append(" }\n");
- sb.append(" if ($1.").append(rtGetter).append("() == null) {\n");
- sb.append(" throw new IllegalArgumentException(\"Field ").append(rtGetter).append(" must not be null\");\n");
- sb.append(" }\n");
-
- sb.append(" final org.opendaylight.yangtools.yang.binding.InstanceIdentifier identifier = $1.").append(rtGetter).append("()");
- if (rpcMeta.isRouteEncapsulated()) {
- sb.append(".getValue()");
- }
- sb.append(";\n");
-
- sb.append(" ").append(supertype.getName()).append(" instance = (").append(stName).append(") ").append(RuntimeCodeSpecification.getRoutingTableField(rpcMeta.getContext())).append(".get(identifier);\n");
- sb.append(" if (instance == null) {\n");
- sb.append(" instance = ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append(";\n");
- sb.append(" }\n");
-
- sb.append(" if (instance == null) {\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No routable provider is processing routed message for \" + String.valueOf(identifier));\n");
- sb.append(" }\n");
- sb.append(" return ($r) instance.").append(method.getName()).append("($$);\n");
- sb.append('}');
- break;
- default:
- throw new CannotCompileException(String.format("Unsupported parameters length %s", ptl));
- }
-
- method.setBody(sb.toString());
- }
- });
-
- // FIXME: move this into a template class
- utils.implementMethodsFrom(cls, utils.asCtClass(RpcImplementation.class), new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
- sb.append("{\n");
- sb.append(" throw new java.lang.IllegalStateException(\"No provider is processing supplied message\");\n");
- sb.append(" return ($r) null;\n");
- sb.append("}\n");
-
- method.setBody(sb.toString());
- }
- });
- }
- });
- } catch (CannotCompileException e) {
- throw new IllegalStateException("Failed to create class " + routerName, e);
- }
-
- final Class<?> c;
- try {
- c = targetCls.toClass(serviceType.getClassLoader(), serviceType.getProtectionDomain());
- } catch (CannotCompileException e) {
- throw new IllegalStateException(String.format("Failed to compile class %s", targetCls), e);
- }
-
- try {
- return (T)c.newInstance();
- } catch (InstantiationException | IllegalAccessException e) {
- throw new IllegalStateException(String.format("Failed to instantiate class %s", c), e);
- }
- }
- };
- }
-
- @SuppressWarnings("unchecked")
- @Override
- protected RuntimeGeneratedInvokerPrototype generateListenerInvoker(final Class<? extends NotificationListener> listenerType) {
- final String invokerName = RuntimeCodeSpecification.getInvokerName(listenerType);
- final CtClass targetCls;
-
- // Builder for a set of supported types. Filled while the target class is being generated
- final Builder<Class<? extends Notification>> b = ImmutableSet.builder();
-
- try {
- targetCls = utils.createClass(invokerName, getBrokerNotificationListener(), new ClassGenerator() {
- @Override
- public void process(final CtClass cls) throws CannotCompileException {
- utils.field(cls, RuntimeCodeSpecification.DELEGATE_FIELD, listenerType);
- utils.implementMethodsFrom(cls, getBrokerNotificationListener(), new MethodGenerator() {
- @Override
- public void process(final CtMethod method) throws CannotCompileException {
- final StringBuilder sb = new StringBuilder("\n");
-
- sb.append("{\n");
-
- for (Method m : listenerType.getMethods()) {
- if (BindingReflections.isNotificationCallback(m)) {
- final Class<?> argType = m.getParameterTypes()[0];
-
- // populates builder above
- b.add((Class<? extends Notification>) argType);
-
- sb.append(" if ($1 instanceof ").append(argType.getName()).append(") {\n");
- sb.append(" ").append(RuntimeCodeSpecification.DELEGATE_FIELD).append('.').append(m.getName()).append("((").append(argType.getName()).append(") $1);\n");
- sb.append(" return null;\n");
- sb.append(" } else ");
- }
- }
-
- sb.append(" return null;\n");
- sb.append("}\n");
- method.setBody(sb.toString());
- }
- });
- }
- });
- } catch (CannotCompileException e) {
- throw new IllegalStateException("Failed to create class " + invokerName, e);
- }
-
- final Class<?> finalClass;
- try {
- finalClass = targetCls.toClass(listenerType.getClassLoader(), listenerType.getProtectionDomain());
- } catch (CannotCompileException e) {
- throw new IllegalStateException(String.format("Failed to compile class %s", targetCls), e);
- }
-
- return new RuntimeGeneratedInvokerPrototype(b.build(), (Class<? extends org.opendaylight.controller.sal.binding.api.NotificationListener<?>>) finalClass);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import javassist.CtClass;
-import javassist.CtMethod;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-final class RpcMetadata {
- private final Class<? extends BaseIdentity> context;
- private final CtMethod inputRouteGetter;
- private final Boolean routeEncapsulated;
- private final CtClass inputType;
- private final String methodName;
-
- public Class<? extends BaseIdentity> getContext() {
- return context;
- }
-
- public CtMethod getInputRouteGetter() {
- return inputRouteGetter;
- }
-
- public CtClass getInputType() {
- return inputType;
- }
-
- public boolean isRouteEncapsulated() {
- return routeEncapsulated;
- }
-
- public RpcMetadata(final String methodName, final Class<? extends BaseIdentity> context, final CtMethod inputRouteGetter, final boolean routeEncapsulated, final CtClass inputType) {
- this.inputRouteGetter = Preconditions.checkNotNull(inputRouteGetter);
- this.methodName = Preconditions.checkNotNull(methodName);
- this.inputType = Preconditions.checkNotNull(inputType);
- this.context = Preconditions.checkNotNull(context);
- this.routeEncapsulated = routeEncapsulated;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + methodName.hashCode();
- result = prime * result + context.hashCode();
- result = prime * result + inputRouteGetter.hashCode();
- result = prime * result + routeEncapsulated.hashCode();
- result = prime * result + inputType.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof RpcMetadata)) {
- return false;
- }
- final RpcMetadata other = (RpcMetadata) obj;
- if (!methodName.equals(other.methodName)) {
- return false;
- }
- if (!context.equals(other.context)) {
- return false;
- }
- if (!inputRouteGetter.equals(other.inputRouteGetter)) {
- return false;
- }
- if (!routeEncapsulated.equals(other.routeEncapsulated)) {
- return false;
- }
- return inputType.equals(other.inputType);
- }
-
- @Override
- public String toString() {
- return Objects.toStringHelper(this)
- .add("context", context)
- .add("inputRouteGetter", inputRouteGetter)
- .add("inputType", inputType)
- .add("methodName", methodName)
- .add("routeEncapsulated", routeEncapsulated)
- .toString();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import static org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper.setRoutingTable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChange;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRoutingTable;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class RpcRouterCodegenInstance<T extends RpcService> implements //
-RpcRouter<T>, RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>> {
-
- private static final Logger LOG = LoggerFactory.getLogger(RpcRouterCodegenInstance.class);
-
- private final Class<T> serviceType;
-
- private final T invocationProxy;
-
- private final Set<Class<? extends BaseIdentity>> contexts;
-
- private final ListenerRegistry<RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> listeners;
-
- private final Map<Class<? extends BaseIdentity>, RpcRoutingTableImpl<? extends BaseIdentity, T>> routingTables;
-
- @SuppressWarnings("unchecked")
- public RpcRouterCodegenInstance(final String name,final Class<T> type, final T routerImpl, final Iterable<Class<? extends BaseIdentity>> contexts) {
- this.listeners = ListenerRegistry.create();
- this.serviceType = type;
- this.invocationProxy = routerImpl;
- this.contexts = ImmutableSet.copyOf(contexts);
- Map<Class<? extends BaseIdentity>, RpcRoutingTableImpl<? extends BaseIdentity, T>> mutableRoutingTables = new HashMap<>();
- for (Class<? extends BaseIdentity> ctx : contexts) {
- RpcRoutingTableImpl<? extends BaseIdentity, T> table = new RpcRoutingTableImpl<>(name,ctx,type);
-
- @SuppressWarnings("rawtypes")
- Map invokerView = table.getRoutes();
-
- setRoutingTable(invocationProxy, ctx, invokerView);
- mutableRoutingTables.put(ctx, table);
- table.registerRouteChangeListener(this);
- }
- this.routingTables = ImmutableMap.copyOf(mutableRoutingTables);
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
-
- @Override
- public T getInvocationProxy() {
- return invocationProxy;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public <C extends BaseIdentity> RpcRoutingTable<C, T> getRoutingTable(final Class<C> routeContext) {
- return (RpcRoutingTable<C, T>) routingTables.get(routeContext);
- }
-
- @Override
- public T getDefaultService() {
- return RuntimeCodeHelper.getDelegate(invocationProxy);
- }
-
- @Override
- public Set<Class<? extends BaseIdentity>> getContexts() {
- return contexts;
- }
-
- @Override
- public <L extends RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
- final L listener) {
- return listeners.registerWithType(listener);
- }
-
- @Override
- public void onRouteChange(final RouteChange<Class<? extends BaseIdentity>, InstanceIdentifier<?>> change) {
- for (ListenerRegistration<RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> listener : listeners) {
- try {
- listener.getInstance().onRouteChange(change);
- } catch (Exception e) {
- LOG.error("Error occured during invoker listener {}", listener.getInstance(), e);
- }
- }
- }
-
- @Override
- public T getService(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
- return routingTables.get(context).getRoute(path);
- }
-
- @Override
- public RoutedRpcRegistration<T> addRoutedRpcImplementation(final T service) {
- return new RoutedRpcRegistrationImpl(service);
- }
-
- public void removeDefaultImplementation(final T instance) {
- RpcService current = RuntimeCodeHelper.getDelegate(invocationProxy);
- if(instance == current) {
- RuntimeCodeHelper.setDelegate(invocationProxy, null);
- }
- }
-
- @Override
- public RpcRegistration<T> registerDefaultService(final T service) {
- RuntimeCodeHelper.setDelegate(invocationProxy, service);
- return new DefaultRpcImplementationRegistration(service);
- }
-
- private final class RoutedRpcRegistrationImpl extends AbstractObjectRegistration<T> implements RoutedRpcRegistration<T> {
- /*
- * FIXME: retaining this collection is not completely efficient. We really should be storing
- * a reference to this registration, as a particular listener may be registered multiple
- * times -- and then this goes kaboom in various aspects.
- */
- @GuardedBy("this")
- private final Collection<Class<? extends BaseIdentity>> contexts = new ArrayList<>(1);
-
- public RoutedRpcRegistrationImpl(final T instance) {
- super(instance);
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
-
- @Override
- public synchronized void registerPath(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
- if (isClosed()) {
- LOG.debug("Closed registration of {} ignoring new path {}", getInstance(), path);
- return;
- }
-
- routingTables.get(context).updateRoute(path, getInstance());
- contexts.add(context);
- }
-
- @Override
- public synchronized void unregisterPath(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
- if (isClosed()) {
- LOG.debug("Closed unregistration of {} ignoring new path {}", getInstance(), path);
- return;
- }
-
- routingTables.get(context).removeRoute(path, getInstance());
- contexts.remove(context);
- }
-
- @Deprecated
- @Override
- public void registerInstance(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> instance) {
- registerPath(context, instance);
- }
-
- @Deprecated
- @Override
- public void unregisterInstance(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> instance) {
- unregisterPath(context, instance);
- }
-
- @Override
- protected synchronized void removeRegistration() {
- for (Class<? extends BaseIdentity> ctx : contexts) {
- routingTables.get(ctx).removeAllReferences(getInstance());
- }
- contexts.clear();
- }
- }
-
- private final class DefaultRpcImplementationRegistration extends AbstractObjectRegistration<T> implements RpcRegistration<T> {
-
-
- protected DefaultRpcImplementationRegistration(final T instance) {
- super(instance);
- }
-
- @Override
- protected void removeRegistration() {
- removeDefaultImplementation(this.getInstance());
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
- }
-
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
-import org.opendaylight.controller.md.sal.common.impl.routing.RoutingUtils;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRoutingTable;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.Mutable;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class RpcRoutingTableImpl<C extends BaseIdentity, S extends RpcService> implements
- Mutable, //
- RpcRoutingTable<C, S>, //
- RouteChangePublisher<Class<? extends BaseIdentity>, InstanceIdentifier<?>> {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(RpcRoutingTableImpl.class);
- private final String routerName;
- private final Class<S> serviceType;
-
- private final Class<C> contextType;
- private final ConcurrentMap<InstanceIdentifier<?>, S> routes;
- private final Map<InstanceIdentifier<?>, S> unmodifiableRoutes;
-
- private RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>> listener;
- private S defaultRoute;
-
- public RpcRoutingTableImpl(final String routerName,final Class<C> contextType, final Class<S> serviceType) {
- super();
- this.routerName = routerName;
- this.serviceType = serviceType;
- this.contextType = contextType;
- this.routes = new ConcurrentHashMap<>();
- this.unmodifiableRoutes = Collections.unmodifiableMap(routes);
- }
-
- @Override
- public void setDefaultRoute(final S target) {
- defaultRoute = target;
- }
-
- @Override
- public S getDefaultRoute() {
- return defaultRoute;
- }
-
- @Override
- public <L extends RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
- final L listener) {
- return new SingletonListenerRegistration<L>(listener);
- }
-
- @Override
- public Class<C> getIdentifier() {
- return contextType;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public void updateRoute(final InstanceIdentifier<?> path, final S service) {
- S previous = this.routes.put(path, service);
-
- LOGGER.debug("Route {} updated to {} in routing table {}",path,service,this);
- @SuppressWarnings("rawtypes")
- RouteChangeListener listenerCapture = listener;
- if (previous == null && listenerCapture != null) {
- listenerCapture.onRouteChange(RoutingUtils.announcementChange(contextType, path));
- }
- }
-
-
- @Override
- @SuppressWarnings("unchecked")
- public void removeRoute(final InstanceIdentifier<?> path) {
- S previous = this.routes.remove(path);
- LOGGER.debug("Route {} to {} removed in routing table {}",path,previous,this);
- @SuppressWarnings("rawtypes")
- RouteChangeListener listenerCapture = listener;
- if (previous != null && listenerCapture != null) {
- listenerCapture.onRouteChange(RoutingUtils.removalChange(contextType, path));
- }
- }
-
- void removeRoute(final InstanceIdentifier<?> path, final S service) {
- @SuppressWarnings("rawtypes")
- RouteChangeListener listenerCapture = listener;
- if (routes.remove(path, service) && listenerCapture != null) {
- LOGGER.debug("Route {} to {} removed in routing table {}",path,service,this);
- listenerCapture.onRouteChange(RoutingUtils.removalChange(contextType, path));
- }
- }
-
- @Override
- public S getRoute(final InstanceIdentifier<?> nodeInstance) {
- S route = routes.get(nodeInstance);
- if (route != null) {
- return route;
- }
- return getDefaultRoute();
- }
-
- @Override
- public Map<InstanceIdentifier<?>, S> getRoutes() {
- return unmodifiableRoutes;
- }
-
- void removeAllReferences(final S service) {
- // FIXME: replace this via properly-synchronized BiMap (or something)
- final Iterator<S> it = routes.values().iterator();
- while (it.hasNext()) {
- final S s = it.next();
- if (service.equals(s)) {
- it.remove();
- }
- }
- }
-
- @Override
- public String toString() {
- return "RpcRoutingTableImpl [router=" + routerName + ", service=" + serviceType.getSimpleName() + ", context="
- + contextType.getSimpleName() + "]";
- }
-
- private class SingletonListenerRegistration<L extends RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>>> extends
- AbstractObjectRegistration<L>
- implements ListenerRegistration<L> {
-
- public SingletonListenerRegistration(final L instance) {
- super(instance);
- listener = instance;
- }
-
- @Override
- protected void removeRegistration() {
- listener = null;
- }
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-
-import com.google.common.collect.Iterables;
-
-final class RpcServiceMetadata {
- private final Set<Class<? extends BaseIdentity>> contexts = new HashSet<>();
- private final Map<String, RpcMetadata> rpcMethods = new HashMap<>();
- private final Iterable<Class<? extends BaseIdentity>> roContexts = Iterables.unmodifiableIterable(contexts);
-
- public Iterable<Class<? extends BaseIdentity>> getContexts() {
- return roContexts;
- }
-
- public RpcMetadata getRpcMethod(final String name) {
- return rpcMethods.get(name);
- }
-
- public void addContext(final Class<? extends BaseIdentity> context) {
- contexts.add(context);
- }
-
- public void addRpcMethod(final String name, final RpcMetadata routingPair) {
- rpcMethods.put(name, routingPair);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import java.util.Set;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-final class RuntimeGeneratedInvoker implements NotificationInvoker {
- private final org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> invocationProxy;
- private final RuntimeGeneratedInvokerPrototype prototype;
- private final NotificationListener delegate;
-
- @SuppressWarnings("unchecked")
- private RuntimeGeneratedInvoker(final NotificationListener delegate, final RuntimeGeneratedInvokerPrototype prototype, final org.opendaylight.controller.sal.binding.api.NotificationListener<?> proxy) {
- this.invocationProxy = (org.opendaylight.controller.sal.binding.api.NotificationListener<Notification>) proxy;
- this.delegate = Preconditions.checkNotNull(delegate);
- this.prototype = prototype;
- }
-
- public static RuntimeGeneratedInvoker create(final NotificationListener delegate, final RuntimeGeneratedInvokerPrototype prototype) throws InstantiationException, IllegalAccessException {
- final org.opendaylight.controller.sal.binding.api.NotificationListener<?> proxy = Preconditions.checkNotNull(prototype.getProtoClass().newInstance());
- RuntimeCodeHelper.setDelegate(proxy, delegate);
- return new RuntimeGeneratedInvoker(delegate, prototype, proxy);
- }
-
- @Override
- public NotificationListener getDelegate() {
- return delegate;
- }
-
- @Override
- public org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> getInvocationProxy() {
- return invocationProxy;
- }
-
- @Override
- public Set<Class<? extends Notification>> getSupportedNotifications() {
- return prototype.getSupportedNotifications();
- }
-
- @Override
- public void close() {
- // Nothing to do
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + delegate.hashCode();
- result = prime * result + invocationProxy.hashCode();
- result = prime * result + prototype.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof RuntimeGeneratedInvoker)) {
- return false;
- }
- final RuntimeGeneratedInvoker other = (RuntimeGeneratedInvoker) obj;
- if (!delegate.equals(other.delegate)) {
- return false;
- }
- if (!invocationProxy.equals(other.invocationProxy)) {
- return false;
- }
- return prototype.equals(other.prototype);
- }
-
- @Override
- public String toString() {
- return Objects.toStringHelper(this).toString();
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import java.util.Set;
-
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-
-final class RuntimeGeneratedInvokerPrototype {
- private final Set<Class<? extends Notification>> supportedNotifications;
- private final Class<? extends NotificationListener<?>> protoClass;
-
- public RuntimeGeneratedInvokerPrototype(final Set<Class<? extends Notification>> supportedNotifications, final Class<? extends NotificationListener<?>> protoClass) {
- this.supportedNotifications = Preconditions.checkNotNull(supportedNotifications);
- this.protoClass = Preconditions.checkNotNull(protoClass);
- }
-
- public Set<Class<? extends Notification>> getSupportedNotifications() {
- return supportedNotifications;
- }
-
- public Class<? extends NotificationListener<?>> getProtoClass() {
- return protoClass;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + supportedNotifications.hashCode();
- result = prime * result + protoClass.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof RuntimeGeneratedInvokerPrototype)) {
- return false;
- }
- final RuntimeGeneratedInvokerPrototype other = (RuntimeGeneratedInvokerPrototype) obj;
- if (!protoClass.equals(other.protoClass)) {
- return false;
- }
- return supportedNotifications.equals(other.supportedNotifications);
- }
-
- @Override
- public String toString() {
- return Objects.toStringHelper(this)
- .add("protoClass", protoClass)
- .add("supportedNotifications", supportedNotifications)
- .toString();
- }
-}
import java.util.concurrent.TimeUnit;
import javassist.ClassPool;
import org.apache.commons.lang3.StringUtils;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public static final ClassPool CLASS_POOL = ClassPool.getDefault();
public static final JavassistUtils JAVASSIST = JavassistUtils.forClassPool(CLASS_POOL);
- public static final org.opendaylight.controller.sal.binding.codegen.impl.DefaultRuntimeCodeGenerator RPC_GENERATOR_IMPL = new org.opendaylight.controller.sal.binding.codegen.impl.DefaultRuntimeCodeGenerator(
- CLASS_POOL);
- public static final RuntimeCodeGenerator RPC_GENERATOR = RPC_GENERATOR_IMPL;
- public static final NotificationInvokerFactory INVOKER_FACTORY = RPC_GENERATOR_IMPL.getInvokerFactory();
public static final int CORE_NOTIFICATION_THREADS = 4;
public static final int MAX_NOTIFICATION_THREADS = 32;
if (NOTIFICATION_EXECUTOR == null) {
int queueSize = MAX_NOTIFICATION_QUEUE_SIZE;
- String queueValue = System.getProperty(NOTIFICATION_QUEUE_SIZE_PROPERTY);
+ final String queueValue = System.getProperty(NOTIFICATION_QUEUE_SIZE_PROPERTY);
if (StringUtils.isNotBlank(queueValue)) {
try {
queueSize = Integer.parseInt(queueValue);
logger.trace("Queue size was set to {}", queueSize);
- } catch (NumberFormatException e) {
+ } catch (final NumberFormatException e) {
logger.warn("Cannot parse {} as set by {}, using default {}", queueValue,
NOTIFICATION_QUEUE_SIZE_PROPERTY, queueSize);
}
public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
try {
executor.getQueue().put(r);
- } catch (InterruptedException e) {
+ } catch (final InterruptedException e) {
throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
}
}
@Deprecated
public static synchronized ListeningExecutorService getDefaultCommitExecutor() {
if (COMMIT_EXECUTOR == null) {
- ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-commit-%d").build();
+ final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-commit-%d").build();
/*
* FIXME: this used to be newCacheThreadPool(), but MD-SAL does not have transaction
* ordering guarantees, which means that using a concurrent threadpool results
* in inconsistent data being present. Once proper primitives are introduced,
* concurrency can be reintroduced.
*/
- ExecutorService executor = Executors.newSingleThreadExecutor(factory);
+ final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
COMMIT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
}
public static ExecutorService getDefaultChangeEventExecutor() {
if (CHANGE_EVENT_EXECUTOR == null) {
- ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-change-%d").build();
+ final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-change-%d").build();
/*
* FIXME: this used to be newCacheThreadPool(), but MD-SAL does not have transaction
* ordering guarantees, which means that using a concurrent threadpool results
* in inconsistent data being present. Once proper primitives are introduced,
* concurrency can be reintroduced.
*/
- ExecutorService executor = Executors.newSingleThreadExecutor(factory);
+ final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
CHANGE_EVENT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataReader;
-import org.opendaylight.controller.md.sal.common.impl.routing.AbstractDataReadRouter;
-import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataBroker;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.util.DataObjectReadingUtil;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
-import com.google.common.collect.Maps;
-
-@Deprecated
-public class DataBrokerImpl extends
- AbstractDataBroker<InstanceIdentifier<? extends DataObject>, DataObject, DataChangeListener> //
- implements DataProviderService, AutoCloseable {
-
- private final static class ContainsWildcarded implements Predicate<InstanceIdentifier<? extends DataObject>> {
-
- private final InstanceIdentifier<? extends DataObject> key;
-
- public ContainsWildcarded(final InstanceIdentifier<? extends DataObject> key) {
- this.key = key;
- }
-
- @Override
- public boolean apply(final InstanceIdentifier<? extends DataObject> input) {
- return key.containsWildcarded(input);
- }
- }
-
- private final static class IsContainedWildcarded implements Predicate<InstanceIdentifier<? extends DataObject>> {
-
- private final InstanceIdentifier<? extends DataObject> key;
-
- public IsContainedWildcarded(final InstanceIdentifier<? extends DataObject> key) {
- this.key = key;
- }
-
- @Override
- public boolean apply(final InstanceIdentifier<? extends DataObject> input) {
- return input.containsWildcarded(key);
- }
- }
-
- private final AtomicLong nextTransaction = new AtomicLong();
- private final AtomicLong createdTransactionsCount = new AtomicLong();
- private final DelegatingDataReadRouter router = new DelegatingDataReadRouter();
- private DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> rootCommitHandler;
-
- public DataBrokerImpl() {
- setDataReadRouter(router);
- }
-
- public void setDataReadDelegate(final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate) {
- router.setDelegate(delegate);
- }
-
- public AtomicLong getCreatedTransactionsCount() {
- return createdTransactionsCount;
- }
-
- @Override
- public DataTransactionImpl beginTransaction() {
- String transactionId = "BA-" + nextTransaction.getAndIncrement();
- createdTransactionsCount.getAndIncrement();
- return new DataTransactionImpl(transactionId, this);
- }
-
- @Override
- public void close() {
-
- }
-
- @Override
- protected Predicate<InstanceIdentifier<? extends DataObject>> createContainsPredicate(
- final InstanceIdentifier<? extends DataObject> key) {
- return new ContainsWildcarded(key);
- }
-
- @Override
- protected Predicate<InstanceIdentifier<? extends DataObject>> createIsContainedPredicate(
- final InstanceIdentifier<? extends DataObject> key) {
- return new IsContainedWildcarded(key);
- }
-
- @SuppressWarnings({ "unchecked", "rawtypes" })
- @Override
- protected Map<InstanceIdentifier<? extends DataObject>, DataObject> deepGetBySubpath(
- final Map<InstanceIdentifier<? extends DataObject>, DataObject> dataSet,
- final InstanceIdentifier<? extends DataObject> path) {
- Builder<InstanceIdentifier<? extends DataObject>, DataObject> builder = ImmutableMap.builder();
- Map<InstanceIdentifier<? extends DataObject>, DataObject> potential = Maps.filterKeys(dataSet,
- createIsContainedPredicate(path));
- for (Entry<InstanceIdentifier<? extends DataObject>, DataObject> entry : potential.entrySet()) {
- try {
- builder.putAll(DataObjectReadingUtil.readData(entry.getValue(), (InstanceIdentifier) entry.getKey(),
- path));
- } catch (Exception e) {
- // FIXME : Log exception;
- }
- }
- return builder.build();
-
- }
-
- public class DelegatingDataReadRouter extends
- AbstractDataReadRouter<InstanceIdentifier<? extends DataObject>, DataObject> {
-
- private DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate;
-
- @Override
- public DataObject readConfigurationData(final InstanceIdentifier<? extends DataObject> path) {
- return delegate.readConfigurationData(path);
- }
-
- public void setDelegate(final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public DataObject readOperationalData(final InstanceIdentifier<? extends DataObject> path) {
- return delegate.readOperationalData(path);
- }
-
- @Override
- protected DataObject merge(final InstanceIdentifier<? extends DataObject> path, final Iterable<DataObject> data) {
- throw new UnsupportedOperationException("Not supported");
- }
-
- @Override
- public Registration registerConfigurationReader(
- final InstanceIdentifier<? extends DataObject> path,
- final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
- throw new UnsupportedOperationException("Not supported");
- }
-
- @Override
- public Registration registerOperationalReader(
- final InstanceIdentifier<? extends DataObject> path,
- final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> reader) {
- throw new UnsupportedOperationException("Not supported");
- }
- }
-
- @Override
- protected ImmutableList<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> affectedCommitHandlers(
- final Set<InstanceIdentifier<? extends DataObject>> paths) {
- ImmutableList.Builder<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> handlersBuilder = ImmutableList.builder();
- return handlersBuilder //
- .add(rootCommitHandler) //
- .addAll(super.affectedCommitHandlers(paths)) //
- .build();
- }
-
- public void setRootCommitHandler(final DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> commitHandler) {
- rootCommitHandler = commitHandler;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
-import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-@Deprecated
-public class DataTransactionImpl extends AbstractDataTransaction<InstanceIdentifier<? extends DataObject>, DataObject>
- implements DataModificationTransaction {
- private final ListenerRegistry<DataTransactionListener> listeners = new ListenerRegistry<DataTransactionListener>();
-
-
-
- public DataTransactionImpl(Object identifier,DataBrokerImpl dataBroker) {
- super(identifier,dataBroker);
- }
-
- @Override
- public ListenerRegistration<DataTransactionListener> registerListener(DataTransactionListener listener) {
- return listeners.register(listener);
- }
-
- @Override
- protected void onStatusChange(TransactionStatus status) {
- for (ListenerRegistration<DataTransactionListener> listenerRegistration : listeners) {
- listenerRegistration.getInstance().onStatusUpdated(this, status);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.util.concurrent.UncheckedExecutionException;
-import java.util.EventListener;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChange;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
-import org.opendaylight.controller.md.sal.common.impl.routing.RoutingUtils;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class RpcProviderRegistryImpl implements RpcProviderRegistry, RouteChangePublisher<RpcContextIdentifier, InstanceIdentifier<?>> {
-
- private RuntimeCodeGenerator rpcFactory = SingletonHolder.RPC_GENERATOR_IMPL;
-
- // cache of proxy objects where each value in the map corresponds to a specific RpcService
- private final LoadingCache<Class<? extends RpcService>, RpcService> publicProxies = CacheBuilder.newBuilder().weakKeys().
- build(new CacheLoader<Class<? extends RpcService>, RpcService>() {
- @Override
- public RpcService load(final Class<? extends RpcService> type) {
- final RpcService proxy = rpcFactory.getDirectProxyFor(type);
- LOG.debug("Created {} as public proxy for {} in {}", proxy, type.getSimpleName(), this);
- return proxy;
- }
- });
-
- private final Cache<Class<? extends RpcService>, RpcRouter<?>> rpcRouters = CacheBuilder.newBuilder().weakKeys()
- .build();
-
- private final ListenerRegistry<RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> routeChangeListeners = ListenerRegistry
- .create();
- private final ListenerRegistry<RouterInstantiationListener> routerInstantiationListener = ListenerRegistry.create();
-
- private final static Logger LOG = LoggerFactory.getLogger(RpcProviderRegistryImpl.class);
-
- private final String name;
-
- private final ListenerRegistry<GlobalRpcRegistrationListener> globalRpcListeners = ListenerRegistry.create();
-
- public String getName() {
- return name;
- }
-
- public RpcProviderRegistryImpl(final String name) {
- super();
- this.name = name;
- }
-
- @Override
- public final <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(final Class<T> type,
- final T implementation) throws IllegalStateException {
- return getRpcRouter(type).addRoutedRpcImplementation(implementation);
- }
-
- @Override
- public final <T extends RpcService> RpcRegistration<T> addRpcImplementation(final Class<T> type, final T implementation) {
-
- // FIXME: This should be well documented - addRpcImplementation for
- // routed RPCs
- try {
- // Note: If RPC is really global, expected count of registrations
- // of this method is really low.
- RpcRouter<T> potentialRouter = getRpcRouter(type);
- checkState(potentialRouter.getDefaultService() == null,
- "Default service for routed RPC already registered.");
- return potentialRouter.registerDefaultService(implementation);
- } catch (RpcIsNotRoutedException e) {
- // NOOP - we could safely continue, since RPC is not routed
- // so we fallback to global routing.
- LOG.debug("RPC is not routed. Using global registration.",e);
- }
- T publicProxy = getRpcService(type);
- RpcService currentDelegate = RuntimeCodeHelper.getDelegate(publicProxy);
- checkState(currentDelegate == null, "Rpc service is already registered");
- LOG.debug("Registering {} as global implementation of {} in {}", implementation, type.getSimpleName(), this);
- RuntimeCodeHelper.setDelegate(publicProxy, implementation);
- notifyGlobalRpcAdded(type);
- return new RpcProxyRegistration<T>(type, implementation, this);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public final <T extends RpcService> T getRpcService(final Class<T> type) {
- return (T) publicProxies.getUnchecked(type);
- }
-
-
- public <T extends RpcService> RpcRouter<T> getRpcRouter(final Class<T> type) {
- try {
- final AtomicBoolean created = new AtomicBoolean(false);
- @SuppressWarnings( "unchecked")
- // LoadingCache is unsuitable for RpcRouter since we need to distinguish
- // first creation of RPC Router, so that is why
- // we are using normal cache with load API and shared AtomicBoolean
- // for this call, which will be set to true if router was created.
- RpcRouter<T> router = (RpcRouter<T>) rpcRouters.get(type,new Callable<RpcRouter<?>>() {
-
- @Override
- public org.opendaylight.controller.sal.binding.api.rpc.RpcRouter<?> call() {
- RpcRouter<?> router = rpcFactory.getRouterFor(type, name);
- router.registerRouteChangeListener(new RouteChangeForwarder<T>(type));
- LOG.debug("Registering router {} as global implementation of {} in {}", router, type.getSimpleName(), this);
- RuntimeCodeHelper.setDelegate(getRpcService(type), router.getInvocationProxy());
- created.set(true);
- return router;
- }
- });
- if(created.get()) {
- notifyListenersRoutedCreated(router);
- }
- return router;
- } catch (ExecutionException | UncheckedExecutionException e) {
- // We rethrow Runtime Exceptions which were wrapped by
- // Execution Exceptions
- // otherwise we throw IllegalStateException with original
- Throwables.propagateIfPossible(e.getCause());
- throw new IllegalStateException("Could not load RPC Router for "+type.getName(),e);
- }
- }
-
- private void notifyGlobalRpcAdded(final Class<? extends RpcService> type) {
- for(ListenerRegistration<GlobalRpcRegistrationListener> listener : globalRpcListeners) {
- try {
- listener.getInstance().onGlobalRpcRegistered(type);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {}", e);
- }
- }
-
- }
-
- private void notifyListenersRoutedCreated(final RpcRouter<?> router) {
-
- for (ListenerRegistration<RouterInstantiationListener> listener : routerInstantiationListener) {
- try {
- listener.getInstance().onRpcRouterCreated(router);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {}", e);
- }
- }
-
- }
-
- public ListenerRegistration<RouterInstantiationListener> registerRouterInstantiationListener(
- final RouterInstantiationListener listener) {
- ListenerRegistration<RouterInstantiationListener> reg = routerInstantiationListener.register(listener);
- try {
- for (RpcRouter<?> router : rpcRouters.asMap().values()) {
- listener.onRpcRouterCreated(router);
- }
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {}", e);
- }
- return reg;
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
- final L listener) {
- return (ListenerRegistration<L>) routeChangeListeners.register(listener);
- }
-
- public RuntimeCodeGenerator getRpcFactory() {
- return rpcFactory;
- }
-
- public void setRpcFactory(final RuntimeCodeGenerator rpcFactory) {
- this.rpcFactory = rpcFactory;
- }
-
- public interface RouterInstantiationListener extends EventListener {
- void onRpcRouterCreated(RpcRouter<?> router);
- }
-
- public ListenerRegistration<GlobalRpcRegistrationListener> registerGlobalRpcRegistrationListener(final GlobalRpcRegistrationListener listener) {
- return globalRpcListeners.register(listener);
- }
-
- public interface GlobalRpcRegistrationListener extends EventListener {
- void onGlobalRpcRegistered(Class<? extends RpcService> cls);
- void onGlobalRpcUnregistered(Class<? extends RpcService> cls);
-
- }
-
- private final class RouteChangeForwarder<T extends RpcService> implements RouteChangeListener<Class<? extends BaseIdentity>, InstanceIdentifier<?>> {
- private final Class<T> type;
-
- RouteChangeForwarder(final Class<T> type) {
- this.type = type;
- }
-
- @Override
- public void onRouteChange(final RouteChange<Class<? extends BaseIdentity>, InstanceIdentifier<?>> change) {
- Map<RpcContextIdentifier, Set<InstanceIdentifier<?>>> announcements = new HashMap<>();
- for (Entry<Class<? extends BaseIdentity>, Set<InstanceIdentifier<?>>> entry : change.getAnnouncements()
- .entrySet()) {
- RpcContextIdentifier key = RpcContextIdentifier.contextFor(type, entry.getKey());
- announcements.put(key, entry.getValue());
- }
- Map<RpcContextIdentifier, Set<InstanceIdentifier<?>>> removals = new HashMap<>();
- for (Entry<Class<? extends BaseIdentity>, Set<InstanceIdentifier<?>>> entry : change.getRemovals()
- .entrySet()) {
- RpcContextIdentifier key = RpcContextIdentifier.contextFor(type, entry.getKey());
- removals.put(key, entry.getValue());
- }
- RouteChange<RpcContextIdentifier, InstanceIdentifier<?>> toPublish = RoutingUtils
- .<RpcContextIdentifier, InstanceIdentifier<?>> change(announcements, removals);
- for (ListenerRegistration<RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> listener : routeChangeListeners) {
- try {
- listener.getInstance().onRouteChange(toPublish);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener",listener.getInstance(),e);
- }
- }
- }
- }
-
- private static final class RpcProxyRegistration<T extends RpcService> extends AbstractObjectRegistration<T> implements RpcRegistration<T> {
- private final RpcProviderRegistryImpl registry;
- private final Class<T> serviceType;
-
- RpcProxyRegistration(final Class<T> type, final T service, final RpcProviderRegistryImpl registry) {
- super(service);
- this.registry = Preconditions.checkNotNull(registry);
- this.serviceType = type;
- }
-
- @Override
- public Class<T> getServiceType() {
- return serviceType;
- }
-
- @Override
- protected void removeRegistration() {
- T publicProxy = registry.getRpcService(serviceType);
- RpcService currentDelegate = RuntimeCodeHelper.getDelegate(publicProxy);
- if (currentDelegate == getInstance()) {
- RuntimeCodeHelper.setDelegate(publicProxy, null);
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.binding.impl;
-
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.ServiceRegistration;
-
-@SuppressWarnings("all")
-public class RpcProxyContext {
- public RpcProxyContext(final Class<? extends RpcService> proxyClass) {
- this.proxyClass = proxyClass;
- }
-
- protected final Class<? extends RpcService> proxyClass;
-
- protected RpcService _proxy;
-
- public RpcService getProxy() {
- return this._proxy;
- }
-
- public void setProxy(final RpcService proxy) {
- this._proxy = proxy;
- }
-
- protected ServiceRegistration<? extends RpcService> _registration;
-
- public ServiceRegistration<? extends RpcService> getRegistration() {
- return this._registration;
- }
-
- public void setRegistration(final ServiceRegistration<? extends RpcService> registration) {
- this._registration = registration;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl.util;
-
-import org.opendaylight.controller.md.sal.common.impl.routing.AbstractDataReadRouter;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class BindingAwareDataReaderRouter extends AbstractDataReadRouter<InstanceIdentifier<? extends DataObject>,DataObject> {
- @Override
- protected DataObject merge(final InstanceIdentifier<? extends DataObject> path, final Iterable<DataObject> data) {
- return data.iterator().next();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.spi;
-
-import java.util.Set;
-
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-public interface NotificationInvokerFactory {
-
- NotificationInvoker invokerFor(org.opendaylight.yangtools.yang.binding.NotificationListener instance);
-
- public interface NotificationInvoker {
-
- Set<Class<? extends Notification>> getSupportedNotifications();
-
- NotificationListener<Notification> getInvocationProxy();
-
- public abstract void close();
-
- org.opendaylight.yangtools.yang.binding.NotificationListener getDelegate();
-
- }
-}
base config:module-type;
config:provided-service binding-dom-mapping-service;
config:provided-service sal:binding-codec-tree-factory;
+ config:provided-service sal:binding-normalized-node-serializer;
config:java-name-prefix RuntimeMapping;
}
}
}
+ augment "/config:modules/config:module/config:configuration" {
+ case binding-notification-broker {
+ when "/config:modules/config:module/config:type = 'binding-notification-broker'";
+ container notification-adapter {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity binding-new-notification-service;
+ }
+ }
+ }
+
+ container notification-publish-adapter {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity binding-new-notification-publish-service;
+ }
+ }
+ }
+ }
+ }
+
augment "/config:modules/config:module/config:state" {
case binding-notification-broker {
when "/config:modules/config:module/config:type = 'binding-notification-broker'";
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
import com.google.common.collect.ImmutableList;
final CountDownLatch latch = new CountDownLatch(1);
final TwoLevelListChanged testData = createTestData();
- final NotifTestListener testNotifListener = new NotifTestListener(latch);
+ final NotifTestListenerChild testNotifListener = new NotifTestListenerChild(latch);
final ListenerRegistration<NotificationListener> listenerRegistration =
notificationProviderService.registerNotificationListener(testNotifListener);
notificationProviderService.publish(testData);
- latch.await();
+ latch.await(500L, TimeUnit.MILLISECONDS);
assertTrue(testNotifListener.getReceivedNotifications().size() == 1);
assertEquals(testData, testNotifListener.getReceivedNotifications().get(0));
listenerRegistration.close();
}
+ private static class NotifTestListenerChild extends NotifTestListener {
+
+ public NotifTestListenerChild(final CountDownLatch latch) {
+ super(latch);
+ }
+ }
private static class NotifTestListener implements OpendaylightMdsalListTestListener {
private List<TwoLevelListChanged> receivedNotifications = new ArrayList<>();
private CountDownLatch latch;
--- /dev/null
+package org.opendaylight.controller.md.sal.binding.impl.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.USES_ONE_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.complexUsesAugment;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.SettableFuture;
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
+import org.opendaylight.controller.md.sal.binding.test.AbstractDataBrokerTest;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+
+public class DataTreeChangeListenerTest extends AbstractDataBrokerTest {
+
+ private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
+ private static final PathArgument TOP_ARGUMENT= TOP_PATH.getPathArguments().iterator().next();
+ private static final InstanceIdentifier<TopLevelList> FOO_PATH = path(TOP_FOO_KEY);
+ private static final PathArgument FOO_ARGUMENT = Iterables.getLast(FOO_PATH.getPathArguments());
+ private static final TopLevelList FOO_DATA = topLevelList(TOP_FOO_KEY, complexUsesAugment(USES_ONE_KEY));
+ private static final InstanceIdentifier<TopLevelList> BAR_PATH = path(TOP_BAR_KEY);
+ private static final PathArgument BAR_ARGUMENT = Iterables.getLast(BAR_PATH.getPathArguments());
+ private static final TopLevelList BAR_DATA = topLevelList(TOP_BAR_KEY);
+private static final DataTreeIdentifier<Top> TOP_IDENTIFIER = new DataTreeIdentifier<Top>(LogicalDatastoreType.OPERATIONAL,
+ TOP_PATH);
+
+ private static final Top TOP_INITIAL_DATA = top(FOO_DATA);
+
+ private BindingDOMDataBrokerAdapter dataBrokerImpl;
+
+ private static final class EventCapturingListener<T extends DataObject> implements DataTreeChangeListener<T> {
+
+ private SettableFuture<Collection<DataTreeModification<T>>> changes = SettableFuture.create();
+
+ @Override
+ public void onDataTreeChanged(final Collection<DataTreeModification<T>> changes) {
+ this.changes.set(changes);
+
+ }
+
+ Collection<DataTreeModification<T>> nextEvent() throws Exception {
+ final Collection<DataTreeModification<T>> result = changes.get(200,TimeUnit.MILLISECONDS);
+ changes = SettableFuture.create();
+ return result;
+ }
+
+ }
+
+ @Override
+ protected Iterable<YangModuleInfo> getModuleInfos() throws Exception {
+ return ImmutableSet.of(
+ BindingReflections.getModuleInfo(TwoLevelList.class),
+ BindingReflections.getModuleInfo(TreeComplexUsesAugment.class)
+ );
+ }
+
+ @Override
+ protected void setupWithDataBroker(final DataBroker dataBroker) {
+ dataBrokerImpl = (BindingDOMDataBrokerAdapter) dataBroker;
+ }
+
+ @Test
+ public void testTopLevelListener() throws Exception {
+ final EventCapturingListener<Top> listener = new EventCapturingListener<>();
+ dataBrokerImpl.registerDataTreeChangeListener(TOP_IDENTIFIER, listener);
+
+ createAndVerifyTop(listener);
+
+ putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
+ final DataObjectModification<Top> afterBarPutEvent = Iterables.getOnlyElement(listener.nextEvent()).getRootNode();
+ verifyModification(afterBarPutEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
+ final DataObjectModification<TopLevelList> barPutMod = afterBarPutEvent.getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
+ assertNotNull(barPutMod);
+ verifyModification(barPutMod, BAR_ARGUMENT, ModificationType.WRITE);
+
+ deleteTx(BAR_PATH).submit().checkedGet();
+ final DataObjectModification<Top> afterBarDeleteEvent = Iterables.getOnlyElement(listener.nextEvent()).getRootNode();
+ verifyModification(afterBarDeleteEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
+ final DataObjectModification<TopLevelList> barDeleteMod = afterBarDeleteEvent.getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
+ verifyModification(barDeleteMod, BAR_ARGUMENT, ModificationType.DELETE);
+ }
+
+ @Test
+ public void testWildcardedListListener() throws Exception {
+ final EventCapturingListener<TopLevelList> listener = new EventCapturingListener<>();
+ final DataTreeIdentifier<TopLevelList> wildcard = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, TOP_PATH.child(TopLevelList.class));
+ dataBrokerImpl.registerDataTreeChangeListener(wildcard, listener);
+
+ putTx(TOP_PATH, TOP_INITIAL_DATA).submit().checkedGet();
+
+ final DataTreeModification<TopLevelList> fooWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
+ assertEquals(FOO_PATH, fooWriteEvent.getRootPath().getRootIdentifier());
+ verifyModification(fooWriteEvent.getRootNode(), FOO_ARGUMENT, ModificationType.WRITE);
+
+ putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
+ final DataTreeModification<TopLevelList> barWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
+ assertEquals(BAR_PATH, barWriteEvent.getRootPath().getRootIdentifier());
+ verifyModification(barWriteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.WRITE);
+
+ deleteTx(BAR_PATH).submit().checkedGet();
+ final DataTreeModification<TopLevelList> barDeleteEvent = Iterables.getOnlyElement(listener.nextEvent());
+ assertEquals(BAR_PATH, barDeleteEvent.getRootPath().getRootIdentifier());
+ verifyModification(barDeleteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.DELETE);
+ }
+
+
+
+ private void createAndVerifyTop(final EventCapturingListener<Top> listener) throws Exception {
+ putTx(TOP_PATH,TOP_INITIAL_DATA).submit().checkedGet();
+ final Collection<DataTreeModification<Top>> events = listener.nextEvent();
+
+ assertFalse("Non empty collection should be received.",events.isEmpty());
+ final DataTreeModification<Top> initialWrite = Iterables.getOnlyElement(events);
+ final DataObjectModification<? extends DataObject> initialNode = initialWrite.getRootNode();
+ verifyModification(initialNode,TOP_PATH.getPathArguments().iterator().next(),ModificationType.WRITE);
+ assertEquals(TOP_INITIAL_DATA, initialNode.getDataAfter());
+ }
+
+ private void verifyModification(final DataObjectModification<? extends DataObject> barWrite, final PathArgument pathArg,
+ final ModificationType eventType) {
+ assertEquals(pathArg.getType(), barWrite.getDataType());
+ assertEquals(eventType,barWrite.getModificationType());
+ assertEquals(pathArg, barWrite.getIdentifier());
+ }
+
+ private <T extends DataObject> WriteTransaction putTx(final InstanceIdentifier<T> path,final T data) {
+ final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
+ return tx;
+ }
+
+ private WriteTransaction deleteTx(final InstanceIdentifier<?> path) {
+ final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
+ tx.delete(LogicalDatastoreType.OPERATIONAL, path);
+ return tx;
+ }
+}
+++ /dev/null
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-
-import com.google.common.base.Throwables;
-import java.util.Arrays;
-import javassist.ClassPool;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumRpcProviderRegistry;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.binding.test.AbstractSchemaAwareTest;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMRpcRouter;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.OpendaylightTestRpcServiceService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.TestContext;
-import org.opendaylight.yangtools.binding.data.codec.gen.impl.DataObjectSerializerGenerator;
-import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-
-public class RpcProviderRegistryTest extends AbstractSchemaAwareTest {
-
- private static InstanceIdentifier<TopLevelList> FOO_PATH = path(TOP_FOO_KEY);
- private static InstanceIdentifier<TopLevelList> BAR_PATH = path(TOP_BAR_KEY);
- private static RpcContextIdentifier ROUTING_CONTEXT = RpcContextIdentifier.contextFor(OpendaylightTestRoutedRpcService.class, TestContext.class);
-
- private RpcProviderRegistry rpcRegistry;
-
-
- @Override
- protected Iterable<YangModuleInfo> getModuleInfos() {
- try {
- return Arrays.asList(
- BindingReflections.getModuleInfo(TopLevelList.class),
- BindingReflections.getModuleInfo(OpendaylightTestRoutedRpcService.class),
- BindingReflections.getModuleInfo(OpendaylightTestRpcServiceService.class));
- } catch (final Exception e) {
- throw Throwables.propagate(e);
- }
- }
-
- @Override
- protected void setupWithSchema(final SchemaContext context) {
- final DataObjectSerializerGenerator generator = StreamWriterGenerator.create(JavassistUtils.forClassPool(ClassPool.getDefault()));
- final BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
- final GeneratedClassLoadingStrategy classLoadingStrategy = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
- final BindingToNormalizedNodeCodec codec = new BindingToNormalizedNodeCodec(classLoadingStrategy, codecRegistry);
- final DOMRpcRouter domRpcRegistry = new DOMRpcRouter();
- domRpcRegistry.onGlobalContextUpdated(context);
- codec.onGlobalContextUpdated(context);
- final RpcConsumerRegistry consumer = new BindingDOMRpcServiceAdapter(domRpcRegistry, codec);
- final BindingDOMRpcProviderServiceAdapter provider = new BindingDOMRpcProviderServiceAdapter( domRpcRegistry,codec);
- rpcRegistry = new HeliumRpcProviderRegistry(consumer,provider);
- }
-
- @Test
- public void testGlobalRpcRegistrations() throws Exception {
- final OpendaylightTestRpcServiceService one = Mockito.mock(OpendaylightTestRpcServiceService.class);
- final OpendaylightTestRpcServiceService two = Mockito.mock(OpendaylightTestRpcServiceService.class);
-
- final RpcRegistration<OpendaylightTestRpcServiceService> regOne = rpcRegistry.addRpcImplementation(OpendaylightTestRpcServiceService.class, one);
- assertNotNull(regOne);
- rpcRegistry.addRpcImplementation(OpendaylightTestRpcServiceService.class, two);
- regOne.close();
- final RpcRegistration<OpendaylightTestRpcServiceService> regTwo = rpcRegistry.addRpcImplementation(OpendaylightTestRpcServiceService.class, two);
- assertNotNull(regTwo);
- }
-
-
- @Test
- @Ignore
- public void nonRoutedRegisteredAsRouted() {
- final OpendaylightTestRpcServiceService one = Mockito.mock(OpendaylightTestRpcServiceService.class);
- try {
- final RoutedRpcRegistration<OpendaylightTestRpcServiceService> reg = rpcRegistry.addRoutedRpcImplementation(OpendaylightTestRpcServiceService.class, one);
- reg.registerPath(null, BAR_PATH);
- fail("RpcIsNotRoutedException should be thrown");
- } catch (final RpcIsNotRoutedException e) {
- assertNotNull(e.getMessage());
- } catch (final Exception e) {
- fail("RpcIsNotRoutedException should be thrown");
- }
-
- }
-
-}
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
import org.opendaylight.controller.sal.binding.test.util.MockSchemaService;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
}
public NotificationService createNotificationService() {
- return new BindingDOMNotificationServiceAdapter(bindingToNormalized.getCodecRegistry(), domNotificationRouter,
- SingletonHolder.INVOKER_FACTORY);
+ return new BindingDOMNotificationServiceAdapter(bindingToNormalized.getCodecRegistry(), domNotificationRouter);
}
public NotificationPublishService createNotificationPublishService() {
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import java.util.ArrayList;
-import java.util.List;
-import javassist.ClassPool;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcRoutingTable;
-import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
-import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
-import org.opendaylight.controller.sal.binding.test.mock.BarListener;
-import org.opendaylight.controller.sal.binding.test.mock.BarUpdate;
-import org.opendaylight.controller.sal.binding.test.mock.FlowDelete;
-import org.opendaylight.controller.sal.binding.test.mock.FooListener;
-import org.opendaylight.controller.sal.binding.test.mock.FooService;
-import org.opendaylight.controller.sal.binding.test.mock.FooUpdate;
-import org.opendaylight.controller.sal.binding.test.mock.ReferencableObject;
-import org.opendaylight.controller.sal.binding.test.mock.ReferencableObjectKey;
-import org.opendaylight.controller.sal.binding.test.mock.SimpleInput;
-import org.opendaylight.yangtools.yang.binding.Augmentation;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.DataContainer;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class DefaultRuntimeCodeGeneratorTest {
-
- private RuntimeCodeGenerator codeGenerator;
- private NotificationInvokerFactory invokerFactory;
-
- @Before
- public void initialize() {
- this.codeGenerator = new DefaultRuntimeCodeGenerator(ClassPool.getDefault());
- this.invokerFactory = codeGenerator.getInvokerFactory();
- }
-
- @Test
- public void testGenerateDirectProxy() {
- FooService product = codeGenerator.getDirectProxyFor(FooService.class);
- assertNotNull(product);
- }
-
- @Test
- public void testGenerateRouter() throws Exception {
- RpcRouter<FooService> product = codeGenerator.getRouterFor(FooService.class,"test");
- assertNotNull(product);
- assertNotNull(product.getInvocationProxy());
-
- assertEquals("2 fields should be generated.", 2, product.getInvocationProxy().getClass().getFields().length);
-
- verifyRouting(product);
- }
-
- @Test
- public void testInvoker() throws Exception {
-
- FooListenerImpl fooListener = new FooListenerImpl();
-
- NotificationInvoker invokerFoo = invokerFactory.invokerFor(fooListener);
-
-
- assertSame(fooListener,invokerFoo.getDelegate());
- assertNotNull(invokerFoo.getSupportedNotifications());
- assertEquals(1, invokerFoo.getSupportedNotifications().size());
- assertNotNull(invokerFoo.getInvocationProxy());
-
- FooUpdateImpl fooOne = new FooUpdateImpl();
- invokerFoo.getInvocationProxy().onNotification(fooOne);
-
- assertEquals(1, fooListener.receivedFoos.size());
- assertSame(fooOne, fooListener.receivedFoos.get(0));
-
- CompositeListenerImpl composite = new CompositeListenerImpl();
-
- NotificationInvoker invokerComposite = invokerFactory.invokerFor(composite);
-
- assertNotNull(invokerComposite.getSupportedNotifications());
- assertEquals(3, invokerComposite.getSupportedNotifications().size());
- assertNotNull(invokerComposite.getInvocationProxy());
-
- invokerComposite.getInvocationProxy().onNotification(fooOne);
-
- assertEquals(1, composite.receivedFoos.size());
- assertSame(fooOne, composite.receivedFoos.get(0));
-
- assertEquals(0, composite.receivedBars.size());
-
- BarUpdateImpl barOne = new BarUpdateImpl();
-
- invokerComposite.getInvocationProxy().onNotification(barOne);
-
- assertEquals(1, composite.receivedFoos.size());
- assertEquals(1, composite.receivedBars.size());
- assertSame(barOne, composite.receivedBars.get(0));
-
- }
-
- private void verifyRouting(final RpcRouter<FooService> product) {
- assertNotNull("Routing table should be initialized", product.getRoutingTable(BaseIdentity.class));
-
- RpcRoutingTable<BaseIdentity, FooService> routingTable = product.getRoutingTable(BaseIdentity.class);
-
- int servicesCount = 2;
- int instancesPerService = 3;
-
- InstanceIdentifier<?>[][] identifiers = identifiers(servicesCount, instancesPerService);
- FooService service[] = new FooService[] { mock(FooService.class, "Instance 0"),
- mock(FooService.class, "Instance 1") };
-
- for (int i = 0; i < service.length; i++) {
- for (InstanceIdentifier<?> instance : identifiers[i]) {
- routingTable.updateRoute(instance, service[i]);
- }
- }
-
- assertEquals("All instances should be registered.", servicesCount * instancesPerService, routingTable
- .getRoutes().size());
-
- SimpleInput[] instance_0_input = new SimpleInputImpl[] { new SimpleInputImpl(identifiers[0][0]),
- new SimpleInputImpl(identifiers[0][1]), new SimpleInputImpl(identifiers[0][2]) };
-
- SimpleInput[] instance_1_input = new SimpleInputImpl[] { new SimpleInputImpl(identifiers[1][0]),
- new SimpleInputImpl(identifiers[1][1]), new SimpleInputImpl(identifiers[1][2]) };
-
- // We test sending mock messages
-
- product.getInvocationProxy().simple(instance_0_input[0]);
- verify(service[0]).simple(instance_0_input[0]);
-
- product.getInvocationProxy().simple(instance_0_input[1]);
- product.getInvocationProxy().simple(instance_0_input[2]);
-
- verify(service[0]).simple(instance_0_input[1]);
- verify(service[0]).simple(instance_0_input[2]);
-
- product.getInvocationProxy().simple(instance_1_input[0]);
-
- // We should have call to instance 1
- verify(service[1]).simple(instance_1_input[0]);
-
- /*
- * Generated RPC service should throw illegalArgumentException
- * with message if rpc input is null.
- */
- try {
- product.getInvocationProxy().simple(null);
- fail("Generated RPC router should throw IllegalArgumentException on null input");
- } catch (IllegalArgumentException e){
- assertNotNull(e.getMessage());
- }
-
-
- /*
- * Generated RPC service should throw illegalArgumentException
- * with message if rpc route is null.
- */
- try {
- SimpleInput withoutValue = new SimpleInputImpl(null);
- product.getInvocationProxy().simple(withoutValue);
- fail("Generated RPC router should throw IllegalArgumentException on null value for route");
- } catch (IllegalArgumentException e){
- assertNotNull(e.getMessage());
- }
-
- }
-
- private InstanceIdentifier<?>[][] identifiers(final int serviceSize, final int instancesPerService) {
- InstanceIdentifier<?>[][] ret = new InstanceIdentifier[serviceSize][];
- int service = 0;
- for (int i = 0; i < serviceSize; i++) {
-
- InstanceIdentifier<?>[] instanceIdentifiers = new InstanceIdentifier[instancesPerService];
- ret[i] = instanceIdentifiers;
- for (int id = 0; id < instancesPerService; id++) {
- instanceIdentifiers[id] = referencableIdentifier(service * instancesPerService + id);
- }
- service++;
- }
-
- return ret;
- }
-
- private InstanceIdentifier<?> referencableIdentifier(final int i) {
- return InstanceIdentifier.builder(ReferencableObject.class, new ReferencableObjectKey(i)).build();
- }
-
- private static class SimpleInputImpl implements SimpleInput {
- private final InstanceIdentifier<?> identifier;
-
- public SimpleInputImpl(final InstanceIdentifier<?> _identifier) {
- this.identifier = _identifier;
- }
-
- @Override
- public <E extends Augmentation<SimpleInput>> E getAugmentation(final Class<E> augmentationType) {
- return null;
- }
-
- @Override
- public InstanceIdentifier<?> getIdentifier() {
- return this.identifier;
- }
-
- @Override
- public Class<? extends DataObject> getImplementedInterface() {
- return SimpleInput.class;
- }
- }
-
- private static class FooUpdateImpl implements FooUpdate {
- @Override
- public Class<? extends DataContainer> getImplementedInterface() {
- return FooUpdate.class;
- }
- }
-
- private static class BarUpdateImpl implements BarUpdate {
- @Override
- public Class<? extends DataContainer> getImplementedInterface() {
- return BarUpdate.class;
- }
-
- @Override
- public InstanceIdentifier<?> getInheritedIdentifier() {
- return null;
- }
- }
-
- private static class FooListenerImpl implements FooListener {
-
- List<FooUpdate> receivedFoos = new ArrayList<>();
-
- @Override
- public void onFooUpdate(final FooUpdate notification) {
- receivedFoos.add(notification);
- }
-
- }
-
- private static class CompositeListenerImpl extends FooListenerImpl implements BarListener {
-
- List<BarUpdate> receivedBars = new ArrayList<>();
- List<FlowDelete> receivedDeletes = new ArrayList<>();
-
- @Override
- public void onBarUpdate(final BarUpdate notification) {
- receivedBars.add(notification);
- }
-
- @Override
- public void onFlowDelete(final FlowDelete notification) {
- receivedDeletes.add(notification);
- }
-
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.binding.codegen.impl;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import java.lang.reflect.Field;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Ignore
-public class SingletonHolderTest {
- private static final Logger logger = LoggerFactory.getLogger(SingletonHolderTest.class);
-
- @Test
- public void testNotificationExecutor() throws Exception {
- ListeningExecutorService executor = SingletonHolder.getDefaultNotificationExecutor();
- ThreadPoolExecutor tpExecutor = (ThreadPoolExecutor) setAccessible(executor.getClass().getDeclaredField("delegate")).get(executor);
- BlockingQueue<Runnable> queue = tpExecutor.getQueue();
-
- for (int idx = 0; idx < 100; idx++) {
- final int idx2 = idx;
- logger.info("Adding {}\t{}\t{}", idx, queue.size(), tpExecutor.getActiveCount());
- executor.execute(new Runnable() {
-
- @Override
- public void run() {
- logger.info("in {}", idx2);
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- logger.info("out {}", idx2);
- }
- });
- }
- executor.shutdown();
- executor.awaitTermination(10, TimeUnit.SECONDS);
- }
-
- private static Field setAccessible(Field field) {
- field.setAccessible(true);
- return field;
- }
-}
import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
-@SuppressWarnings("deprecation")
public abstract class AbstractDataServiceTest {
protected DataProviderService baDataService;
import javassist.ClassPool;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.MountPointService;
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.controller.md.sal.binding.api.NotificationService;
+import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
import org.opendaylight.controller.md.sal.binding.compat.HeliumRpcProviderRegistry;
import org.opendaylight.controller.md.sal.binding.compat.HydrogenDataBrokerAdapter;
import org.opendaylight.controller.md.sal.binding.compat.HydrogenMountProvisionServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMMountPointServiceAdapter;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationPublishServiceAdapter;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcServiceAdapter;
import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMRpcRouter;
import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.broker.impl.mount.DOMMountPointServiceImpl;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.controller.sal.binding.api.mount.MountProviderService;
-import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
import org.opendaylight.controller.sal.binding.impl.RootBindingAwareBroker;
import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
import org.opendaylight.controller.sal.core.api.BrokerService;
private RootBindingAwareBroker baBrokerImpl;
- private NotificationBrokerImpl baNotifyImpl;
+ private HeliumNotificationProviderServiceAdapter baNotifyImpl;
private BrokerImpl biBrokerImpl;
private BindingDOMRpcProviderServiceAdapter baProviderRpc;
private DOMRpcRouter domRouter;
+ private NotificationPublishService publishService;
+
+ private NotificationService listenService;
+
+ private DOMNotificationPublishService domPublishService;
+
+ private DOMNotificationService domListenService;
+
public DOMDataBroker getDomAsyncDataBroker() {
public void startBindingNotificationBroker() {
checkState(executor != null);
- baNotifyImpl = new NotificationBrokerImpl(executor);
+ final DOMNotificationRouter router = DOMNotificationRouter.create(16);
+ domPublishService = router;
+ domListenService = router;
+ publishService = new BindingDOMNotificationPublishServiceAdapter(codec, domPublishService);
+ listenService = new BindingDOMNotificationServiceAdapter(codec, domListenService);
+ baNotifyImpl = new HeliumNotificationProviderServiceAdapter(publishService,listenService);
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-config</artifactId>
<packaging>bundle</packaging>
config:java-class "org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeFactory";
}
+ identity binding-normalized-node-serializer {
+ base "config:service-type";
+ config:java-class "org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer";
+ }
+
identity binding-notification-subscription-service {
base "config:service-type";
config:java-class "org.opendaylight.controller.sal.binding.api.NotificationService";
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-dom-it</artifactId>
<packaging>jar</packaging>
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
* FIXME: THis test should be moved to sal-binding-broker and rewriten
* to use new DataBroker API
*/
-@SuppressWarnings("deprecation")
public class ConcurrentImplicitCreateTest extends AbstractDataServiceTest {
private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
private static final TopLevelListKey BAR_KEY = new TopLevelListKey("bar");
- private static InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
- private static InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, FOO_KEY);
- private static InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, BAR_KEY);
+ private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
+ private static final InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, FOO_KEY);
+ private static final InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, BAR_KEY);
@Test
public void testConcurrentCreate() throws InterruptedException, ExecutionException {
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-
+import com.google.common.util.concurrent.SettableFuture;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import com.google.common.util.concurrent.SettableFuture;
-
/**
* FIXME: THis test should be moved to compat test-suite
*/
-@SuppressWarnings("deprecation")
public class WildcardedDataChangeListenerTest extends AbstractDataServiceTest {
private static final TopLevelListKey TOP_LEVEL_LIST_0_KEY = new TopLevelListKey("test:0");
package org.opendaylight.controller.sal.binding.test.bugfix;
import static org.junit.Assert.assertFalse;
-
+import com.google.common.util.concurrent.SettableFuture;
import java.util.concurrent.ExecutionException;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import com.google.common.util.concurrent.SettableFuture;
-
-@SuppressWarnings("deprecation")
public class DeleteNestedAugmentationListenParentTest extends AbstractDataServiceTest {
private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-
+import com.google.common.util.concurrent.SettableFuture;
import java.util.concurrent.TimeUnit;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import com.google.common.util.concurrent.SettableFuture;
-
-@SuppressWarnings("deprecation")
public class WriteParentListenAugmentTest extends AbstractDataServiceTest {
private static final String TLL_NAME = "foo";
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-
+import com.google.common.collect.ImmutableList;
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import com.google.common.collect.ImmutableList;
-
-@SuppressWarnings("deprecation")
public class WriteParentReadChildTest extends AbstractDataServiceTest {
private static final int LIST11_ID = 1234;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-
import java.util.concurrent.Future;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-/**
- * FIXME: Migrate to use new Data Broker APIs
- */
-@SuppressWarnings("deprecation")
public class BrokerIntegrationTest extends AbstractDataServiceTest {
private static final TopLevelListKey TLL_FOO_KEY = new TopLevelListKey("foo");
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-@SuppressWarnings("deprecation")
public class CrossBrokerMountPointTest {
private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
assertNotNull(moduleStream);
final List<InputStream> rpcModels = Collections.singletonList(moduleStream);
- @SuppressWarnings("deprecation")
- final
- Set<Module> modules = parser.parseYangModelsFromStreams(rpcModels);
- @SuppressWarnings("deprecation")
- final
- SchemaContext mountSchemaContext = parser.resolveSchemaContext(modules);
+ final Set<Module> modules = parser.parseYangModelsFromStreams(rpcModels);
+ final SchemaContext mountSchemaContext = parser.resolveSchemaContext(modules);
schemaContext = mountSchemaContext;
}
.child(TopLevelList.class, new TopLevelListKey(mount)).toInstance();
}
- @SuppressWarnings("deprecation")
@Test
public void test() throws ExecutionException, InterruptedException {
// FIXME: This is made to only make sure instance identifier codec
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-it</artifactId>
mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), // //
mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(), //
- mavenBundle("com.google.guava", "guava").versionAsInProject()
+ mavenBundle("com.google.guava", "guava").versionAsInProject(),
+ mavenBundle("com.github.romix", "java-concurrent-hash-trie-map").versionAsInProject()
);
}
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-notification-broker</type>
<name>binding-notification-broker</name>
+ <notification-adapter xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-new-notification-service</type>
+ <name>binding-notification-adapter</name>
+ </notification-adapter>
+ <notification-publish-adapter xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-new-notification-publish-service</type>
+ <name>binding-notification-publish-adapter</name>
+ </notification-publish-adapter>
</module>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-broker-impl</type>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-inmemory-data-broker</type>
<name>inmemory-data-broker</name>
+
<schema-service>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
<name>yang-schema-service</name>
</schema-service>
+
+ <config-data-store>
+ <type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
+ <name>config-store-service</name>
+ </config-data-store>
+
+ <operational-data-store>
+ <type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
+ <name>operational-store-service</name>
+ </operational-data-store>
</module>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-broker-impl</type>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-util</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-clustering-commons</artifactId>
*/
void deleteMessages(long sequenceNumber);
+ /**
+ * Returns the last sequence number contained in the journal.
+ */
+ long getLastSequenceNumber();
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+
+/**
+ * A DataPersistenceProvider implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
+ private DataPersistenceProvider delegate;
+
+ public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+ this.delegate = delegate;
+ }
+
+ public void setDelegate(DataPersistenceProvider delegate) {
+ this.delegate = delegate;
+ }
+
+ public DataPersistenceProvider getDelegate() {
+ return delegate;
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return delegate.isRecoveryApplicable();
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ delegate.persist(o, procedure);
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ delegate.saveSnapshot(o);
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ delegate.deleteSnapshots(criteria);
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ delegate.deleteMessages(sequenceNumber);
+ }
+
+ @Override
+ public long getLastSequenceNumber() {
+ return delegate.getLastSequenceNumber();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A DataPersistenceProvider implementation with persistence disabled, essentially a no-op.
+ */
+public class NonPersistentDataProvider implements DataPersistenceProvider {
+ private static final Logger LOG = LoggerFactory.getLogger(NonPersistentDataProvider.class);
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ try {
+ procedure.apply(o);
+ } catch (Exception e) {
+ LOG.error("An unexpected error occurred", e);
+ }
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ }
+
+ @Override
+ public long getLastSequenceNumber() {
+ return -1;
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.UntypedPersistentActor;
+import com.google.common.base.Preconditions;
+
+/**
+ * A DataPersistenceProvider implementation with persistence enabled.
+ */
+public class PersistentDataProvider implements DataPersistenceProvider {
+
+ private final UntypedPersistentActor persistentActor;
+
+ public PersistentDataProvider(UntypedPersistentActor persistentActor) {
+ this.persistentActor = Preconditions.checkNotNull(persistentActor, "persistentActor can't be null");
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return true;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ persistentActor.persist(o, procedure);
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ persistentActor.saveSnapshot(o);
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ persistentActor.deleteSnapshots(criteria);
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ persistentActor.deleteMessages(sequenceNumber);
+ }
+
+ @Override
+ public long getLastSequenceNumber() {
+ return persistentActor.lastSequenceNr();
+ }
+}
\ No newline at end of file
return config;
}
- public static abstract class Builder<T extends Builder>{
+ public static abstract class Builder<T extends Builder<T>> {
protected Map<String, Object> configHolder;
protected Config fallback;
package org.opendaylight.controller.cluster.common.actor;
-import akka.japi.Procedure;
-import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
unhandled(message);
}
-
- protected class PersistentDataProvider implements DataPersistenceProvider {
-
- public PersistentDataProvider(){
-
- }
-
- @Override
- public boolean isRecoveryApplicable() {
- return true;
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- AbstractUntypedPersistentActor.this.persist(o, procedure);
- }
-
- @Override
- public void saveSnapshot(Object o) {
- AbstractUntypedPersistentActor.this.saveSnapshot(o);
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
- AbstractUntypedPersistentActor.this.deleteSnapshots(criteria);
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
- AbstractUntypedPersistentActor.this.deleteMessages(sequenceNumber);
- }
- }
-
- protected class NonPersistentDataProvider implements DataPersistenceProvider {
-
- public NonPersistentDataProvider(){
-
- }
-
- @Override
- public boolean isRecoveryApplicable() {
- return false;
- }
-
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- try {
- procedure.apply(o);
- } catch (Exception e) {
- LOG.error("An unexpected error occurred", e);
- }
- }
-
- @Override
- public void saveSnapshot(Object o) {
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
-
- }
- }
}
return cachedMailBoxPushTimeout;
}
- public static class Builder<T extends Builder> extends AbstractConfig.Builder<T>{
+ public static class Builder<T extends Builder<T>> extends AbstractConfig.Builder<T>{
public Builder(String actorSystemName) {
super(actorSystemName);
private final Logger LOG = LoggerFactory.getLogger(MeteredBoundedMailbox.class);
private MeteredMessageQueue queue;
- private Integer capacity;
- private FiniteDuration pushTimeOut;
- private MetricRegistry registry;
+ private final Integer capacity;
+ private final FiniteDuration pushTimeOut;
+ private final MetricRegistry registry;
private final String QUEUE_SIZE = "q-size";
this.capacity = commonConfig.getMailBoxCapacity();
this.pushTimeOut = commonConfig.getMailBoxPushTimeout();
- MetricsReporter reporter = MetricsReporter.getInstance();
+ MetricsReporter reporter = MetricsReporter.getInstance(MeteringBehavior.DOMAIN);
registry = reporter.getMetricsRegistry();
}
String metricName = MetricRegistry.name(actorName, QUEUE_SIZE);
if (registry.getMetrics().containsKey(metricName))
+ {
return; //already registered
+ }
Gauge<Integer> queueSize = getQueueSizeGuage(monitoredQueue);
registerQueueSizeMetric(metricName, queueSize);
* The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
*/
public class MeteringBehavior implements Procedure<Object> {
+ public static final String DOMAIN = "org.opendaylight.controller.actor.metric";
private final UntypedActor meteredActor;
- private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance().getMetricsRegistry();
+ private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance(DOMAIN).getMetricsRegistry();
private final String MSG_PROCESSING_RATE = "msg-rate";
private String actorQualifiedName;
import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
-
import java.util.concurrent.CountDownLatch;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
/**
* This class is intended for testing purposes. It just triggers CountDownLatch's in each method.
public void setDeleteMessagesLatch(CountDownLatch deleteMessagesLatch) {
this.deleteMessagesLatch = deleteMessagesLatch;
}
+
+ @Override
+ public long getLastSequenceNumber() {
+ return -1;
+ }
}
return builder.build();
}
- private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder builder, NormalizedNodeMessages.Node node){
+ private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder<?, ?> builder, NormalizedNodeMessages.Node node){
for(NormalizedNodeMessages.Node child : node.getChildList()){
builder.withChild((DataContainerChild<?, ?>) deSerialize(child));
input = new DataInputStream(stream);
}
- public NormalizedNodeInputStreamReader(DataInput input) throws IOException {
+ public NormalizedNodeInputStreamReader(DataInput input) {
this.input = Preconditions.checkNotNull(input);
}
return children;
}
- private PathArgument readPathArgument() throws IOException {
+ public PathArgument readPathArgument() throws IOException {
// read Type
int type = input.readByte();
output = new DataOutputStream(stream);
}
- public NormalizedNodeOutputStreamWriter(DataOutput output) throws IOException {
+ public NormalizedNodeOutputStreamWriter(DataOutput output) {
this.output = Preconditions.checkNotNull(output);
}
@Override
public void close() throws IOException {
+ flush();
}
@Override
}
}
- private void writePathArgument(YangInstanceIdentifier.PathArgument pathArgument) throws IOException {
+ public void writePathArgument(YangInstanceIdentifier.PathArgument pathArgument) throws IOException {
byte type = PathArgumentTypes.getSerializablePathArgumentType(pathArgument);
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.notifications;
+
+import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+
+/**
+ * A local message initiated internally from the RaftActor when some state of a leader has changed.
+ *
+ * @author Thomas Pantelis
+ */
+public class LeaderStateChanged {
+ private final String memberId;
+ private final String leaderId;
+
+ public LeaderStateChanged(@Nonnull String memberId, @Nullable String leaderId) {
+ this.memberId = Preconditions.checkNotNull(memberId);
+ this.leaderId = leaderId;
+ }
+
+ public @Nonnull String getMemberId() {
+ return memberId;
+ }
+
+ public @Nullable String getLeaderId() {
+ return leaderId;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("LeaderStateChanged [memberId=").append(memberId).append(", leaderId=").append(leaderId)
+ .append("]");
+ return builder.toString();
+ }
+}
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
/**
- * The RoleChangeNotifier is responsible for receiving Raft role change messages and notifying
+ * The RoleChangeNotifier is responsible for receiving Raft role and leader state change messages and notifying
* the listeners (within the same node), which are registered with it.
* <p/>
* The RoleChangeNotifier is instantiated by the Shard and injected into the RaftActor.
*/
public class RoleChangeNotifier extends AbstractUntypedActor implements AutoCloseable {
- private String memberId;
- private Map<ActorPath, ActorRef> registeredListeners = Maps.newHashMap();
+ private final String memberId;
+ private final Map<ActorPath, ActorRef> registeredListeners = Maps.newHashMap();
private RoleChangeNotification latestRoleChangeNotification = null;
+ private LeaderStateChanged latestLeaderStateChanged;
public RoleChangeNotifier(String memberId) {
this.memberId = memberId;
getSender().tell(new RegisterRoleChangeListenerReply(), getSelf());
+ if(latestLeaderStateChanged != null) {
+ getSender().tell(latestLeaderStateChanged, getSelf());
+ }
+
if (latestRoleChangeNotification != null) {
getSender().tell(latestRoleChangeNotification, getSelf());
}
for (ActorRef listener: registeredListeners.values()) {
listener.tell(latestRoleChangeNotification, getSelf());
}
+ } else if (message instanceof LeaderStateChanged) {
+ latestLeaderStateChanged = (LeaderStateChanged)message;
+
+ for (ActorRef listener: registeredListeners.values()) {
+ listener.tell(latestLeaderStateChanged, getSelf());
+ }
}
}
public String getNewRole() {
return newRole;
}
+
+ @Override
+ public String toString() {
+ return "RoleChanged{" +
+ "memberId='" + memberId + '\'' +
+ ", oldRole='" + oldRole + '\'' +
+ ", newRole='" + newRole + '\'' +
+ '}';
+ }
}
@Override
- public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
+ public Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> encode() {
Preconditions.checkState(byteString!=null);
- Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> map = new HashMap<>();
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> map = new HashMap<>();
map.put(org.opendaylight.controller.protobuff.messages.shard.CompositeModificationPayload.modification,
getModificationInternal());
return map;
this.modification = (PersistentMessages.CompositeModification) Preconditions.checkNotNull(modification, "modification should not be null");
}
- @Override public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
+ @Override public Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> encode() {
Preconditions.checkState(modification!=null);
- Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> map = new HashMap<>();
+ Map<GeneratedMessage.GeneratedExtension<?, ?>, PersistentMessages.CompositeModification> map = new HashMap<>();
map.put(
org.opendaylight.controller.protobuff.messages.shard.CompositeModificationPayload.modification, this.modification);
return map;
import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.MetricRegistry;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
/**
* Maintains metrics registry that is provided to reporters.
*/
public class MetricsReporter implements AutoCloseable {
- private static final MetricRegistry METRICS_REGISTRY = new MetricRegistry();
- private static final String DOMAIN = "org.opendaylight.controller.actor.metric";
- private static final MetricsReporter INSTANCE = new MetricsReporter();
-
- private final JmxReporter jmxReporter = JmxReporter.forRegistry(METRICS_REGISTRY).inDomain(DOMAIN).build();
-
- private MetricsReporter() {
+ private static LoadingCache<String, MetricsReporter> METRIC_REPORTERS = CacheBuilder.newBuilder().build(
+ new CacheLoader<String, MetricsReporter>() {
+ @Override
+ public MetricsReporter load(String domainName) {
+ return new MetricsReporter(domainName);
+ }
+ });
+
+ private final String domainName;
+ private final JmxReporter jmxReporter;
+ private final MetricRegistry metricRegistry = new MetricRegistry();
+
+ private MetricsReporter(String domainName) {
+ this.domainName = domainName;
+ jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(domainName).build();
jmxReporter.start();
}
- public static MetricsReporter getInstance() {
- return INSTANCE;
+ public static MetricsReporter getInstance(String domainName) {
+ return METRIC_REPORTERS.getUnchecked(domainName);
}
public MetricRegistry getMetricsRegistry() {
- return METRICS_REGISTRY;
+ return metricRegistry;
}
@Override
public void close() {
jmxReporter.close();
+
+ METRIC_REPORTERS.invalidate(domainName);
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-clustering-config</artifactId>
<description>Configuration files for md-sal clustering</description>
]
}
+
+ persistence {
+ # By default the snapshots/journal directories live in KARAF_HOME. You can choose to put it somewhere else by
+ # modifying the following two properties. The directory location specified may be a relative or absolute path.
+ # The relative path is always relative to KARAF_HOME.
+
+ # snapshot-store.local.dir = "target/snapshots"
+ # journal.leveldb.dir = "target/journal"
+
+ }
}
}
# failing an operation (eg transaction create and change listener registration).
#shard-initialization-timeout-in-seconds=300
-# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
-#shard-journal-recovery-log-batch-size=5000
+# The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.
+#shard-journal-recovery-log-batch-size=1000
# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
#shard-snapshot-batch-count=20000
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common-impl</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common-util</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-connector-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-distributed-datastore</artifactId>
<packaging>bundle</packaging>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft-example</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
- <version>1.2.0-SNAPSHOT</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import static com.google.common.base.Preconditions.checkState;
+import com.google.common.collect.ImmutableMap;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore>
+ implements DOMDataBroker, AutoCloseable {
+
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
+
+ private final AtomicLong txNum = new AtomicLong();
+ private final AtomicLong chainNum = new AtomicLong();
+ private final Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> extensions;
+ private volatile AutoCloseable closeable;
+
+ protected AbstractDOMBroker(final Map<LogicalDatastoreType, DOMStore> datastores) {
+ super(datastores);
+
+ boolean treeChange = true;
+ for (DOMStore ds : datastores.values()) {
+ if (!(ds instanceof DOMStoreTreeChangePublisher)) {
+ treeChange = false;
+ break;
+ }
+ }
+
+ if (treeChange) {
+ extensions = ImmutableMap.<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension>of(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
+ @Override
+ public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(final DOMDataTreeIdentifier treeId, final L listener) {
+ DOMStore publisher = getTxFactories().get(treeId.getDatastoreType());
+ checkState(publisher != null, "Requested logical data store is not available.");
+
+ return ((DOMStoreTreeChangePublisher) publisher).registerTreeChangeListener(treeId.getRootIdentifier(), listener);
+ }
+ });
+ } else {
+ extensions = Collections.emptyMap();
+ }
+ }
+
+ public void setCloseable(final AutoCloseable closeable) {
+ this.closeable = closeable;
+ }
+
+ @Override
+ public void close() {
+ super.close();
+
+ if (closeable != null) {
+ try {
+ closeable.close();
+ } catch (Exception e) {
+ LOG.debug("Error closing instance", e);
+ }
+ }
+ }
+
+ @Override
+ protected Object newTransactionIdentifier() {
+ return "DOM-" + txNum.getAndIncrement();
+ }
+
+ @Override
+ public ListenerRegistration<DOMDataChangeListener> registerDataChangeListener(final LogicalDatastoreType store,
+ final YangInstanceIdentifier path, final DOMDataChangeListener listener, final DataChangeScope triggeringScope) {
+
+ DOMStore potentialStore = getTxFactories().get(store);
+ checkState(potentialStore != null, "Requested logical data store is not available.");
+ return potentialStore.registerChangeListener(path, listener, triggeringScope);
+ }
+
+ @Override
+ public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
+ return extensions;
+ }
+
+ @Override
+ public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
+ checkNotClosed();
+
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
+ for (Map.Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
+ backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
+ }
+
+ final long chainId = chainNum.getAndIncrement();
+ LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener,
+ backingChains);
+ return new DOMBrokerTransactionChain(chainId, backingChains, this, listener);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+public abstract class AbstractDOMBrokerTransaction<K, T extends DOMStoreTransaction> implements
+ AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
+
+ private Map<K, T> backingTxs;
+ private final Object identifier;
+ private final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories;
+
+ /**
+ *
+ * Creates new composite Transactions.
+ *
+ * @param identifier
+ * Identifier of transaction.
+ */
+ protected AbstractDOMBrokerTransaction(final Object identifier, Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
+ this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
+ this.storeTxFactories = Preconditions.checkNotNull(storeTxFactories, "Store Transaction Factories should not be null");
+ this.backingTxs = new EnumMap(LogicalDatastoreType.class);
+ }
+
+ /**
+ * Returns subtransaction associated with supplied key.
+ *
+ * @param key
+ * @return
+ * @throws NullPointerException
+ * if key is null
+ * @throws IllegalArgumentException
+ * if no subtransaction is associated with key.
+ */
+ protected final T getSubtransaction(final K key) {
+ Preconditions.checkNotNull(key, "key must not be null.");
+
+ T ret = backingTxs.get(key);
+ if(ret == null){
+ ret = createTransaction(key);
+ backingTxs.put(key, ret);
+ }
+ Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+ return ret;
+ }
+
+ protected abstract T createTransaction(final K key);
+
+ /**
+ * Returns immutable Iterable of all subtransactions.
+ *
+ */
+ protected Collection<T> getSubtransactions() {
+ return backingTxs.values();
+ }
+
+ @Override
+ public Object getIdentifier() {
+ return identifier;
+ }
+
+ protected void closeSubtransactions() {
+ /*
+ * We share one exception for all failures, which are added
+ * as supressedExceptions to it.
+ */
+ IllegalStateException failure = null;
+ for (T subtransaction : backingTxs.values()) {
+ try {
+ subtransaction.close();
+ } catch (Exception e) {
+ // If we did not allocated failure we allocate it
+ if (failure == null) {
+ failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
+ } else {
+ // We update it with additional exceptions, which occurred during error.
+ failure.addSuppressed(e);
+ }
+ }
+ }
+ // If we have failure, we throw it at after all attempts to close.
+ if (failure != null) {
+ throw failure;
+ }
+ }
+
+ protected DOMStoreTransactionFactory getTxFactory(K type){
+ return storeTxFactories.get(type);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
+
+public abstract class AbstractDOMTransactionFactory<T extends DOMStoreTransactionFactory> implements AutoCloseable {
+ private static final AtomicIntegerFieldUpdater<AbstractDOMTransactionFactory> UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(AbstractDOMTransactionFactory.class, "closed");
+ private final Map<LogicalDatastoreType, T> storeTxFactories;
+ private volatile int closed = 0;
+
+ protected AbstractDOMTransactionFactory(final Map<LogicalDatastoreType, T> txFactories) {
+ this.storeTxFactories = new EnumMap<>(txFactories);
+ }
+
+ /**
+ * Implementations must return unique identifier for each and every call of
+ * this method;
+ *
+ * @return new Unique transaction identifier.
+ */
+ protected abstract Object newTransactionIdentifier();
+
+ /**
+ *
+ * @param transaction
+ * @param cohorts
+ * @return
+ */
+ protected abstract CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts);
+
+ /**
+ *
+ * @return
+ */
+ public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ checkNotClosed();
+
+ return new DOMBrokerReadOnlyTransaction(newTransactionIdentifier(), storeTxFactories);
+ }
+
+
+ /**
+ *
+ * @return
+ */
+ public final DOMDataWriteTransaction newWriteOnlyTransaction() {
+ checkNotClosed();
+
+ return new DOMBrokerWriteOnlyTransaction(newTransactionIdentifier(), storeTxFactories, this);
+ }
+
+
+ /**
+ *
+ * @return
+ */
+ public final DOMDataReadWriteTransaction newReadWriteTransaction() {
+ checkNotClosed();
+
+ return new DOMBrokerReadWriteTransaction<>(newTransactionIdentifier(), storeTxFactories, this);
+ }
+
+ /**
+ * Convenience accessor of backing factories intended to be used only by
+ * finalization of this class.
+ *
+ * <b>Note:</b>
+ * Finalization of this class may want to access other functionality of
+ * supplied Transaction factories.
+ *
+ * @return Map of backing transaction factories.
+ */
+ protected final Map<LogicalDatastoreType, T> getTxFactories() {
+ return storeTxFactories;
+ }
+
+ /**
+ * Checks if instance is not closed.
+ *
+ * @throws IllegalStateException If instance of this class was closed.
+ *
+ */
+ protected final void checkNotClosed() {
+ Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
+ }
+
+ @Override
+ public void close() {
+ final boolean success = UPDATER.compareAndSet(this, 0, 1);
+ Preconditions.checkState(success, "Transaction factory was already closed");
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+public class DOMBrokerReadOnlyTransaction<T extends DOMStoreReadTransaction>
+ extends AbstractDOMBrokerTransaction<LogicalDatastoreType, T>
+ implements DOMDataReadOnlyTransaction {
+ /**
+ * Creates new composite Transactions.
+ *
+ * @param identifier Identifier of transaction.
+ */
+ protected DOMBrokerReadOnlyTransaction(Object identifier, Map storeTxFactories) {
+ super(identifier, storeTxFactories);
+ }
+
+ @Override
+ public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
+ final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ return getSubtransaction(store).read(path);
+ }
+
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
+ return getSubtransaction(store).exists(path);
+ }
+
+ @Override
+ public void close() {
+ closeSubtransactions();
+ }
+
+ @Override
+ protected T createTransaction(LogicalDatastoreType key) {
+ return (T) getTxFactory(key).newReadOnlyTransaction();
+ }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+public class DOMBrokerReadWriteTransaction<T extends DOMStoreReadWriteTransaction>
+ extends DOMBrokerWriteOnlyTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
+ /**
+ * Creates new composite Transactions.
+ *
+ * @param identifier Identifier of transaction.
+ * @param storeTxFactories
+ */
+ protected DOMBrokerReadWriteTransaction(Object identifier, Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories, final AbstractDOMTransactionFactory<?> commitImpl) {
+ super(identifier, storeTxFactories, commitImpl);
+ }
+
+ @Override
+ public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
+ final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ return getSubtransaction(store).read(path);
+ }
+
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
+ return getSubtransaction(store).exists(path);
+ }
+
+ @Override
+ protected DOMStoreReadWriteTransaction createTransaction(LogicalDatastoreType key) {
+ return getTxFactory(key).newReadWriteTransaction();
+ }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DOMBrokerTransactionChain extends AbstractDOMTransactionFactory<DOMStoreTransactionChain>
+ implements DOMTransactionChain {
+ private static enum State {
+ RUNNING,
+ CLOSING,
+ CLOSED,
+ FAILED,
+ }
+
+ private static final AtomicIntegerFieldUpdater<DOMBrokerTransactionChain> COUNTER_UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, "counter");
+ private static final AtomicReferenceFieldUpdater<DOMBrokerTransactionChain, State> STATE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, State.class, "state");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerTransactionChain.class);
+ private final AtomicLong txNum = new AtomicLong();
+ private final AbstractDOMBroker broker;
+ private final TransactionChainListener listener;
+ private final long chainId;
+
+ private volatile State state = State.RUNNING;
+ private volatile int counter = 0;
+
+ /**
+ *
+ * @param chainId
+ * ID of transaction chain
+ * @param chains
+ * Backing {@link DOMStoreTransactionChain}s.
+ * @param listener
+ * Listener, which listens on transaction chain events.
+ * @throws NullPointerException
+ * If any of arguments is null.
+ */
+ public DOMBrokerTransactionChain(final long chainId,
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+ AbstractDOMBroker broker, final TransactionChainListener listener) {
+ super(chains);
+ this.chainId = chainId;
+ this.broker = Preconditions.checkNotNull(broker);
+ this.listener = Preconditions.checkNotNull(listener);
+ }
+
+ private void checkNotFailed() {
+ Preconditions.checkState(state != State.FAILED, "Transaction chain has failed");
+ }
+
+ @Override
+ protected Object newTransactionIdentifier() {
+ return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
+ }
+
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(
+ final DOMDataWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotFailed();
+ checkNotClosed();
+
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = broker.submit(transaction, cohorts);
+
+ COUNTER_UPDATER.incrementAndGet(this);
+ Futures.addCallback(ret, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ transactionCompleted();
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ transactionFailed(transaction, t);
+ }
+ });
+
+ return ret;
+ }
+
+ @Override
+ public void close() {
+ final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
+ if (!success) {
+ LOG.debug("Chain {} is no longer running", this);
+ return;
+ }
+
+ super.close();
+ for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
+ subChain.close();
+ }
+
+ if (counter == 0) {
+ finishClose();
+ }
+ }
+
+ private void finishClose() {
+ state = State.CLOSED;
+ listener.onTransactionChainSuccessful(this);
+ }
+
+ private void transactionCompleted() {
+ if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
+ finishClose();
+ }
+ }
+
+ private void transactionFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ state = State.FAILED;
+ LOG.debug("Transaction chain {}Â failed.", this, cause);
+ listener.onTransactionChainFailed(this, tx, cause);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.databroker;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DOMBrokerWriteOnlyTransaction<T extends DOMStoreWriteTransaction>
+ extends AbstractDOMBrokerTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+
+ private static final AtomicReferenceFieldUpdater<DOMBrokerWriteOnlyTransaction, AbstractDOMTransactionFactory> IMPL_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMBrokerWriteOnlyTransaction.class, AbstractDOMTransactionFactory.class, "commitImpl");
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMBrokerWriteOnlyTransaction, Future> FUTURE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMBrokerWriteOnlyTransaction.class, Future.class, "commitFuture");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerWriteOnlyTransaction.class);
+ private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
+
+ /**
+ * Implementation of real commit. It also acts as an indication that
+ * the transaction is running -- which we flip atomically using
+ * {@link #IMPL_UPDATER}.
+ */
+ private volatile AbstractDOMTransactionFactory<?> commitImpl;
+
+ /**
+ * Future task of transaction commit. It starts off as null, but is
+ * set appropriately on {@link #submit()} and {@link #cancel()} via
+ * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
+ *
+ * Lazy set is safe for use because it is only referenced to in the
+ * {@link #cancel()} slow path, where we will busy-wait for it. The
+ * fast path gets the benefit of a store-store barrier instead of the
+ * usual store-load barrier.
+ */
+ private volatile Future<?> commitFuture;
+
+ protected DOMBrokerWriteOnlyTransaction(final Object identifier,
+ Map storeTxFactories, final AbstractDOMTransactionFactory<?> commitImpl) {
+ super(identifier, storeTxFactories);
+ this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
+ }
+
+ @Override
+ protected T createTransaction(LogicalDatastoreType key) {
+ // FIXME : Casting shouldn't be necessary here
+ return (T) getTxFactory(key).newWriteOnlyTransaction();
+ }
+
+ @Override
+ public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ checkRunning(commitImpl);
+ getSubtransaction(store).write(path, data);
+ }
+
+ @Override
+ public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ checkRunning(commitImpl);
+ getSubtransaction(store).delete(path);
+ }
+
+ @Override
+ public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ checkRunning(commitImpl);
+ getSubtransaction(store).merge(path, data);
+ }
+
+ @Override
+ public boolean cancel() {
+ final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
+ if (impl != null) {
+ LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+ FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
+ closeSubtransactions();
+ return true;
+ }
+
+ // The transaction is in process of being submitted or cancelled. Busy-wait
+ // for the corresponding future.
+ Future<?> future;
+ do {
+ future = commitFuture;
+ } while (future == null);
+
+ return future.cancel(false);
+ }
+
+ @Deprecated
+ @Override
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
+ }
+
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
+ checkRunning(impl);
+
+ final Collection<T> txns = getSubtransactions();
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
+
+ // FIXME: deal with errors thrown by backed (ready and submit can fail in theory)
+ for (DOMStoreWriteTransaction txn : txns) {
+ cohorts.add(txn.ready());
+ }
+
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+ FUTURE_UPDATER.lazySet(this, ret);
+ return ret;
+ }
+
+ private void checkRunning(final AbstractDOMTransactionFactory<?> impl) {
+ Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+
+/**
+ * Abstract base class for our internal implementation of {@link DataTreeCandidateNode},
+ * which we instantiate from a serialized stream. We do not retain the before-image and
+ * do not implement {@link #getModifiedChild(PathArgument)}, as that method is only
+ * useful for end users. Instances based on this class should never be leaked outside of
+ * this component.
+ */
+abstract class AbstractDataTreeCandidateNode implements DataTreeCandidateNode {
+ private final ModificationType type;
+
+ protected AbstractDataTreeCandidateNode(final ModificationType type) {
+ this.type = Preconditions.checkNotNull(type);
+ }
+
+ @Override
+ public final DataTreeCandidateNode getModifiedChild(final PathArgument identifier) {
+ throw new UnsupportedOperationException("Not implemented");
+ }
+
+ @Override
+ public final ModificationType getModificationType() {
+ return type;
+ }
+
+ @Override
+ public final Optional<NormalizedNode<?, ?>> getDataBefore() {
+ throw new UnsupportedOperationException("Before-image not available after serialization");
+ }
+
+ static DataTreeCandidateNode createUnmodified() {
+ return new AbstractDataTreeCandidateNode(ModificationType.UNMODIFIED) {
+ @Override
+ public PathArgument getIdentifier() {
+ throw new UnsupportedOperationException("Root node does not have an identifier");
+ }
+
+ @Override
+ public Optional<NormalizedNode<?, ?>> getDataAfter() {
+ throw new UnsupportedOperationException("After-image not available after serialization");
+ }
+
+ @Override
+ public Collection<DataTreeCandidateNode> getChildNodes() {
+ throw new UnsupportedOperationException("Children not available after serialization");
+ }
+ };
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+
+/**
+ * Abstract base for transactions running on SharrdDataTree.
+ *
+ * @param <T> Backing transaction type.
+ */
+@NotThreadSafe
+abstract class AbstractShardDataTreeTransaction<T extends DataTreeSnapshot> {
+ private final T snapshot;
+ private final String id;
+ private boolean closed;
+
+ protected AbstractShardDataTreeTransaction(final String id, final T snapshot) {
+ this.snapshot = Preconditions.checkNotNull(snapshot);
+ this.id = Preconditions.checkNotNull(id);
+ }
+
+ final T getSnapshot() {
+ return snapshot;
+ }
+
+ final boolean isClosed() {
+ return closed;
+ }
+
+ /**
+ * Close this transaction and mark it as closed, allowing idempotent invocations.
+ *
+ * @return True if the transaction got closed by this method invocation.
+ */
+ protected final boolean close() {
+ if (closed) {
+ return false;
+ }
+
+ closed = true;
+ return true;
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).add("id", id).add("closed", closed).add("snapshot", snapshot).toString();
+ }
+
+ abstract void abort();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import scala.concurrent.Future;
+
+/**
+ * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this
+ * implementation. In addition to the usual set of methods it also contains the list of actor
+ * futures.
+ */
+public abstract class AbstractThreePhaseCommitCohort<T> implements DOMStoreThreePhaseCommitCohort {
+ protected static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
+ protected static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
+
+ abstract List<Future<T>> getCohortFutures();
+}
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.Lists;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import scala.concurrent.Future;
abstract class AbstractTransactionContext implements TransactionContext {
- protected final TransactionIdentifier identifier;
- protected final List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+ private final TransactionIdentifier identifier;
- AbstractTransactionContext(TransactionIdentifier identifier) {
+ protected AbstractTransactionContext(TransactionIdentifier identifier) {
this.identifier = identifier;
}
- @Override
- public List<Future<Object>> getRecordedOperationFutures() {
- return recordedOperationFutures;
+ protected final TransactionIdentifier getIdentifier() {
+ return identifier;
}
}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class ChainedCommitCohort extends ShardDataTreeCohort {
+ private static final Logger LOG = LoggerFactory.getLogger(ChainedCommitCohort.class);
+ private final ReadWriteShardDataTreeTransaction transaction;
+ private final ShardDataTreeTransactionChain chain;
+ private final ShardDataTreeCohort delegate;
+
+ ChainedCommitCohort(final ShardDataTreeTransactionChain chain, final ReadWriteShardDataTreeTransaction transaction, final ShardDataTreeCohort delegate) {
+ this.transaction = Preconditions.checkNotNull(transaction);
+ this.delegate = Preconditions.checkNotNull(delegate);
+ this.chain = Preconditions.checkNotNull(chain);
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ final ListenableFuture<Void> ret = delegate.commit();
+
+ Futures.addCallback(ret, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(Void result) {
+ chain.clearTransaction(transaction);
+ LOG.debug("Committed transaction {}", transaction);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Transaction {} commit failed, cannot recover", transaction, t);
+ }
+ });
+
+ return ret;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return delegate.canCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegate.preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegate.abort();
+ }
+
+ @Override
+ DataTreeCandidateTip getCandidate() {
+ return delegate.getCandidate();
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.dispatch.OnComplete;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+
+final class ChainedTransactionProxy extends TransactionProxy {
+ private static final Logger LOG = LoggerFactory.getLogger(ChainedTransactionProxy.class);
+
+ /**
+ * Stores the ready Futures from the previous Tx in the chain.
+ */
+ private final List<Future<Object>> previousReadyFutures;
+
+ /**
+ * Stores the ready Futures from this transaction when it is readied.
+ */
+ private volatile List<Future<Object>> readyFutures;
+
+ ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
+ String transactionChainId, List<Future<Object>> previousReadyFutures) {
+ super(actorContext, transactionType, transactionChainId);
+ this.previousReadyFutures = previousReadyFutures;
+ }
+
+ List<Future<Object>> getReadyFutures() {
+ return readyFutures;
+ }
+
+ boolean isReady() {
+ return readyFutures != null;
+ }
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ @Override
+ public AbstractThreePhaseCommitCohort<?> ready() {
+ final AbstractThreePhaseCommitCohort<?> ret = super.ready();
+ readyFutures = (List)ret.getCohortFutures();
+ LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
+ readyFutures.size(), getTransactionChainId());
+ return ret;
+ }
+
+ /**
+ * This method is overridden to ensure the previous Tx's ready operations complete
+ * before we initiate the next Tx in the chain to avoid creation failures if the
+ * previous Tx's ready operations haven't completed yet.
+ */
+ @Override
+ protected Future<PrimaryShardInfo> sendFindPrimaryShardAsync(final String shardName) {
+ // Check if there are any previous ready Futures, otherwise let the super class handle it.
+ if(previousReadyFutures.isEmpty()) {
+ return super.sendFindPrimaryShardAsync(shardName);
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Waiting for {} previous ready futures for Tx {} on chain {}",
+ previousReadyFutures.size(), getIdentifier(), getTransactionChainId());
+ }
+
+ // Combine the ready Futures into 1.
+ Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
+ previousReadyFutures, getActorContext().getClientDispatcher());
+
+ // Add a callback for completion of the combined Futures.
+ final Promise<PrimaryShardInfo> returnPromise = akka.dispatch.Futures.promise();
+ OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
+ @Override
+ public void onComplete(Throwable failure, Iterable<Object> notUsed) {
+ if(failure != null) {
+ // A Ready Future failed so fail the returned Promise.
+ returnPromise.failure(failure);
+ } else {
+ LOG.debug("Previous Tx readied - sending FindPrimaryShard for {} on chain {}",
+ getIdentifier(), getTransactionChainId());
+
+ // Send the FindPrimaryShard message and use the resulting Future to complete the
+ // returned Promise.
+ returnPromise.completeWith(ChainedTransactionProxy.super.sendFindPrimaryShardAsync(shardName));
+ }
+ }
+ };
+
+ combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
+
+ return returnPromise.future();
+ }
+}
\ No newline at end of file
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Collection;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
+import org.opendaylight.controller.cluster.databroker.AbstractDOMBroker;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.broker.impl.AbstractDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.broker.impl.TransactionCommitFailedExceptionMapper;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.LoggerFactory;
/**
- * Implementation of DOMDataCommitExecutor that coordinates transaction commits concurrently. The 3
+ * ConcurrentDOMDataBroker commits transactions concurrently. The 3
* commit phases (canCommit, preCommit, and commit) are performed serially and non-blocking
* (ie async) per transaction but multiple transaction commits can run concurrent.
*
* @author Thomas Pantelis
*/
-public class ConcurrentDOMDataBroker extends AbstractDOMDataBroker {
+public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
private static final Logger LOG = LoggerFactory.getLogger(ConcurrentDOMDataBroker.class);
private static final String CAN_COMMIT = "CAN_COMMIT";
private static final String PRE_COMMIT = "PRE_COMMIT";
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
+ if(cohorts.isEmpty()){
+ return Futures.immediateCheckedFuture(null);
+ }
+
final AsyncNotifyingSettableFuture clientSubmitFuture =
new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
final long startTime = System.nanoTime();
+ final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
+
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
new TransactionCommitFailedException(
"Can Commit failed, no detailed cause available."));
} else {
- if(remaining.decrementAndGet() == 0) {
+ if(!cohortIterator.hasNext()) {
// All cohorts completed successfully - we can move on to the preCommit phase
doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
+ } else {
+ ListenableFuture<Boolean> canCommitFuture = cohortIterator.next().canCommit();
+ Futures.addCallback(canCommitFuture, this, internalFutureCallbackExecutor);
}
}
}
}
};
- for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
- ListenableFuture<Boolean> canCommitFuture = cohort.canCommit();
- Futures.addCallback(canCommitFuture, futureCallback, internalFutureCallbackExecutor);
- }
+ ListenableFuture<Boolean> canCommitFuture = cohortIterator.next().canCommit();
+ Futures.addCallback(canCommitFuture, futureCallback, internalFutureCallbackExecutor);
}
private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
+
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
@Override
public void onSuccess(Void notUsed) {
- if(remaining.decrementAndGet() == 0) {
+ if(!cohortIterator.hasNext()) {
// All cohorts completed successfully - we can move on to the commit phase
doCommit(startTime, clientSubmitFuture, transaction, cohorts);
+ } else {
+ ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
+ Futures.addCallback(preCommitFuture, this, internalFutureCallbackExecutor);
}
}
}
};
- for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
- ListenableFuture<Void> preCommitFuture = cohort.preCommit();
- Futures.addCallback(preCommitFuture, futureCallback, internalFutureCallbackExecutor);
- }
+ ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
+ Futures.addCallback(preCommitFuture, futureCallback, internalFutureCallbackExecutor);
}
private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
+
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
@Override
public void onSuccess(Void notUsed) {
- if(remaining.decrementAndGet() == 0) {
+ if(!cohortIterator.hasNext()) {
// All cohorts completed successfully - we're done.
commitStatsTracker.addDuration(System.nanoTime() - startTime);
clientSubmitFuture.set();
+ } else {
+ ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
+ Futures.addCallback(commitFuture, this, internalFutureCallbackExecutor);
}
}
}
};
- for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
- ListenableFuture<Void> commitFuture = cohort.commit();
- Futures.addCallback(commitFuture, futureCallback, internalFutureCallbackExecutor);
- }
+ ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
+ Futures.addCallback(commitFuture, futureCallback, internalFutureCallbackExecutor);
}
private void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
DataChangeScope scope) {
Future<Object> future = actorContext.executeOperationAsync(shard,
- new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
+ new RegisterChangeListener(path, dataChangeListenerActor, scope),
actorContext.getDatastoreContext().getShardInitializationTimeout());
future.onComplete(new OnComplete<Object>(){
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DataChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterChangeListener, ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> {
+ private static final Logger LOG = LoggerFactory.getLogger(DataChangeListenerSupport.class);
+ private final List<DelayedListenerRegistration> delayedListenerRegistrations = new ArrayList<>();
+ private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
+
+ DataChangeListenerSupport(final Shard shard) {
+ super(shard);
+ }
+
+ @Override
+ void onLeadershipChange(final boolean isLeader) {
+ for (ActorSelection dataChangeListener : dataChangeListeners) {
+ dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
+ }
+
+ if (isLeader) {
+ for (DelayedListenerRegistration reg: delayedListenerRegistrations) {
+ if(!reg.isClosed()) {
+ final Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> res =
+ createDelegate(reg.getRegisterChangeListener());
+ reg.setDelegate(res.getKey());
+ if (res.getValue() != null) {
+ reg.getInstance().onDataChanged(res.getValue());
+ }
+ }
+ }
+
+ delayedListenerRegistrations.clear();
+ }
+ }
+
+ @Override
+ void onMessage(final RegisterChangeListener message, final boolean isLeader) {
+
+ LOG.debug("{}: registerDataChangeListener for {}, leader: {}", persistenceId(), message.getPath(), isLeader);
+
+ final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration;
+ final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event;
+ if (isLeader) {
+ final Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> res =
+ createDelegate(message);
+ registration = res.getKey();
+ event = res.getValue();
+ } else {
+ LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
+
+ DelayedListenerRegistration delayedReg = new DelayedListenerRegistration(message);
+ delayedListenerRegistrations.add(delayedReg);
+ registration = delayedReg;
+ event = null;
+ }
+
+ ActorRef listenerRegistration = createActor(DataChangeListenerRegistration.props(registration));
+
+ LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+ persistenceId(), listenerRegistration.path());
+
+ tellSender(new RegisterChangeListenerReply(listenerRegistration));
+ if (event != null) {
+ registration.getInstance().onDataChanged(event);
+ }
+ }
+
+ @Override
+ Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> createDelegate(
+ final RegisterChangeListener message) {
+ ActorSelection dataChangeListenerPath = selectActor(message.getDataChangeListenerPath());
+
+ // Notify the listener if notifications should be enabled or not
+ // If this shard is the leader then it will enable notifications else
+ // it will not
+ dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
+
+ // Now store a reference to the data change listener so it can be notified
+ // at a later point if notifications should be enabled or disabled
+ dataChangeListeners.add(dataChangeListenerPath);
+
+ AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
+ new DataChangeListenerProxy(dataChangeListenerPath);
+
+ LOG.debug("{}: Registering for path {}", persistenceId(), message.getPath());
+
+ return getShard().getDataStore().registerChangeListener(message.getPath(), listener,
+ message.getScope());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteArrayDataInput;
+import com.google.common.io.ByteArrayDataOutput;
+import com.google.common.io.ByteStreams;
+import com.google.protobuf.GeneratedMessage.GeneratedExtension;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputStreamReader;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeOutputStreamWriter;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages.AppendEntries;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DataTreeCandidatePayload extends Payload implements Externalizable {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeCandidatePayload.class);
+ private static final long serialVersionUID = 1L;
+ private static final byte DELETE = 0;
+ private static final byte SUBTREE_MODIFIED = 1;
+ private static final byte UNMODIFIED = 2;
+ private static final byte WRITE = 3;
+
+ private transient byte[] serialized;
+
+ public DataTreeCandidatePayload() {
+ // Required by Externalizable
+ }
+
+ private DataTreeCandidatePayload(final byte[] serialized) {
+ this.serialized = Preconditions.checkNotNull(serialized);
+ }
+
+ private static void writeChildren(final NormalizedNodeOutputStreamWriter writer, final DataOutput out,
+ final Collection<DataTreeCandidateNode> children) throws IOException {
+ out.writeInt(children.size());
+ for (DataTreeCandidateNode child : children) {
+ writeNode(writer, out, child);
+ }
+ }
+
+ private static void writeNode(final NormalizedNodeOutputStreamWriter writer, final DataOutput out,
+ final DataTreeCandidateNode node) throws IOException {
+ switch (node.getModificationType()) {
+ case DELETE:
+ out.writeByte(DELETE);
+ writer.writePathArgument(node.getIdentifier());
+ break;
+ case SUBTREE_MODIFIED:
+ out.writeByte(SUBTREE_MODIFIED);
+ writer.writePathArgument(node.getIdentifier());
+ writeChildren(writer, out, node.getChildNodes());
+ break;
+ case WRITE:
+ out.writeByte(WRITE);
+ writer.writeNormalizedNode(node.getDataAfter().get());
+ break;
+ case UNMODIFIED:
+ throw new IllegalArgumentException("Unmodified candidate should never be in the payload");
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+ }
+ }
+
+ static DataTreeCandidatePayload create(DataTreeCandidate candidate) {
+ final ByteArrayDataOutput out = ByteStreams.newDataOutput();
+ try (final NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(out)) {
+ writer.writeYangInstanceIdentifier(candidate.getRootPath());
+
+ final DataTreeCandidateNode node = candidate.getRootNode();
+ switch (node.getModificationType()) {
+ case DELETE:
+ out.writeByte(DELETE);
+ break;
+ case SUBTREE_MODIFIED:
+ out.writeByte(SUBTREE_MODIFIED);
+ writeChildren(writer, out, node.getChildNodes());
+ break;
+ case UNMODIFIED:
+ out.writeByte(UNMODIFIED);
+ break;
+ case WRITE:
+ out.writeByte(WRITE);
+ writer.writeNormalizedNode(node.getDataAfter().get());
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+ }
+
+ writer.close();
+ } catch (IOException e) {
+ throw new IllegalArgumentException(String.format("Failed to serialize candidate %s", candidate), e);
+ }
+
+ return new DataTreeCandidatePayload(out.toByteArray());
+ }
+
+ private static Collection<DataTreeCandidateNode> readChildren(final NormalizedNodeInputStreamReader reader,
+ final DataInput in) throws IOException {
+ final int size = in.readInt();
+ if (size != 0) {
+ final Collection<DataTreeCandidateNode> ret = new ArrayList<>(size);
+ for (int i = 0; i < size; ++i) {
+ final DataTreeCandidateNode child = readNode(reader, in);
+ if (child != null) {
+ ret.add(child);
+ }
+ }
+ return ret;
+ } else {
+ return Collections.emptyList();
+ }
+ }
+
+ private static DataTreeCandidateNode readNode(final NormalizedNodeInputStreamReader reader,
+ final DataInput in) throws IOException {
+ final byte type = in.readByte();
+ switch (type) {
+ case DELETE:
+ return DeletedDataTreeCandidateNode.create(reader.readPathArgument());
+ case SUBTREE_MODIFIED:
+ final PathArgument identifier = reader.readPathArgument();
+ final Collection<DataTreeCandidateNode> children = readChildren(reader, in);
+ if (children.isEmpty()) {
+ LOG.debug("Modified node {} does not have any children, not instantiating it", identifier);
+ return null;
+ } else {
+ return ModifiedDataTreeCandidateNode.create(identifier, children);
+ }
+ case UNMODIFIED:
+ return null;
+ case WRITE:
+ return DataTreeCandidateNodes.fromNormalizedNode(reader.readNormalizedNode());
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + type);
+ }
+ }
+
+ private static DataTreeCandidate parseCandidate(final ByteArrayDataInput in) throws IOException {
+ final NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(in);
+ final YangInstanceIdentifier rootPath = reader.readYangInstanceIdentifier();
+ final byte type = in.readByte();
+
+ final DataTreeCandidateNode rootNode;
+ switch (type) {
+ case DELETE:
+ rootNode = DeletedDataTreeCandidateNode.create();
+ break;
+ case SUBTREE_MODIFIED:
+ rootNode = ModifiedDataTreeCandidateNode.create(readChildren(reader, in));
+ break;
+ case WRITE:
+ rootNode = DataTreeCandidateNodes.fromNormalizedNode(reader.readNormalizedNode());
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled node type " + type);
+ }
+
+ return DataTreeCandidates.newDataTreeCandidate(rootPath, rootNode);
+ }
+
+ DataTreeCandidate getCandidate() throws IOException {
+ return parseCandidate(ByteStreams.newDataInput(serialized));
+ }
+
+ @Override
+ @Deprecated
+ @SuppressWarnings("rawtypes")
+ public <T> Map<GeneratedExtension, T> encode() {
+ return null;
+ }
+
+ @Override
+ @Deprecated
+ public Payload decode(final AppendEntries.ReplicatedLogEntry.Payload payload) {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return serialized.length;
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ out.writeByte((byte)serialVersionUID);
+ out.writeInt(serialized.length);
+ out.write(serialized);
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ final long version = in.readByte();
+ Preconditions.checkArgument(version == serialVersionUID, "Unsupported serialization version %s", version);
+
+ final int length = in.readInt();
+ serialized = new byte[length];
+ in.readFully(serialized);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Proxy actor which acts as a facade to the user-provided listener. Responsible for decapsulating
+ * DataTreeChanged messages and dispatching their context to the user.
+ */
+final class DataTreeChangeListenerActor extends AbstractUntypedActor {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerActor.class);
+ private final DOMDataTreeChangeListener listener;
+ private boolean notificationsEnabled = false;
+
+ private DataTreeChangeListenerActor(final DOMDataTreeChangeListener listener) {
+ this.listener = Preconditions.checkNotNull(listener);
+ }
+
+ @Override
+ protected void handleReceive(final Object message) {
+ if (message instanceof DataTreeChanged) {
+ dataChanged((DataTreeChanged)message);
+ } else if (message instanceof EnableNotification) {
+ enableNotification((EnableNotification) message);
+ }
+ }
+
+ private void dataChanged(final DataTreeChanged message) {
+ // Do nothing if notifications are not enabled
+ if (!notificationsEnabled) {
+ LOG.debug("Notifications not enabled for listener {} - dropping change notification", listener);
+ return;
+ }
+
+ LOG.debug("Sending change notification {} to listener {}", message.getChanges(), listener);
+
+ try {
+ this.listener.onDataTreeChanged(message.getChanges());
+ } catch (Exception e) {
+ LOG.error("Error notifying listener {}", this.listener, e);
+ }
+
+ // TODO: do we really need this?
+ // It seems the sender is never null but it doesn't hurt to check. If the caller passes in
+ // a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
+ if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
+ getSender().tell(DataTreeChangedReply.getInstance(), getSelf());
+ }
+ }
+
+ private void enableNotification(final EnableNotification message) {
+ notificationsEnabled = message.isEnabled();
+ LOG.debug("{} notifications for listener {}", (notificationsEnabled ? "Enabled" : "Disabled"),
+ listener);
+ }
+
+ public static Props props(final DOMDataTreeChangeListener listener) {
+ return Props.create(new DataTreeChangeListenerCreator(listener));
+ }
+
+ private static final class DataTreeChangeListenerCreator implements Creator<DataTreeChangeListenerActor> {
+ private static final long serialVersionUID = 1L;
+ private final DOMDataTreeChangeListener listener;
+
+ DataTreeChangeListenerCreator(final DOMDataTreeChangeListener listener) {
+ this.listener = Preconditions.checkNotNull(listener);
+ }
+
+ @Override
+ public DataTreeChangeListenerActor create() {
+ return new DataTreeChangeListenerActor(listener);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.PoisonPill;
+import akka.dispatch.OnComplete;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+/**
+ * Proxy class for holding required state to lazily instantiate a listener registration with an
+ * asynchronously-discovered actor.
+ *
+ * @param <T> listener type
+ */
+final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
+ private final ActorRef dataChangeListenerActor;
+ private final ActorContext actorContext;
+
+ @GuardedBy("this")
+ private ActorSelection listenerRegistrationActor;
+
+ public DataTreeChangeListenerProxy(final ActorContext actorContext, final T listener) {
+ super(listener);
+ this.actorContext = Preconditions.checkNotNull(actorContext);
+ this.dataChangeListenerActor = actorContext.getActorSystem().actorOf(
+ DataTreeChangeListenerActor.props(getInstance()).withDispatcher(actorContext.getNotificationDispatcherPath()));
+ }
+
+ @Override
+ protected synchronized void removeRegistration() {
+ if (listenerRegistrationActor != null) {
+ listenerRegistrationActor.tell(CloseDataTreeChangeListenerRegistration.getInstance(), ActorRef.noSender());
+ listenerRegistrationActor = null;
+ }
+
+ dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
+
+ void init(final String shardName, final YangInstanceIdentifier treeId) {
+ Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+ findFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(final Throwable failure, final ActorRef shard) {
+ if (failure instanceof LocalShardNotFoundException) {
+ LOG.debug("No local shard found for {} - DataTreeChangeListener {} at path {} " +
+ "cannot be registered", shardName, getInstance(), treeId);
+ } else if (failure != null) {
+ LOG.error("Failed to find local shard {} - DataTreeChangeListener {} at path {} " +
+ "cannot be registered: {}", shardName, getInstance(), treeId, failure);
+ } else {
+ doRegistration(shard, treeId);
+ }
+ }
+ }, actorContext.getClientDispatcher());
+ }
+
+ private void setListenerRegistrationActor(final ActorSelection actor) {
+ if (actor == null) {
+ LOG.debug("Ignoring null actor on {}", this);
+ return;
+ }
+
+ synchronized (this) {
+ if (!isClosed()) {
+ this.listenerRegistrationActor = actor;
+ return;
+ }
+ }
+
+ // This registration has already been closed, notify the actor
+ actor.tell(CloseDataTreeChangeListenerRegistration.getInstance(), null);
+ }
+
+ private void doRegistration(final ActorRef shard, final YangInstanceIdentifier path) {
+
+ Future<Object> future = actorContext.executeOperationAsync(shard,
+ new RegisterDataTreeChangeListener(path, dataChangeListenerActor),
+ actorContext.getDatastoreContext().getShardInitializationTimeout());
+
+ future.onComplete(new OnComplete<Object>(){
+ @Override
+ public void onComplete(final Throwable failure, final Object result) {
+ if (failure != null) {
+ LOG.error("Failed to register DataTreeChangeListener {} at path {}",
+ getInstance(), path.toString(), failure);
+ } else {
+ RegisterDataTreeChangeListenerReply reply = (RegisterDataTreeChangeListenerReply) result;
+ setListenerRegistrationActor(actorContext.actorSelection(
+ reply.getListenerRegistrationPath()));
+ }
+ }
+ }, actorContext.getClientDispatcher());
+ }
+
+ @VisibleForTesting
+ ActorSelection getListenerRegistrationActor() {
+ return listenerRegistrationActor;
+ }
+
+ @VisibleForTesting
+ ActorRef getDataChangeListenerActor() {
+ return dataChangeListenerActor;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * Actor co-located with a shard. It exists only to terminate the registration when
+ * asked to do so via {@link CloseDataTreeChangeListenerRegistration}.
+ */
+public final class DataTreeChangeListenerRegistrationActor extends AbstractUntypedActor {
+ private final ListenerRegistration<DOMDataTreeChangeListener> registration;
+
+ public DataTreeChangeListenerRegistrationActor(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+ this.registration = Preconditions.checkNotNull(registration);
+ }
+
+ @Override
+ protected void handleReceive(Object message) throws Exception {
+ if (message instanceof CloseDataTreeChangeListenerRegistration) {
+ registration.close();
+ getSender().tell(CloseDataTreeChangeListenerRegistrationReply.getInstance(), getSelf());
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
+ }
+
+ public static Props props(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+ return Props.create(new DataTreeChangeListenerRegistrationCreator(registration));
+ }
+
+ private static final class DataTreeChangeListenerRegistrationCreator implements Creator<DataTreeChangeListenerRegistrationActor> {
+ private static final long serialVersionUID = 1L;
+ final ListenerRegistration<DOMDataTreeChangeListener> registration;
+
+ DataTreeChangeListenerRegistrationCreator(ListenerRegistration<DOMDataTreeChangeListener> registration) {
+ this.registration = Preconditions.checkNotNull(registration);
+ }
+
+ @Override
+ public DataTreeChangeListenerRegistrationActor create() {
+ return new DataTreeChangeListenerRegistrationActor(registration);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map.Entry;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> {
+ private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
+ private final ArrayList<DelayedDataTreeListenerRegistration> delayedRegistrations = new ArrayList<>();
+ private final Collection<ActorSelection> actors = new ArrayList<>();
+
+ DataTreeChangeListenerSupport(final Shard shard) {
+ super(shard);
+ }
+
+ @Override
+ void onLeadershipChange(final boolean isLeader) {
+ if (isLeader) {
+ for (DelayedDataTreeListenerRegistration reg : delayedRegistrations) {
+ reg.createDelegate(this);
+ }
+ delayedRegistrations.clear();
+ delayedRegistrations.trimToSize();
+ }
+
+ final EnableNotification msg = new EnableNotification(isLeader);
+ for (ActorSelection dataChangeListener : actors) {
+ dataChangeListener.tell(msg, getSelf());
+ }
+ }
+
+ @Override
+ void onMessage(final RegisterDataTreeChangeListener registerTreeChangeListener, final boolean isLeader) {
+ LOG.debug("{}: registerTreeChangeListener for {}, leader: {}", persistenceId(), registerTreeChangeListener.getPath(), isLeader);
+
+ final ListenerRegistration<DOMDataTreeChangeListener> registration;
+ final DataTreeCandidate event;
+ if (!isLeader) {
+ LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
+
+ DelayedDataTreeListenerRegistration delayedReg =
+ new DelayedDataTreeListenerRegistration(registerTreeChangeListener);
+ delayedRegistrations.add(delayedReg);
+ registration = delayedReg;
+ event = null;
+ } else {
+ final Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> res = createDelegate(registerTreeChangeListener);
+ registration = res.getKey();
+ event = res.getValue();
+ }
+
+ ActorRef listenerRegistration = createActor(DataTreeChangeListenerRegistrationActor.props(registration));
+
+ LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+ persistenceId(), listenerRegistration.path());
+
+ tellSender(new RegisterDataTreeChangeListenerReply(listenerRegistration));
+ if (event != null) {
+ registration.getInstance().onDataTreeChanged(Collections.singletonList(event));
+ }
+ }
+
+ @Override
+ Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> createDelegate(final RegisterDataTreeChangeListener message) {
+ ActorSelection dataChangeListenerPath = selectActor(message.getDataTreeChangeListenerPath());
+
+ // Notify the listener if notifications should be enabled or not
+ // If this shard is the leader then it will enable notifications else
+ // it will not
+ dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
+
+ // Now store a reference to the data change listener so it can be notified
+ // at a later point if notifications should be enabled or disabled
+ actors.add(dataChangeListenerPath);
+
+ DOMDataTreeChangeListener listener = new ForwardingDataTreeChangeListener(dataChangeListenerPath);
+
+ LOG.debug("{}: Registering for path {}", persistenceId(), message.getPath());
+
+ return getShard().getDataStore().registerTreeChangeListener(message.getPath(), listener);
+ }
+}
package org.opendaylight.controller.cluster.datastore;
import akka.util.Timeout;
+import com.google.common.collect.Sets;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.text.WordUtils;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
* @author Thomas Pantelis
*/
public class DatastoreContext {
+ public static final String METRICS_DOMAIN = "org.opendaylight.controller.cluster.datastore";
public static final Duration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = Duration.create(10, TimeUnit.MINUTES);
public static final int DEFAULT_OPERATION_TIMEOUT_IN_SECONDS = 5;
public static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT= 100;
+ private static Set<String> globalDatastoreTypes = Sets.newConcurrentHashSet();
+
private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
private int operationTimeoutInSeconds = DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
+ private boolean writeOnlyTransactionOptimizationsEnabled = true;
+
+ public static Set<String> getGlobalDatastoreTypes() {
+ return globalDatastoreTypes;
+ }
private DatastoreContext() {
setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
this.dataStoreType = other.dataStoreType;
this.shardBatchedModificationCount = other.shardBatchedModificationCount;
+ this.writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
return shardBatchedModificationCount;
}
+ public boolean isWriteOnlyTransactionOptimizationsEnabled() {
+ return writeOnlyTransactionOptimizationsEnabled;
+ }
+
public static class Builder {
private final DatastoreContext datastoreContext;
private int maxShardDataChangeExecutorPoolSize =
return this;
}
+ public Builder writeOnlyTransactionOptimizationsEnabled(boolean value) {
+ datastoreContext.writeOnlyTransactionOptimizationsEnabled = value;
+ return this;
+ }
+
public Builder maxShardDataChangeExecutorPoolSize(int maxShardDataChangeExecutorPoolSize) {
this.maxShardDataChangeExecutorPoolSize = maxShardDataChangeExecutorPoolSize;
return this;
datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
+
+ if(datastoreContext.dataStoreType != null) {
+ globalDatastoreTypes.add(datastoreContext.dataStoreType);
+ }
+
return datastoreContext;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import java.util.Collections;
+import java.util.Map.Entry;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Intermediate proxy registration returned to the user when we cannot
+ * instantiate the registration immediately. It provides a bridge to
+ * a real registration which may materialize at some point in the future.
+ */
+final class DelayedDataTreeListenerRegistration implements ListenerRegistration<DOMDataTreeChangeListener> {
+ private final RegisterDataTreeChangeListener registerTreeChangeListener;
+ private volatile ListenerRegistration<DOMDataTreeChangeListener> delegate;
+ @GuardedBy("this")
+ private boolean closed;
+
+ DelayedDataTreeListenerRegistration(final RegisterDataTreeChangeListener registerTreeChangeListener) {
+ this.registerTreeChangeListener = Preconditions.checkNotNull(registerTreeChangeListener);
+ }
+
+ synchronized void createDelegate(final DelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> factory) {
+ if (!closed) {
+ final Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> res = factory.createDelegate(registerTreeChangeListener);
+ this.delegate = res.getKey();
+ if (res.getValue() != null) {
+ delegate.getInstance().onDataTreeChanged(Collections.singletonList(res.getValue()));
+ }
+ }
+ }
+
+ @Override
+ public DOMDataTreeChangeListener getInstance() {
+ final ListenerRegistration<DOMDataTreeChangeListener> d = delegate;
+ return d == null ? null : d.getInstance();
+ }
+
+ @Override
+ public synchronized void close() {
+ if (!closed) {
+ closed = true;
+ if (delegate != null) {
+ delegate.close();
+ }
+ }
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+final class DelayedListenerRegistration implements
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
+
+ private volatile boolean closed;
+
+ private final RegisterChangeListener registerChangeListener;
+
+ private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> delegate;
+
+ DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
+ this.registerChangeListener = registerChangeListener;
+ }
+
+ void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration) {
+ this.delegate = registration;
+ }
+
+ boolean isClosed() {
+ return closed;
+ }
+
+ RegisterChangeListener getRegisterChangeListener() {
+ return registerChangeListener;
+ }
+
+ @Override
+ public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
+ return delegate != null ? delegate.getInstance() : null;
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ if(delegate != null) {
+ delegate.close();
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Map.Entry;
+
+/**
+ * Base class for factories instantiating delegates.
+ *
+ * <D> delegate type
+ * <M> message type
+ * <I> initial state type
+ */
+abstract class DelegateFactory<M, D, I> {
+ abstract Entry<D, I> createDelegate(M message);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Optional;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+
+/**
+ * A deserialized {@link DataTreeCandidateNode} which represents a deletion.
+ */
+abstract class DeletedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
+ private DeletedDataTreeCandidateNode() {
+ super(ModificationType.DELETE);
+ }
+
+ static DataTreeCandidateNode create() {
+ return new DeletedDataTreeCandidateNode() {
+ @Override
+ public PathArgument getIdentifier() {
+ throw new UnsupportedOperationException("Root node does not have an identifier");
+ }
+ };
+ }
+
+ static DataTreeCandidateNode create(final PathArgument identifier) {
+ return new DeletedDataTreeCandidateNode() {
+ @Override
+ public final PathArgument getIdentifier() {
+ return identifier;
+ }
+ };
+ }
+
+ @Override
+ public final Optional<NormalizedNode<?, ?>> getDataAfter() {
+ return Optional.absent();
+ }
+
+ @Override
+ public final Collection<DataTreeCandidateNode> getChildNodes() {
+ // We would require the before-image to reconstruct the list of nodes which
+ // were deleted.
+ throw new UnsupportedOperationException("Children not available after serialization");
+ }
+}
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*
*/
public class DistributedDataStore implements DOMStore, SchemaContextListener,
- DatastoreContextConfigAdminOverlay.Listener, AutoCloseable {
+ DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
private static final String UNKNOWN_TYPE = "unknown";
private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
- private CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+ private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+ private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
private final String type;
datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
datastoreConfigMXBean.setContext(datastoreContext);
datastoreConfigMXBean.registerMBean();
+
+ datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContext.getDataStoreMXBeanType(), actorContext);
+ datastoreInfoMXBean.registerMBean();
}
public DistributedDataStore(ActorContext actorContext) {
return listenerRegistrationProxy;
}
+ @Override
+ public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(YangInstanceIdentifier treeId, L listener) {
+ Preconditions.checkNotNull(treeId, "treeId should not be null");
+ Preconditions.checkNotNull(listener, "listener should not be null");
+
+ final String shardName = ShardStrategyFactory.getStrategy(treeId).findShard(treeId);
+ LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
+
+ final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
+ new DataTreeChangeListenerProxy<L>(actorContext, listener);
+ listenerRegistrationProxy.init(shardName, treeId);
+
+ return listenerRegistrationProxy;
+ }
+
@Override
public DOMStoreTransactionChain createTransactionChain() {
return new TransactionChainProxy(actorContext);
@Override
public void close() {
datastoreConfigMXBean.unregisterMBean();
+ datastoreInfoMXBean.unregisterMBean();
if(closeable != null) {
try {
LOG.info("Beginning to wait for data store to become ready : {}", type);
try {
- waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS);
-
- LOG.debug("Data store {} is now ready", type);
+ if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
+ LOG.debug("Data store {} is now ready", type);
+ } else {
+ LOG.error("Shared leaders failed to settle in {} seconds, giving up", TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
+ }
} catch (InterruptedException e) {
- LOG.error("Interrupted when trying to wait for shards to become leader in a reasonable amount of time - giving up");
+ LOG.error("Interrupted while waiting for shards to settle", e);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.Collection;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Internal implementation of a {@link DOMDataTreeChangeListener} which
+ * encapsulates received notifications into a {@link DataTreeChanged}
+ * message and forwards them towards the client's {@link DataTreeChangeListenerActor}.
+ */
+final class ForwardingDataTreeChangeListener implements DOMDataTreeChangeListener {
+ private final ActorSelection actor;
+
+ ForwardingDataTreeChangeListener(final ActorSelection actor) {
+ this.actor = Preconditions.checkNotNull(actor, "actor should not be null");
+ }
+
+ @Override
+ public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
+ actor.tell(new DataTreeChanged(changes), ActorRef.noSender());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorPath;
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Props;
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for factories instantiating delegates which are local to the
+ * shard leader.
+ *
+ * <D> delegate type
+ * <M> message type
+ * <I> initial state type
+ */
+abstract class LeaderLocalDelegateFactory<M, D, I> extends DelegateFactory<M, D, I> {
+ private final Shard shard;
+
+ protected LeaderLocalDelegateFactory(final Shard shard) {
+ this.shard = Preconditions.checkNotNull(shard);
+ }
+
+ protected final ActorRef getSelf() {
+ return shard.getSelf();
+ }
+
+ protected final Shard getShard() {
+ return shard;
+ }
+
+ protected final String persistenceId() {
+ return shard.persistenceId();
+ }
+
+ protected final void tellSender(final Object message) {
+ shard.getSender().tell(message, getSelf());
+ }
+
+ protected final ActorRef createActor(final Props props) {
+ return shard.getContext().actorOf(props);
+ }
+
+ protected final ActorSelection selectActor(ActorRef ref) {
+ return shard.getContext().system().actorSelection(ref.path());
+ }
+
+ protected final ActorSelection selectActor(ActorPath path) {
+ return shard.getContext().system().actorSelection(path);
+ }
+
+ /**
+ * Invoked whenever the local shard's leadership role changes.
+ *
+ * @param isLeader true if the shard has become leader, false if it has
+ * become a follower.
+ */
+ abstract void onLeadershipChange(boolean isLeader);
+ abstract void onMessage(M message, boolean isLeader);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+
+/**
+ * A deserialized {@link DataTreeCandidateNode} which represents a modification in
+ * one of its children.
+ */
+abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
+ private final Collection<DataTreeCandidateNode> children;
+
+ private ModifiedDataTreeCandidateNode(final Collection<DataTreeCandidateNode> children) {
+ super(ModificationType.SUBTREE_MODIFIED);
+ this.children = Preconditions.checkNotNull(children);
+ }
+
+ static DataTreeCandidateNode create(final Collection<DataTreeCandidateNode> children) {
+ return new ModifiedDataTreeCandidateNode(children) {
+ @Override
+ public PathArgument getIdentifier() {
+ throw new UnsupportedOperationException("Root node does not have an identifier");
+ }
+ };
+ }
+
+ static DataTreeCandidateNode create(final PathArgument identifier, final Collection<DataTreeCandidateNode> children) {
+ return new ModifiedDataTreeCandidateNode(children) {
+ @Override
+ public final PathArgument getIdentifier() {
+ return identifier;
+ }
+ };
+ }
+
+ @Override
+ public final Optional<NormalizedNode<?, ?>> getDataAfter() {
+ throw new UnsupportedOperationException("After-image not available after serialization");
+ }
+
+ @Override
+ public final Collection<DataTreeCandidateNode> getChildNodes() {
+ return children;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
+import java.util.List;
+import scala.concurrent.Future;
+
+/**
+ * A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}
+ * instance given out for empty transactions.
+ */
+final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
+ static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
+
+ private NoOpDOMStoreThreePhaseCommitCohort() {
+ // Hidden to prevent instantiation
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return IMMEDIATE_BOOLEAN_SUCCESS;
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return IMMEDIATE_VOID_SUCCESS;
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return IMMEDIATE_VOID_SUCCESS;
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ return IMMEDIATE_VOID_SUCCESS;
+ }
+
+ @Override
+ List<Future<Object>> getCohortFutures() {
+ return Collections.emptyList();
+ }
+}
@Override
public void closeTransaction() {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+ LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
+ }
+
+ @Override
+ public boolean supportsDirectCommit() {
+ return true;
+ }
+
+ @Override
+ public Future<Object> directCommit() {
+ LOG.debug("Tx {} directCommit called, failure: {}", getIdentifier(), failure);
+ operationLimiter.release();
+ return akka.dispatch.Futures.failed(failure);
}
@Override
public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called", identifier);
+ LOG.debug("Tx {} readyTransaction called, failure: {}", getIdentifier(), failure);
operationLimiter.release();
return akka.dispatch.Futures.failed(failure);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ LOG.debug("Tx {} deleteData called path = {}", getIdentifier(), path);
operationLimiter.release();
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ LOG.debug("Tx {} mergeData called path = {}", getIdentifier(), path);
operationLimiter.release();
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ LOG.debug("Tx {} writeData called path = {}", getIdentifier(), path);
operationLimiter.release();
}
@Override
public void readData(final YangInstanceIdentifier path, SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
+ LOG.debug("Tx {} readData called path = {}", getIdentifier(), path);
operationLimiter.release();
proxyFuture.setException(new ReadFailedException("Error reading data for path " + path, failure));
}
@Override
public void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ LOG.debug("Tx {} dataExists called path = {}", getIdentifier(), path);
operationLimiter.release();
proxyFuture.setException(new ReadFailedException("Error checking exists for path " + path, failure));
}
package org.opendaylight.controller.cluster.datastore;
-public interface OperationCallback {
+import java.util.concurrent.atomic.AtomicReference;
+
+interface OperationCallback {
+ OperationCallback NO_OP_CALLBACK = new OperationCallback() {
+ @Override
+ public void run() {
+ }
+
+ @Override
+ public void success() {
+ }
+
+ @Override
+ public void failure() {
+ }
+ };
+
+ class Reference extends AtomicReference<OperationCallback> {
+ private static final long serialVersionUID = 1L;
+
+ public Reference(OperationCallback initialValue) {
+ super(initialValue);
+ }
+ }
+
void run();
void success();
void failure();
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+
+final class ReadOnlyShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeSnapshot> {
+ ReadOnlyShardDataTreeTransaction(final String id, final DataTreeSnapshot snapshot) {
+ super(id, snapshot);
+ }
+
+ @Override
+ void abort() {
+ close();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+
+final class ReadWriteShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeModification> {
+ private final ShardDataTreeTransactionParent parent;
+
+ protected ReadWriteShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final String id, final DataTreeModification modification) {
+ super(id, modification);
+ this.parent = Preconditions.checkNotNull(parent);
+ }
+
+ @Override
+ void abort() {
+ Preconditions.checkState(close(), "Transaction is already closed");
+
+ parent.abortTransaction(this);
+ }
+
+ ShardDataTreeCohort ready() {
+ Preconditions.checkState(close(), "Transaction is already closed");
+
+ return parent.finishTransaction(this);
+ }
+}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
-import java.util.Collection;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
-import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
-import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
* A Shard represents a portion of the logical data tree <br/>
* <p>
- * Our Shard uses InMemoryDataStore as it's internal representation and delegates all requests it
+ * Our Shard uses InMemoryDataTree as it's internal representation and delegates all requests it
* </p>
*/
public class Shard extends RaftActor {
- private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
-
private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
@VisibleForTesting
static final String DEFAULT_NAME = "default";
// The state of this Shard
- private final InMemoryDOMDataStore store;
+ private final ShardDataTree store;
/// The name of this shard
- private final ShardIdentifier name;
+ private final String name;
private final ShardStats shardMBean;
- private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();
-
- private final List<DelayedListenerRegistration> delayedListenerRegistrations =
- Lists.newArrayList();
-
private DatastoreContext datastoreContext;
- private DataPersistenceProvider dataPersistenceProvider;
-
- private SchemaContext schemaContext;
-
- private int createSnapshotTransactionCounter;
-
private final ShardCommitCoordinator commitCoordinator;
private long transactionCommitTimeout;
private final MessageTracker appendEntriesReplyTracker;
- private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
- Serialization.serializedActorPath(getSelf()));
-
-
- /**
- * Coordinates persistence recovery on startup.
- */
- private ShardRecoveryCoordinator recoveryCoordinator;
- private List<Object> currentLogRecoveryBatch;
+ private final ShardTransactionActorFactory transactionActorFactory;
- private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
+ private final ShardSnapshotCohort snapshotCohort;
- private final String txnDispatcherPath;
+ private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
+ private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
- protected Shard(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
+ protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
- super(name.toString(), mapPeerAddresses(peerAddresses),
- Optional.of(datastoreContext.getShardRaftConfig()));
+ super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));
- this.name = name;
+ this.name = name.toString();
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
- this.dataPersistenceProvider = (datastoreContext.isPersistent())
- ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
- this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
- .getDispatcherPath(Dispatchers.DispatcherType.Transaction);
+ setPersistence(datastoreContext.isPersistent());
LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
- store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
- datastoreContext.getDataStoreProperties());
-
- if(schemaContext != null) {
- store.onGlobalContextUpdated(schemaContext);
- }
+ store = new ShardDataTree(schemaContext);
shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
datastoreContext.getDataStoreMXBeanType());
- shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
+ shardMBean.setShardActor(getSelf());
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
- commitCoordinator = new ShardCommitCoordinator(TimeUnit.SECONDS.convert(1, TimeUnit.MINUTES),
- datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, name.toString());
+ commitCoordinator = new ShardCommitCoordinator(store,
+ TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
+ datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);
setTransactionCommitTimeout();
appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
+
+ transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
+ new Dispatchers(context().system().dispatchers()).getDispatcherPath(
+ Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
+
+ snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
}
private void setTransactionCommitTimeout() {
datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
}
- private static Map<String, String> mapPeerAddresses(
- final Map<ShardIdentifier, String> peerAddresses) {
- Map<String, String> map = new HashMap<>();
-
- for (Map.Entry<ShardIdentifier, String> entry : peerAddresses
- .entrySet()) {
- map.put(entry.getKey().toString(), entry.getValue());
- }
-
- return map;
- }
-
public static Props props(final ShardIdentifier name,
- final Map<ShardIdentifier, String> peerAddresses,
+ final Map<String, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
Preconditions.checkNotNull(name, "name should not be null");
Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
try {
if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
handleCreateTransaction(message);
+ } else if (BatchedModifications.class.isInstance(message)) {
+ handleBatchedModifications((BatchedModifications)message);
} else if (message instanceof ForwardedReadyTransaction) {
- handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
+ commitCoordinator.handleForwardedReadyTransaction((ForwardedReadyTransaction) message,
+ getSender(), this);
} else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
} else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
} else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
closeTransactionChain(CloseTransactionChain.fromSerializable(message));
} else if (message instanceof RegisterChangeListener) {
- registerChangeListener((RegisterChangeListener) message);
+ changeSupport.onMessage((RegisterChangeListener) message, isLeader());
+ } else if (message instanceof RegisterDataTreeChangeListener) {
+ treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
} else if (message instanceof UpdateSchemaContext) {
updateSchemaContext((UpdateSchemaContext) message);
} else if (message instanceof PeerAddressResolved) {
return roleChangeNotifier;
}
+ @Override
+ protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId) {
+ return new ShardLeaderStateChanged(memberId, leaderId,
+ isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent());
+ }
+
private void onDatastoreContext(DatastoreContext context) {
datastoreContext = context;
setTransactionCommitTimeout();
- if(datastoreContext.isPersistent() &&
- dataPersistenceProvider instanceof NonPersistentRaftDataProvider) {
- dataPersistenceProvider = new PersistentDataProvider();
- } else if(!datastoreContext.isPersistent() &&
- dataPersistenceProvider instanceof PersistentDataProvider) {
- dataPersistenceProvider = new NonPersistentRaftDataProvider();
+ if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
+ setPersistence(true);
+ } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
+ setPersistence(false);
}
updateConfigParams(datastoreContext.getShardRaftConfig());
}
}
- private void handleCommitTransaction(final CommitTransaction commit) {
- final String transactionID = commit.getTransactionID();
+ private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
+ return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
+ }
- LOG.debug("{}: Committing transaction {}", persistenceId(), transactionID);
+ void continueCommit(final CohortEntry cohortEntry) throws Exception {
+ final DataTreeCandidate candidate = cohortEntry.getCohort().getCandidate();
- // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
- // this transaction.
- final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
- if(cohortEntry == null) {
- // We're not the current Tx - the Tx was likely expired b/c it took too long in
- // between the canCommit and commit messages.
- IllegalStateException ex = new IllegalStateException(
- String.format("%s: Cannot commit transaction %s - it is not the current transaction",
- persistenceId(), transactionID));
- LOG.error(ex.getMessage());
+ // If we do not have any followers and we are not using persistence
+ // or if cohortEntry has no modifications
+ // we can apply modification to the state immediately
+ if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
+ applyModificationToState(getSender(), cohortEntry.getTransactionID(), candidate);
+ } else {
+ Shard.this.persistData(getSender(), cohortEntry.getTransactionID(),
+ DataTreeCandidatePayload.create(candidate));
+ }
+ }
+
+ private void handleCommitTransaction(final CommitTransaction commit) {
+ if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
shardMBean.incrementFailedTransactionsCount();
- getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
- return;
}
+ }
- // We perform the preCommit phase here atomically with the commit phase. This is an
- // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
- // coordination of preCommit across shards in case of failure but preCommit should not
- // normally fail since we ensure only one concurrent 3-phase commit.
+ private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
+ LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
try {
// We block on the future here so we don't have to worry about possibly accessing our
// state on a different thread outside of our dispatcher. Also, the data store
// currently uses a same thread executor anyway.
- cohortEntry.getCohort().preCommit().get();
+ cohortEntry.getCohort().commit().get();
+
+ sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
+
+ shardMBean.incrementCommittedTransactionCount();
+ shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
- // If we do not have any followers and we are not using persistence we can
- // apply modification to the state immediately
- if(!hasFollowers() && !persistence().isRecoveryApplicable()){
- applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
- } else {
- Shard.this.persistData(getSender(), transactionID,
- new ModificationPayload(cohortEntry.getModification()));
- }
} catch (Exception e) {
- LOG.error("{} An exception occurred while preCommitting transaction {}",
- persistenceId(), cohortEntry.getTransactionID(), e);
+ sender.tell(new akka.actor.Status.Failure(e), getSelf());
+
+ LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
+ transactionID, e);
shardMBean.incrementFailedTransactionsCount();
- getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ } finally {
+ commitCoordinator.currentTransactionComplete(transactionID, true);
}
-
- cohortEntry.updateLastAccessTime();
}
private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
// after the commit has been replicated to a majority of the followers.
CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
- if(cohortEntry == null) {
+ if (cohortEntry == null) {
// The transaction is no longer the current commit. This can happen if the transaction
// was aborted prior, most likely due to timeout in the front-end. We need to finish
// committing the transaction though since it was successfully persisted and replicated
// transaction.
cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
if(cohortEntry != null) {
- commitWithNewTransaction(cohortEntry.getModification());
+ try {
+ store.applyForeignCandidate(transactionID, cohortEntry.getCohort().getCandidate());
+ } catch (DataValidationFailedException e) {
+ shardMBean.incrementFailedTransactionsCount();
+ LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
+ }
+
sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
} else {
// This really shouldn't happen - it likely means that persistence or replication
LOG.error(ex.getMessage());
sender.tell(new akka.actor.Status.Failure(ex), getSelf());
}
-
- return;
- }
-
- LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
-
- try {
- // We block on the future here so we don't have to worry about possibly accessing our
- // state on a different thread outside of our dispatcher. Also, the data store
- // currently uses a same thread executor anyway.
- cohortEntry.getCohort().commit().get();
-
- sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
-
- shardMBean.incrementCommittedTransactionCount();
- shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
-
- } catch (Exception e) {
- sender.tell(new akka.actor.Status.Failure(e), getSelf());
-
- LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
- transactionID, e);
- shardMBean.incrementFailedTransactionsCount();
- } finally {
- commitCoordinator.currentTransactionComplete(transactionID, true);
+ } else {
+ finishCommit(sender, transactionID, cohortEntry);
}
}
private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
- commitCoordinator.handleCanCommit(canCommit, getSender(), self());
- }
-
- private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
- LOG.debug("{}: Readying transaction {}, client version {}", persistenceId(),
- ready.getTransactionID(), ready.getTxnClientVersion());
-
- // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
- // commitCoordinator in preparation for the subsequent three phase commit initiated by
- // the front-end.
- commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
- ready.getModification());
-
- // Return our actor path as we'll handle the three phase commit, except if the Tx client
- // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
- // node. In that case, the subsequent 3-phase commit messages won't contain the
- // transactionId so to maintain backwards compatibility, we create a separate cohort actor
- // to provide the compatible behavior.
- if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
- LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
- ActorRef replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
- ready.getTransactionID()));
-
- ReadyTransactionReply readyTransactionReply =
- new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath));
- getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
- readyTransactionReply, getSelf());
-
+ commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
+ }
+
+ private void handleBatchedModifications(BatchedModifications batched) {
+ // This message is sent to prepare the modificationsa transaction directly on the Shard as an
+ // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
+ // BatchedModifications message, the caller sets the ready flag in the message indicating
+ // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
+ // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
+ // ReadyTransaction message.
+
+ // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
+ // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
+ // the primary/leader shard. However with timing and caching on the front-end, there's a small
+ // window where it could have a stale leader during leadership transitions.
+ //
+ if(isLeader()) {
+ try {
+ commitCoordinator.handleBatchedModifications(batched, getSender(), this);
+ } catch (Exception e) {
+ LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
+ batched.getTransactionID(), e);
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
} else {
-
- getSender().tell(ready.isReturnSerialized() ? READY_TRANSACTION_REPLY.toSerializable() :
- READY_TRANSACTION_REPLY, getSelf());
+ ActorSelection leader = getLeader();
+ if(leader != null) {
+ // TODO: what if this is not the first batch and leadership changed in between batched messages?
+ // We could check if the commitCoordinator already has a cached entry and forward all the previous
+ // batched modifications.
+ LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
+ leader.forward(batched, getContext());
+ } else {
+ // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
+ // it more resilient in case we're in the process of electing a new leader.
+ getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
+ "Could not find the leader for shard %s. This typically happens" +
+ " when the system is coming up or recovering and a leader is being elected. Try again" +
+ " later.", persistenceId()))), getSelf());
+ }
}
}
}
private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
- DOMStoreTransactionChain chain =
- transactionChains.remove(closeTransactionChain.getTransactionChainId());
-
- if(chain != null) {
- chain.close();
- }
+ store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
}
private ActorRef createTypedTransactionActor(int transactionType,
ShardTransactionIdentifier transactionId, String transactionChainId,
short clientVersion ) {
- DOMStoreTransactionFactory factory = store;
-
- if(!transactionChainId.isEmpty()) {
- factory = transactionChains.get(transactionChainId);
- if(factory == null){
- DOMStoreTransactionChain transactionChain = store.createTransactionChain();
- transactionChains.put(transactionChainId, transactionChain);
- factory = transactionChain;
- }
- }
-
- if(this.schemaContext == null) {
- throw new IllegalStateException("SchemaContext is not set");
- }
-
- if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
-
- shardMBean.incrementWriteOnlyTransactionCount();
-
- return createShardTransaction(factory.newWriteOnlyTransaction(), transactionId, clientVersion);
-
- } else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
-
- shardMBean.incrementReadWriteTransactionCount();
-
- return createShardTransaction(factory.newReadWriteTransaction(), transactionId, clientVersion);
-
- } else if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
-
- shardMBean.incrementReadOnlyTransactionCount();
-
- return createShardTransaction(factory.newReadOnlyTransaction(), transactionId, clientVersion);
-
- } else {
- throw new IllegalArgumentException(
- "Shard="+name + ":CreateTransaction message has unidentified transaction type="
- + transactionType);
- }
- }
-
- private ActorRef createShardTransaction(DOMStoreTransaction transaction, ShardTransactionIdentifier transactionId,
- short clientVersion){
- return getContext().actorOf(
- ShardTransaction.props(transaction, getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion)
- .withDispatcher(txnDispatcherPath),
- transactionId.toString());
-
+ return transactionActorFactory.newShardTransaction(TransactionProxy.TransactionType.fromInt(transactionType),
+ transactionId, transactionChainId, clientVersion);
}
private void createTransaction(CreateTransaction createTransaction) {
return transactionActor;
}
- private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
- throws ExecutionException, InterruptedException {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- commitCohort.preCommit().get();
- commitCohort.commit().get();
- }
-
private void commitWithNewTransaction(final Modification modification) {
- DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
- modification.apply(tx);
+ ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
+ modification.apply(tx.getSnapshot());
try {
- syncCommitTransaction(tx);
+ snapshotCohort.syncCommitTransaction(tx);
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
- } catch (InterruptedException | ExecutionException e) {
+ } catch (Exception e) {
shardMBean.incrementFailedTransactionsCount();
LOG.error("{}: Failed to commit", persistenceId(), e);
}
}
private void updateSchemaContext(final UpdateSchemaContext message) {
- this.schemaContext = message.getSchemaContext();
updateSchemaContext(message.getSchemaContext());
- store.onGlobalContextUpdated(message.getSchemaContext());
}
@VisibleForTesting
void updateSchemaContext(final SchemaContext schemaContext) {
- store.onGlobalContextUpdated(schemaContext);
- }
-
- private void registerChangeListener(final RegisterChangeListener registerChangeListener) {
-
- LOG.debug("{}: registerDataChangeListener for {}", persistenceId(), registerChangeListener.getPath());
-
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> registration;
- if(isLeader()) {
- registration = doChangeListenerRegistration(registerChangeListener);
- } else {
- LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
-
- DelayedListenerRegistration delayedReg =
- new DelayedListenerRegistration(registerChangeListener);
- delayedListenerRegistrations.add(delayedReg);
- registration = delayedReg;
- }
-
- ActorRef listenerRegistration = getContext().actorOf(
- DataChangeListenerRegistration.props(registration));
-
- LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
- persistenceId(), listenerRegistration.path());
-
- getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf());
+ store.updateSchemaContext(schemaContext);
}
- private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> doChangeListenerRegistration(
- final RegisterChangeListener registerChangeListener) {
-
- ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
- registerChangeListener.getDataChangeListenerPath());
-
- // Notify the listener if notifications should be enabled or not
- // If this shard is the leader then it will enable notifications else
- // it will not
- dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
-
- // Now store a reference to the data change listener so it can be notified
- // at a later point if notifications should be enabled or disabled
- dataChangeListeners.add(dataChangeListenerPath);
-
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
- new DataChangeListenerProxy(dataChangeListenerPath);
-
- LOG.debug("{}: Registering for path {}", persistenceId(), registerChangeListener.getPath());
-
- return store.registerChangeListener(registerChangeListener.getPath(), listener,
- registerChangeListener.getScope());
- }
-
- private boolean isMetricsCaptureEnabled(){
+ private boolean isMetricsCaptureEnabled() {
CommonConfig config = new CommonConfig(getContext().system().settings().config());
return config.isMetricCaptureEnabled();
}
@Override
- protected
- void startLogRecoveryBatch(final int maxBatchSize) {
- currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
- }
+ protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+ return snapshotCohort;
}
@Override
- protected void appendRecoveredLogEntry(final Payload data) {
- if(data instanceof ModificationPayload) {
- try {
- currentLogRecoveryBatch.add(((ModificationPayload) data).getModification());
- } catch (ClassNotFoundException | IOException e) {
- LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
- }
- } else if (data instanceof CompositeModificationPayload) {
- currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
- } else if (data instanceof CompositeModificationByteStringPayload) {
- currentLogRecoveryBatch.add(((CompositeModificationByteStringPayload) data).getModification());
- } else {
- LOG.error("{}: Unknown state received {} during recovery", persistenceId(), data);
- }
- }
-
- @Override
- protected void applyRecoverySnapshot(final byte[] snapshotBytes) {
- if(recoveryCoordinator == null) {
- recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext,
- LOG, name.toString());
- }
-
- recoveryCoordinator.submit(snapshotBytes, store.newWriteOnlyTransaction());
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: submitted recovery sbapshot", persistenceId());
- }
- }
-
- @Override
- protected void applyCurrentLogRecoveryBatch() {
- if(recoveryCoordinator == null) {
- recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext,
- LOG, name.toString());
- }
-
- recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: submitted log recovery batch with size {}", persistenceId(),
- currentLogRecoveryBatch.size());
- }
+ @Nonnull
+ protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+ return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
}
@Override
protected void onRecoveryComplete() {
- if(recoveryCoordinator != null) {
- Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: recovery complete - committing {} Tx's", persistenceId(), txList.size());
- }
-
- for(DOMStoreWriteTransaction tx: txList) {
- try {
- syncCommitTransaction(tx);
- shardMBean.incrementCommittedTransactionCount();
- } catch (InterruptedException | ExecutionException e) {
- shardMBean.incrementFailedTransactionsCount();
- LOG.error("{}: Failed to commit", persistenceId(), e);
- }
- }
- }
-
- recoveryCoordinator = null;
- currentLogRecoveryBatch = null;
- updateJournalStats();
-
//notify shard manager
getContext().parent().tell(new ActorInitialized(), getSelf());
@Override
protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
-
- if(data instanceof ModificationPayload) {
+ if (data instanceof DataTreeCandidatePayload) {
+ if (clientActor == null) {
+ // No clientActor indicates a replica coming from the leader
+ try {
+ store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
+ } catch (DataValidationFailedException | IOException e) {
+ LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
+ }
+ } else {
+ // Replication consensus reached, proceed to commit
+ finishCommit(clientActor, identifier);
+ }
+ } else if (data instanceof ModificationPayload) {
try {
applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
} catch (ClassNotFoundException | IOException e) {
LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
}
- }
- else if (data instanceof CompositeModificationPayload) {
+ } else if (data instanceof CompositeModificationPayload) {
Object modification = ((CompositeModificationPayload) data).getModification();
applyModificationToState(clientActor, identifier, modification);
persistenceId(), data, data.getClass().getClassLoader(),
CompositeModificationPayload.class.getClassLoader());
}
-
- updateJournalStats();
-
}
private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
}
}
- private void updateJournalStats() {
- ReplicatedLogEntry lastLogEntry = getLastLogEntry();
-
- if (lastLogEntry != null) {
- shardMBean.setLastLogIndex(lastLogEntry.getIndex());
- shardMBean.setLastLogTerm(lastLogEntry.getTerm());
- }
-
- shardMBean.setCommitIndex(getCommitIndex());
- shardMBean.setLastApplied(getLastApplied());
- shardMBean.setInMemoryJournalDataSize(getRaftActorContext().getReplicatedLog().dataSize());
- }
-
- @Override
- protected void createSnapshot() {
- // Create a transaction actor. We are really going to treat the transaction as a worker
- // so that this actor does not get block building the snapshot. THe transaction actor will
- // after processing the CreateSnapshot message.
-
- ActorRef createSnapshotTransaction = createTransaction(
- TransactionProxy.TransactionType.READ_ONLY.ordinal(),
- "createSnapshot" + ++createSnapshotTransactionCounter, "",
- DataStoreVersions.CURRENT_VERSION);
-
- createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, self());
- }
-
- @VisibleForTesting
- @Override
- protected void applySnapshot(final byte[] snapshotBytes) {
- // Since this will be done only on Recovery or when this actor is a Follower
- // we can safely commit everything in here. We not need to worry about event notifications
- // as they would have already been disabled on the follower
-
- LOG.info("{}: Applying snapshot", persistenceId());
- try {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
-
- NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-
- // delete everything first
- transaction.delete(DATASTORE_ROOT);
-
- // Add everything from the remote node back
- transaction.write(DATASTORE_ROOT, node);
- syncCommitTransaction(transaction);
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
- } finally {
- LOG.info("{}: Done applying snapshot", persistenceId());
- }
- }
-
@Override
protected void onStateChanged() {
boolean isLeader = isLeader();
- for (ActorSelection dataChangeListener : dataChangeListeners) {
- dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
- }
-
- if(isLeader) {
- for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
- if(!reg.isClosed()) {
- reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
- }
- }
-
- delayedListenerRegistrations.clear();
- }
-
- shardMBean.setRaftState(getRaftState().name());
- shardMBean.setCurrentTerm(getCurrentTerm());
+ changeSupport.onLeadershipChange(isLeader);
+ treeChangeSupport.onLeadershipChange(isLeader);
// If this actor is no longer the leader close all the transaction chains
- if(!isLeader){
- for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "{}: onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
- persistenceId(), entry.getKey(), getId());
- }
- entry.getValue().close();
+ if (!isLeader) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
+ persistenceId(), getId());
}
- transactionChains.clear();
+ store.closeAllTransactionChains();
}
}
@Override
- protected DataPersistenceProvider persistence() {
- return dataPersistenceProvider;
+ protected void onLeaderChanged(String oldLeader, String newLeader) {
+ shardMBean.incrementLeadershipChangeCount();
}
- @Override protected void onLeaderChanged(final String oldLeader, final String newLeader) {
- shardMBean.setLeader(newLeader);
- }
-
- @Override public String persistenceId() {
- return this.name.toString();
+ @Override
+ public String persistenceId() {
+ return this.name;
}
@VisibleForTesting
- DataPersistenceProvider getDataPersistenceProvider() {
- return dataPersistenceProvider;
+ ShardCommitCoordinator getCommitCoordinator() {
+ return commitCoordinator;
}
+
private static class ShardCreator implements Creator<Shard> {
private static final long serialVersionUID = 1L;
final ShardIdentifier name;
- final Map<ShardIdentifier, String> peerAddresses;
+ final Map<String, String> peerAddresses;
final DatastoreContext datastoreContext;
final SchemaContext schemaContext;
- ShardCreator(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
+ ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
this.name = name;
this.peerAddresses = peerAddresses;
}
@VisibleForTesting
- public InMemoryDOMDataStore getDataStore() {
+ public ShardDataTree getDataStore() {
return store;
}
ShardStats getShardMBean() {
return shardMBean;
}
-
- private static class DelayedListenerRegistration implements
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
-
- private volatile boolean closed;
-
- private final RegisterChangeListener registerChangeListener;
-
- private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> delegate;
-
- DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
- this.registerChangeListener = registerChangeListener;
- }
-
- void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
- NormalizedNode<?, ?>>> registration) {
- this.delegate = registration;
- }
-
- boolean isClosed() {
- return closed;
- }
-
- RegisterChangeListener getRegisterChangeListener() {
- return registerChangeListener;
- }
-
- @Override
- public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
- return delegate != null ? delegate.getInstance() : null;
- }
-
- @Override
- public void close() {
- closed = true;
- if(delegate != null) {
- delegate.close();
- }
- }
- }
}
import akka.actor.ActorRef;
import akka.actor.Status;
+import akka.serialization.Serialization;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalCause;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
/**
*/
public class ShardCommitCoordinator {
+ // Interface hook for unit tests to replace or decorate the DOMStoreThreePhaseCommitCohorts.
+ public interface CohortDecorator {
+ ShardDataTreeCohort decorate(String transactionID, ShardDataTreeCohort actual);
+ }
+
private final Cache<String, CohortEntry> cohortCache;
private CohortEntry currentCohortEntry;
+ private final ShardDataTree dataTree;
+
private final Queue<CohortEntry> queuedCohortEntries;
private int queueCapacity;
private final String name;
- public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, Logger log,
- String name) {
- cohortCache = CacheBuilder.newBuilder().expireAfterAccess(
- cacheExpiryTimeoutInSec, TimeUnit.SECONDS).build();
+ private final RemovalListener<String, CohortEntry> cacheRemovalListener =
+ new RemovalListener<String, CohortEntry>() {
+ @Override
+ public void onRemoval(RemovalNotification<String, CohortEntry> notification) {
+ if(notification.getCause() == RemovalCause.EXPIRED) {
+ log.warn("{}: Transaction {} was timed out of the cache", name, notification.getKey());
+ }
+ }
+ };
+
+ // This is a hook for unit tests to replace or decorate the DOMStoreThreePhaseCommitCohorts.
+ private CohortDecorator cohortDecorator;
+
+ private ReadyTransactionReply readyTransactionReply;
+
+ public ShardCommitCoordinator(ShardDataTree dataTree,
+ long cacheExpiryTimeoutInSec, int queueCapacity, ActorRef shardActor, Logger log, String name) {
this.queueCapacity = queueCapacity;
this.log = log;
this.name = name;
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+
+ cohortCache = CacheBuilder.newBuilder().expireAfterAccess(cacheExpiryTimeoutInSec, TimeUnit.SECONDS).
+ removalListener(cacheRemovalListener).build();
// We use a LinkedList here to avoid synchronization overhead with concurrent queue impls
// since this should only be accessed on the shard's dispatcher.
this.queueCapacity = queueCapacity;
}
+ private ReadyTransactionReply readyTransactionReply(Shard shard) {
+ if(readyTransactionReply == null) {
+ readyTransactionReply = new ReadyTransactionReply(Serialization.serializedActorPath(shard.self()));
+ }
+
+ return readyTransactionReply;
+ }
+
/**
- * This method caches a cohort entry for the given transactions ID in preparation for the
- * subsequent 3-phase commit.
- *
- * @param transactionID the ID of the transaction
- * @param cohort the cohort to participate in the transaction commit
- * @param modification the modification made by the transaction
+ * This method is called to ready a transaction that was prepared by ShardTransaction actor. It caches
+ * the prepared cohort entry for the given transactions ID in preparation for the subsequent 3-phase commit.
*/
- public void transactionReady(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
- Modification modification) {
+ public void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard) {
+ log.debug("{}: Readying transaction {}, client version {}", name,
+ ready.getTransactionID(), ready.getTxnClientVersion());
+
+ CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), ready.getCohort(),
+ (MutableCompositeModification) ready.getModification());
+ cohortCache.put(ready.getTransactionID(), cohortEntry);
+
+ if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
+ // Return our actor path as we'll handle the three phase commit except if the Tx client
+ // version < Helium-1 version which means the Tx was initiated by a base Helium version node.
+ // In that case, the subsequent 3-phase commit messages won't contain the transactionId so to
+ // maintain backwards compatibility, we create a separate cohort actor to provide the compatible behavior.
+ ActorRef replyActorPath = shard.self();
+ if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
+ log.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", name);
+ replyActorPath = shard.getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
+ ready.getTransactionID()));
+ }
- cohortCache.put(transactionID, new CohortEntry(transactionID, cohort, modification));
+ ReadyTransactionReply readyTransactionReply =
+ new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
+ ready.getTxnClientVersion());
+ sender.tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
+ readyTransactionReply, shard.self());
+ } else {
+ if(ready.isDoImmediateCommit()) {
+ cohortEntry.setDoImmediateCommit(true);
+ cohortEntry.setReplySender(sender);
+ cohortEntry.setShard(shard);
+ handleCanCommit(cohortEntry);
+ } else {
+ // The caller does not want immediate commit - the 3-phase commit will be coordinated by the
+ // front-end so send back a ReadyTransactionReply with our actor path.
+ sender.tell(readyTransactionReply(shard), shard.self());
+ }
+ }
}
/**
- * This method handles the canCommit phase for a transaction.
+ * This method handles a BatchedModifications message for a transaction being prepared directly on the
+ * Shard actor instead of via a ShardTransaction actor. If there's no currently cached
+ * DOMStoreWriteTransaction, one is created. The batched modifications are applied to the write Tx. If
+ * the BatchedModifications is ready to commit then a DOMStoreThreePhaseCommitCohort is created.
*
- * @param canCommit the CanCommitTransaction message
- * @param sender the actor that sent the message
- * @param shard the transaction's shard actor
+ * @param batched the BatchedModifications
+ * @param shardActor the transaction's shard actor
+ *
+ * @throws ExecutionException if an error occurs loading the cache
*/
- public void handleCanCommit(CanCommitTransaction canCommit, final ActorRef sender,
- final ActorRef shard) {
- String transactionID = canCommit.getTransactionID();
+ boolean handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard)
+ throws ExecutionException {
+ CohortEntry cohortEntry = cohortCache.getIfPresent(batched.getTransactionID());
+ if(cohortEntry == null) {
+ cohortEntry = new CohortEntry(batched.getTransactionID(),
+ dataTree.newReadWriteTransaction(batched.getTransactionID(),
+ batched.getTransactionChainID()));
+ cohortCache.put(batched.getTransactionID(), cohortEntry);
+ }
+
if(log.isDebugEnabled()) {
- log.debug("{}: Processing canCommit for transaction {} for shard {}",
- name, transactionID, shard.path());
+ log.debug("{}: Applying {} batched modifications for Tx {}", name,
+ batched.getModifications().size(), batched.getTransactionID());
}
- // Lookup the cohort entry that was cached previously (or should have been) by
- // transactionReady (via the ForwardedReadyTransaction message).
- final CohortEntry cohortEntry = cohortCache.getIfPresent(transactionID);
- if(cohortEntry == null) {
- // Either canCommit was invoked before ready(shouldn't happen) or a long time passed
- // between canCommit and ready and the entry was expired from the cache.
- IllegalStateException ex = new IllegalStateException(
- String.format("%s: No cohort entry found for transaction %s", name, transactionID));
- log.error(ex.getMessage());
- sender.tell(new Status.Failure(ex), shard);
- return;
+ cohortEntry.applyModifications(batched.getModifications());
+
+ if(batched.isReady()) {
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Readying Tx {}, client version {}", name,
+ batched.getTransactionID(), batched.getVersion());
+ }
+
+ cohortEntry.ready(cohortDecorator, batched.isDoCommitOnReady());
+
+ if(batched.isDoCommitOnReady()) {
+ cohortEntry.setReplySender(sender);
+ cohortEntry.setShard(shard);
+ handleCanCommit(cohortEntry);
+ } else {
+ sender.tell(readyTransactionReply(shard), shard.self());
+ }
+ } else {
+ sender.tell(new BatchedModificationsReply(batched.getModifications().size()), shard.self());
}
- cohortEntry.setCanCommitSender(sender);
- cohortEntry.setShard(shard);
+ return batched.isReady();
+ }
+
+ private void handleCanCommit(CohortEntry cohortEntry) {
+ String transactionID = cohortEntry.getTransactionID();
+
+ if(log.isDebugEnabled()) {
+ log.debug("{}: Processing canCommit for transaction {} for shard {}",
+ name, transactionID, cohortEntry.getShard().self().path());
+ }
if(currentCohortEntry != null) {
// There's already a Tx commit in progress - attempt to queue this entry to be
" capacity %d has been reached.",
name, transactionID, queueCapacity));
log.error(ex.getMessage());
- sender.tell(new Status.Failure(ex), shard);
+ cohortEntry.getReplySender().tell(new Status.Failure(ex), cohortEntry.getShard().self());
}
} else {
// No Tx commit currently in progress - make this the current entry and proceed with
}
}
+ /**
+ * This method handles the canCommit phase for a transaction.
+ *
+ * @param canCommit the CanCommitTransaction message
+ * @param sender the actor that sent the message
+ * @param shard the transaction's shard actor
+ */
+ public void handleCanCommit(String transactionID, final ActorRef sender, final Shard shard) {
+ // Lookup the cohort entry that was cached previously (or should have been) by
+ // transactionReady (via the ForwardedReadyTransaction message).
+ final CohortEntry cohortEntry = cohortCache.getIfPresent(transactionID);
+ if(cohortEntry == null) {
+ // Either canCommit was invoked before ready(shouldn't happen) or a long time passed
+ // between canCommit and ready and the entry was expired from the cache.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("%s: No cohort entry found for transaction %s", name, transactionID));
+ log.error(ex.getMessage());
+ sender.tell(new Status.Failure(ex), shard.self());
+ return;
+ }
+
+ cohortEntry.setReplySender(sender);
+ cohortEntry.setShard(shard);
+
+ handleCanCommit(cohortEntry);
+ }
+
private void doCanCommit(final CohortEntry cohortEntry) {
+ boolean canCommit = false;
try {
// We block on the future here so we don't have to worry about possibly accessing our
// state on a different thread outside of our dispatcher. Also, the data store
// currently uses a same thread executor anyway.
- Boolean canCommit = cohortEntry.getCohort().canCommit().get();
+ canCommit = cohortEntry.getCohort().canCommit().get();
+
+ if(cohortEntry.isDoImmediateCommit()) {
+ if(canCommit) {
+ doCommit(cohortEntry);
+ } else {
+ cohortEntry.getReplySender().tell(new Status.Failure(new TransactionCommitFailedException(
+ "Can Commit failed, no detailed cause available.")), cohortEntry.getShard().self());
+ }
+ } else {
+ cohortEntry.getReplySender().tell(
+ canCommit ? CanCommitTransactionReply.YES.toSerializable() :
+ CanCommitTransactionReply.NO.toSerializable(), cohortEntry.getShard().self());
+ }
+ } catch (Exception e) {
+ log.debug("{}: An exception occurred during canCommit: {}", name, e);
- cohortEntry.getCanCommitSender().tell(
- canCommit ? CanCommitTransactionReply.YES.toSerializable() :
- CanCommitTransactionReply.NO.toSerializable(), cohortEntry.getShard());
+ Throwable failure = e;
+ if(e instanceof ExecutionException) {
+ failure = e.getCause();
+ }
+ cohortEntry.getReplySender().tell(new Status.Failure(failure), cohortEntry.getShard().self());
+ } finally {
if(!canCommit) {
- // Remove the entry from the cache now since the Tx will be aborted.
- removeCohortEntry(cohortEntry.getTransactionID());
+ // Remove the entry from the cache now.
+ currentTransactionComplete(cohortEntry.getTransactionID(), true);
}
- } catch (InterruptedException | ExecutionException e) {
- log.debug("{}: An exception occurred during canCommit: {}", name, e);
+ }
+ }
+
+ private boolean doCommit(CohortEntry cohortEntry) {
+ log.debug("{}: Committing transaction {}", name, cohortEntry.getTransactionID());
+
+ boolean success = false;
+
+ // We perform the preCommit phase here atomically with the commit phase. This is an
+ // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
+ // coordination of preCommit across shards in case of failure but preCommit should not
+ // normally fail since we ensure only one concurrent 3-phase commit.
+
+ try {
+ // We block on the future here so we don't have to worry about possibly accessing our
+ // state on a different thread outside of our dispatcher. Also, the data store
+ // currently uses a same thread executor anyway.
+ cohortEntry.getCohort().preCommit().get();
- // Remove the entry from the cache now since the Tx will be aborted.
- removeCohortEntry(cohortEntry.getTransactionID());
- cohortEntry.getCanCommitSender().tell(new Status.Failure(e), cohortEntry.getShard());
+ cohortEntry.getShard().continueCommit(cohortEntry);
+
+ cohortEntry.updateLastAccessTime();
+
+ success = true;
+ } catch (Exception e) {
+ log.error("{} An exception occurred while preCommitting transaction {}",
+ name, cohortEntry.getTransactionID(), e);
+ cohortEntry.getReplySender().tell(new akka.actor.Status.Failure(e), cohortEntry.getShard().self());
+
+ currentTransactionComplete(cohortEntry.getTransactionID(), true);
}
+
+ return success;
+ }
+
+ boolean handleCommit(final String transactionID, final ActorRef sender, final Shard shard) {
+ // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
+ // this transaction.
+ final CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
+ if(cohortEntry == null) {
+ // We're not the current Tx - the Tx was likely expired b/c it took too long in
+ // between the canCommit and commit messages.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("%s: Cannot commit transaction %s - it is not the current transaction",
+ name, transactionID));
+ log.error(ex.getMessage());
+ sender.tell(new akka.actor.Status.Failure(ex), shard.self());
+ return false;
+ }
+
+ return doCommit(cohortEntry);
}
/**
}
}
+ @VisibleForTesting
+ void setCohortDecorator(CohortDecorator cohortDecorator) {
+ this.cohortDecorator = cohortDecorator;
+ }
+
+
static class CohortEntry {
private final String transactionID;
- private final DOMStoreThreePhaseCommitCohort cohort;
- private final Modification modification;
- private ActorRef canCommitSender;
- private ActorRef shard;
+ private ShardDataTreeCohort cohort;
+ private final ReadWriteShardDataTreeTransaction transaction;
+ private ActorRef replySender;
+ private Shard shard;
private long lastAccessTime;
+ private boolean doImmediateCommit;
- CohortEntry(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
- Modification modification) {
+ CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction) {
+ this.transaction = Preconditions.checkNotNull(transaction);
+ this.transactionID = transactionID;
+ }
+
+ CohortEntry(String transactionID, ShardDataTreeCohort cohort,
+ MutableCompositeModification compositeModification) {
this.transactionID = transactionID;
this.cohort = cohort;
- this.modification = modification;
+ this.transaction = null;
}
void updateLastAccessTime() {
return transactionID;
}
- DOMStoreThreePhaseCommitCohort getCohort() {
+ ShardDataTreeCohort getCohort() {
return cohort;
}
- Modification getModification() {
- return modification;
+ void applyModifications(Iterable<Modification> modifications) {
+ for (Modification modification : modifications) {
+ modification.apply(transaction.getSnapshot());
+ }
+ }
+
+ void ready(CohortDecorator cohortDecorator, boolean doImmediateCommit) {
+ Preconditions.checkState(cohort == null, "cohort was already set");
+
+ setDoImmediateCommit(doImmediateCommit);
+
+ cohort = transaction.ready();
+
+ if(cohortDecorator != null) {
+ // Call the hook for unit tests.
+ cohort = cohortDecorator.decorate(transactionID, cohort);
+ }
+ }
+
+ boolean isDoImmediateCommit() {
+ return doImmediateCommit;
+ }
+
+ void setDoImmediateCommit(boolean doImmediateCommit) {
+ this.doImmediateCommit = doImmediateCommit;
}
- ActorRef getCanCommitSender() {
- return canCommitSender;
+ ActorRef getReplySender() {
+ return replySender;
}
- void setCanCommitSender(ActorRef canCommitSender) {
- this.canCommitSender = canCommitSender;
+ void setReplySender(ActorRef replySender) {
+ this.replySender = replySender;
}
- ActorRef getShard() {
+ Shard getShard() {
return shard;
}
- void setShard(ActorRef shard) {
+ void setShard(Shard shard) {
this.shard = shard;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent;
+import org.opendaylight.controller.md.sal.dom.store.impl.ResolveDataChangeEventsTask;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTree;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Internal shard state, similar to a DOMStore, but optimized for use in the actor system,
+ * e.g. it does not expose public interfaces and assumes it is only ever called from a
+ * single thread.
+ *
+ * This class is not part of the API contract and is subject to change at any time.
+ */
+@NotThreadSafe
+@VisibleForTesting
+public final class ShardDataTree extends ShardDataTreeTransactionParent {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);
+ private static final ShardDataTreeNotificationManager MANAGER = new ShardDataTreeNotificationManager();
+ private final Map<String, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
+ private final ShardDataTreeChangePublisher treeChangePublisher = new ShardDataTreeChangePublisher();
+ private final ListenerTree listenerTree = ListenerTree.create();
+ private final TipProducingDataTree dataTree;
+
+ ShardDataTree(final SchemaContext schemaContext) {
+ dataTree = InMemoryDataTreeFactory.getInstance().create();
+ if (schemaContext != null) {
+ dataTree.setSchemaContext(schemaContext);
+ }
+ }
+
+ TipProducingDataTree getDataTree() {
+ return dataTree;
+ }
+
+ void updateSchemaContext(final SchemaContext schemaContext) {
+ dataTree.setSchemaContext(schemaContext);
+ }
+
+ private ShardDataTreeTransactionChain ensureTransactionChain(final String chainId) {
+ ShardDataTreeTransactionChain chain = transactionChains.get(chainId);
+ if (chain == null) {
+ chain = new ShardDataTreeTransactionChain(chainId, this);
+ transactionChains.put(chainId, chain);
+ }
+
+ return chain;
+ }
+
+ ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final String txId, final String chainId) {
+ if (Strings.isNullOrEmpty(chainId)) {
+ return new ReadOnlyShardDataTreeTransaction(txId, dataTree.takeSnapshot());
+ }
+
+ return ensureTransactionChain(chainId).newReadOnlyTransaction(txId);
+ }
+
+ ReadWriteShardDataTreeTransaction newReadWriteTransaction(final String txId, final String chainId) {
+ if (Strings.isNullOrEmpty(chainId)) {
+ return new ReadWriteShardDataTreeTransaction(this, txId, dataTree.takeSnapshot().newModification());
+ }
+
+ return ensureTransactionChain(chainId).newReadWriteTransaction(txId);
+ }
+
+ void notifyListeners(final DataTreeCandidate candidate) {
+ LOG.debug("Notifying listeners on candidate {}", candidate);
+
+ // DataTreeChanges first, as they are more light-weight
+ treeChangePublisher.publishChanges(candidate);
+
+ // DataChanges second, as they are heavier
+ ResolveDataChangeEventsTask.create(candidate, listenerTree).resolve(MANAGER);
+ }
+
+ void closeAllTransactionChains() {
+ for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
+ chain.close();
+ }
+
+ transactionChains.clear();
+ }
+
+ void closeTransactionChain(final String transactionChainId) {
+ final ShardDataTreeTransactionChain chain = transactionChains.remove(transactionChainId);
+ if (chain != null) {
+ chain.close();
+ } else {
+ LOG.debug("Closing non-existent transaction chain {}", transactionChainId);
+ }
+ }
+
+ Entry<ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>, DOMImmutableDataChangeEvent> registerChangeListener(
+ final YangInstanceIdentifier path,
+ final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener, final DataChangeScope scope) {
+ final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> reg =
+ listenerTree.registerDataChangeListener(path, listener, scope);
+
+ final Optional<NormalizedNode<?, ?>> currentState = dataTree.takeSnapshot().readNode(path);
+ final DOMImmutableDataChangeEvent event;
+ if (currentState.isPresent()) {
+ final NormalizedNode<?, ?> data = currentState.get();
+ event = DOMImmutableDataChangeEvent.builder(DataChangeScope.BASE).setAfter(data).addCreated(path, data).build();
+ } else {
+ event = null;
+ }
+
+ return new SimpleEntry<>(reg, event);
+ }
+
+ Entry<ListenerRegistration<DOMDataTreeChangeListener>, DataTreeCandidate> registerTreeChangeListener(final YangInstanceIdentifier path,
+ final DOMDataTreeChangeListener listener) {
+ final ListenerRegistration<DOMDataTreeChangeListener> reg = treeChangePublisher.registerTreeChangeListener(path, listener);
+
+ final Optional<NormalizedNode<?, ?>> currentState = dataTree.takeSnapshot().readNode(path);
+ final DataTreeCandidate event;
+ if (currentState.isPresent()) {
+ event = DataTreeCandidates.fromNormalizedNode(path, currentState.get());
+ } else {
+ event = null;
+ }
+ return new SimpleEntry<>(reg, event);
+ }
+
+ void applyForeignCandidate(final String identifier, final DataTreeCandidate foreign) throws DataValidationFailedException {
+ LOG.debug("Applying foreign transaction {}", identifier);
+
+ final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+ DataTreeCandidates.applyToModification(mod, foreign);
+ mod.ready();
+
+ LOG.trace("Applying foreign modification {}", mod);
+ dataTree.validate(mod);
+ final DataTreeCandidate candidate = dataTree.prepare(mod);
+ dataTree.commit(candidate);
+ notifyListeners(candidate);
+ }
+
+ @Override
+ void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction) {
+ // Intentional no-op
+ }
+
+ @Override
+ ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction) {
+ final DataTreeModification snapshot = transaction.getSnapshot();
+ snapshot.ready();
+ return new SimpleShardDataTreeCohort(this, snapshot);
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Collection;
+import java.util.Collections;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTreeChangePublisher;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.spi.DefaultDataTreeCandidate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@NotThreadSafe
+final class ShardDataTreeChangePublisher extends AbstractDOMStoreTreeChangePublisher {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTreeChangePublisher.class);
+
+ void publishChanges(final DataTreeCandidate candidate) {
+ processCandidateTree(candidate);
+ }
+
+ @Override
+ protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations,
+ final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
+ final Collection<DataTreeCandidate> changes = Collections.<DataTreeCandidate>singleton(new DefaultDataTreeCandidate(path, node));
+
+ for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
+ reg.getInstance().onDataTreeChanged(changes);
+ }
+ }
+
+ @Override
+ protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
+ LOG.debug("Registration {} removed", registration);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+
+public abstract class ShardDataTreeCohort {
+ ShardDataTreeCohort() {
+ // Prevent foreign instantiation
+ }
+
+ abstract DataTreeCandidateTip getCandidate();
+
+ @VisibleForTesting
+ public abstract ListenableFuture<Boolean> canCommit();
+ @VisibleForTesting
+ public abstract ListenableFuture<Void> preCommit();
+ @VisibleForTesting
+ public abstract ListenableFuture<Void> abort();
+ @VisibleForTesting
+ public abstract ListenableFuture<Void> commit();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent;
+import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class ShardDataTreeNotificationManager implements NotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTreeNotificationManager.class);
+
+ @Override
+ public void submitNotification(final DataChangeListenerRegistration<?> listener, final DOMImmutableDataChangeEvent notification) {
+ LOG.debug("Notifying listener {} about {}", listener.getInstance(), notification);
+
+ listener.getInstance().onDataChanged(notification);
+ }
+
+ @Override
+ public void submitNotifications(final DataChangeListenerRegistration<?> listener, final Iterable<DOMImmutableDataChangeEvent> notifications) {
+ final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> instance = listener.getInstance();
+ LOG.debug("Notifying listener {} about {}", instance, notifications);
+
+ for (DOMImmutableDataChangeEvent n : notifications) {
+ instance.onDataChanged(n);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A transaction chain attached to a Shard.
+ */
+@NotThreadSafe
+final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardDataTreeTransactionChain.class);
+ private final ShardDataTree dataTree;
+ private final String chainId;
+
+ private ReadWriteShardDataTreeTransaction previousTx;
+ private ReadWriteShardDataTreeTransaction openTransaction;
+ private boolean closed;
+
+ ShardDataTreeTransactionChain(final String chainId, final ShardDataTree dataTree) {
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+ this.chainId = Preconditions.checkNotNull(chainId);
+ }
+
+ private DataTreeSnapshot getSnapshot() {
+ Preconditions.checkState(!closed, "TransactionChain %s has been closed", this);
+ Preconditions.checkState(openTransaction == null, "Transaction %s is open", openTransaction);
+
+ if (previousTx == null) {
+ return dataTree.getDataTree().takeSnapshot();
+ } else {
+ return previousTx.getSnapshot();
+ }
+ }
+
+ ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final String txId) {
+ final DataTreeSnapshot snapshot = getSnapshot();
+ LOG.debug("Allocated read-only transaction {} snapshot {}", txId, snapshot);
+
+ return new ReadOnlyShardDataTreeTransaction(txId, snapshot);
+ }
+
+ ReadWriteShardDataTreeTransaction newReadWriteTransaction(final String txId) {
+ final DataTreeSnapshot snapshot = getSnapshot();
+ LOG.debug("Allocated read-write transaction {} snapshot {}", txId, snapshot);
+
+ openTransaction = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
+ return openTransaction;
+ }
+
+ void close() {
+ closed = true;
+ }
+
+ @Override
+ protected void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction) {
+ if (transaction instanceof ReadWriteShardDataTreeTransaction) {
+ Preconditions.checkState(openTransaction != null, "Attempted to abort transaction %s while none is outstanding", transaction);
+ LOG.debug("Aborted transaction {}", transaction);
+ openTransaction = null;
+ }
+ }
+
+ @Override
+ protected ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction) {
+ Preconditions.checkState(openTransaction != null, "Attempted to finish transaction %s while none is outstanding", transaction);
+
+ // dataTree is finalizing ready the transaction, we just record it for the next
+ // transaction in chain
+ final ShardDataTreeCohort delegate = dataTree.finishTransaction(transaction);
+ openTransaction = null;
+ previousTx = transaction;
+ LOG.debug("Committing transaction {}", transaction);
+
+ return new ChainedCommitCohort(this, transaction, delegate);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("id", chainId).toString();
+ }
+
+ void clearTransaction(ReadWriteShardDataTreeTransaction transaction) {
+ if (transaction.equals(previousTx)) {
+ previousTx = null;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+abstract class ShardDataTreeTransactionParent {
+ abstract void abortTransaction(AbstractShardDataTreeTransaction<?> transaction);
+ abstract ShardDataTreeCohort finishTransaction(ReadWriteShardDataTreeTransaction transaction);
+}
import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.Address;
+import akka.actor.Cancellable;
import akka.actor.OneForOneStrategy;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.japi.Procedure;
import akka.persistence.RecoveryCompleted;
import akka.persistence.RecoveryFailure;
+import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.PersistentDataProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
+import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
-import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
+import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
+import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
*/
public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
- private final Logger LOG = LoggerFactory.getLogger(getClass());
+ private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);
// Stores a mapping between a member name and the address of the member
// Member names look like "member-1", "member-2" etc and are as specified
// A data store could be of type config/operational
private final String type;
+ private final String shardManagerIdentifierString;
+
private final ClusterWrapper cluster;
private final Configuration configuration;
this.datastoreContext = datastoreContext;
this.dataPersistenceProvider = createDataPersistenceProvider(datastoreContext.isPersistent());
this.type = datastoreContext.getDataStoreType();
+ this.shardManagerIdentifierString = ShardManagerIdentifier.builder().type(type).build().toString();
this.shardDispatcherPath =
new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
this.waitTillReadyCountdownLatch = waitTillReadyCountdownLatch;
}
protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
- return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
+ return (persistent) ? new PersistentDataProvider(this) : new NonPersistentDataProvider();
}
public static Props props(
@Override
public void handleCommand(Object message) throws Exception {
- if (FindPrimary.SERIALIZABLE_CLASS.isInstance(message)) {
- findPrimary(FindPrimary.fromSerializable(message));
+ if (message instanceof FindPrimary) {
+ findPrimary((FindPrimary)message);
} else if(message instanceof FindLocalShard){
findLocalShard((FindLocalShard) message);
} else if (message instanceof UpdateSchemaContext) {
onRoleChangeNotification((RoleChangeNotification) message);
} else if(message instanceof FollowerInitialSyncUpStatus){
onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
- } else{
+ } else if(message instanceof ShardNotInitializedTimeout) {
+ onShardNotInitializedTimeout((ShardNotInitializedTimeout)message);
+ } else if(message instanceof ShardLeaderStateChanged) {
+ onLeaderStateChanged((ShardLeaderStateChanged)message);
+ } else {
unknownMessage(message);
}
}
+ private void checkReady(){
+ if (isReadyWithLeaderId()) {
+ LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
+ persistenceId(), type, waitTillReadyCountdownLatch.getCount());
+
+ waitTillReadyCountdownLatch.countDown();
+ }
+ }
+
+ private void onLeaderStateChanged(ShardLeaderStateChanged leaderStateChanged) {
+ LOG.info("{}: Received LeaderStateChanged message: {}", persistenceId(), leaderStateChanged);
+
+ ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
+ if(shardInformation != null) {
+ shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
+ shardInformation.setLeaderId(leaderStateChanged.getLeaderId());
+ checkReady();
+ } else {
+ LOG.debug("No shard found with member Id {}", leaderStateChanged.getMemberId());
+ }
+ }
+
+ private void onShardNotInitializedTimeout(ShardNotInitializedTimeout message) {
+ ShardInformation shardInfo = message.getShardInfo();
+
+ LOG.debug("{}: Received ShardNotInitializedTimeout message for shard {}", persistenceId(),
+ shardInfo.getShardName());
+
+ shardInfo.removeOnShardInitialized(message.getOnShardInitialized());
+
+ if(!shardInfo.isShardInitialized()) {
+ LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(), shardInfo.getShardName());
+ message.getSender().tell(createNotInitializedException(shardInfo.shardId), getSelf());
+ } else {
+ LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
+ message.getSender().tell(createNoShardLeaderException(shardInfo.shardId), getSelf());
+ }
+ }
+
private void onFollowerInitialSyncStatus(FollowerInitialSyncUpStatus status) {
- LOG.info("Received follower initial sync status for {} status sync done {}", status.getName(),
- status.isInitialSyncDone());
+ LOG.info("{} Received follower initial sync status for {} status sync done {}", persistenceId(),
+ status.getName(), status.isInitialSyncDone());
ShardInformation shardInformation = findShardInformation(status.getName());
}
private void onRoleChangeNotification(RoleChangeNotification roleChanged) {
- LOG.info("Received role changed for {} from {} to {}", roleChanged.getMemberId(),
+ LOG.info("{}: Received role changed for {} from {} to {}", persistenceId(), roleChanged.getMemberId(),
roleChanged.getOldRole(), roleChanged.getNewRole());
ShardInformation shardInformation = findShardInformation(roleChanged.getMemberId());
if(shardInformation != null) {
shardInformation.setRole(roleChanged.getNewRole());
-
- if (isReady()) {
- LOG.info("All Shards are ready - data store {} is ready, available count is {}", type,
- waitTillReadyCountdownLatch.getCount());
-
- waitTillReadyCountdownLatch.countDown();
- }
-
+ checkReady();
mBean.setSyncStatus(isInSync());
}
}
return null;
}
- private boolean isReady() {
+ private boolean isReadyWithLeaderId() {
boolean isReady = true;
for (ShardInformation info : localShards.values()) {
- if(RaftState.Candidate.name().equals(info.getRole()) || Strings.isNullOrEmpty(info.getRole())){
+ if(!info.isShardReadyWithLeaderId()){
isReady = false;
break;
}
if (shardId.getShardName() == null) {
return;
}
+
markShardAsInitialized(shardId.getShardName());
}
private void markShardAsInitialized(String shardName) {
- LOG.debug("Initializing shard [{}]", shardName);
+ LOG.debug("{}: Initializing shard [{}]", persistenceId(), shardName);
+
ShardInformation shardInformation = localShards.get(shardName);
if (shardInformation != null) {
shardInformation.setActorInitialized();
+
+ shardInformation.getActor().tell(new RegisterRoleChangeListener(), self());
}
}
return;
}
- sendResponse(shardInformation, message.isWaitUntilInitialized(), new Supplier<Object>() {
+ sendResponse(shardInformation, message.isWaitUntilInitialized(), false, new Supplier<Object>() {
@Override
public Object get() {
return new LocalShardFound(shardInformation.getActor());
});
}
- private void sendResponse(ShardInformation shardInformation, boolean waitUntilInitialized,
- final Supplier<Object> messageSupplier) {
- if (!shardInformation.isShardInitialized()) {
- if(waitUntilInitialized) {
+ private void sendResponse(ShardInformation shardInformation, boolean doWait,
+ boolean wantShardReady, final Supplier<Object> messageSupplier) {
+ if (!shardInformation.isShardInitialized() || (wantShardReady && !shardInformation.isShardReadyWithLeaderId())) {
+ if(doWait) {
final ActorRef sender = getSender();
final ActorRef self = self();
- shardInformation.addRunnableOnInitialized(new Runnable() {
+
+ Runnable replyRunnable = new Runnable() {
@Override
public void run() {
sender.tell(messageSupplier.get(), self);
}
- });
+ };
+
+ OnShardInitialized onShardInitialized = wantShardReady ? new OnShardReady(replyRunnable) :
+ new OnShardInitialized(replyRunnable);
+
+ shardInformation.addOnShardInitialized(onShardInitialized);
+
+ LOG.debug("{}: Scheduling timer to wait for shard {}", persistenceId(), shardInformation.getShardName());
+
+ Cancellable timeoutSchedule = getContext().system().scheduler().scheduleOnce(
+ datastoreContext.getShardInitializationTimeout().duration(), getSelf(),
+ new ShardNotInitializedTimeout(shardInformation, onShardInitialized, sender),
+ getContext().dispatcher(), getSelf());
+
+ onShardInitialized.setTimeoutSchedule(timeoutSchedule);
+
+ } else if (!shardInformation.isShardInitialized()) {
+ LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(),
+ shardInformation.getShardName());
+ getSender().tell(createNotInitializedException(shardInformation.shardId), getSelf());
} else {
- getSender().tell(new ActorNotInitialized(), getSelf());
+ LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
+ shardInformation.getShardName());
+ getSender().tell(createNoShardLeaderException(shardInformation.shardId), getSelf());
}
return;
getSender().tell(messageSupplier.get(), getSelf());
}
+ private NoShardLeaderException createNoShardLeaderException(ShardIdentifier shardId) {
+ return new NoShardLeaderException(String.format(
+ "Could not find a leader for shard %s. This typically happens when the system is coming up or " +
+ "recovering and a leader is being elected. Try again later.", shardId));
+ }
+
+ private NotInitializedException createNotInitializedException(ShardIdentifier shardId) {
+ return new NotInitializedException(String.format(
+ "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
+ }
+
private void memberRemoved(ClusterEvent.MemberRemoved message) {
+ String memberName = message.member().roles().head();
+
+ LOG.debug("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
+ message.member().address());
+
memberNameToAddress.remove(message.member().roles().head());
}
private void memberUp(ClusterEvent.MemberUp message) {
String memberName = message.member().roles().head();
+ LOG.debug("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
+ message.member().address());
+
memberNameToAddress.put(memberName, message.member().address());
for(ShardInformation info : localShards.values()){
String shardName = info.getShardName();
- info.updatePeerAddress(getShardIdentifier(memberName, shardName),
- getShardActorPath(shardName, memberName));
+ info.updatePeerAddress(getShardIdentifier(memberName, shardName).toString(),
+ getShardActorPath(shardName, memberName), getSelf());
}
+
+ checkReady();
}
private void onDatastoreContext(DatastoreContext context) {
LOG.debug("Sending new SchemaContext to Shards");
for (ShardInformation info : localShards.values()) {
if (info.getActor() == null) {
- info.setActor(getContext().actorOf(Shard.props(info.getShardId(),
- info.getPeerAddresses(), datastoreContext, schemaContext)
- .withDispatcher(shardDispatcherPath), info.getShardId().toString()));
+ info.setActor(newShardActor(schemaContext, info));
} else {
info.getActor().tell(message, getSelf());
}
- info.getActor().tell(new RegisterRoleChangeListener(), self());
}
}
}
+ @VisibleForTesting
+ protected ClusterWrapper getCluster() {
+ return cluster;
+ }
+
+ @VisibleForTesting
+ protected ActorRef newShardActor(final SchemaContext schemaContext, ShardInformation info) {
+ return getContext().actorOf(Shard.props(info.getShardId(),
+ info.getPeerAddresses(), datastoreContext, schemaContext)
+ .withDispatcher(shardDispatcherPath), info.getShardId().toString());
+ }
+
private void findPrimary(FindPrimary message) {
- String shardName = message.getShardName();
+ LOG.debug("{}: In findPrimary: {}", persistenceId(), message);
+
+ final String shardName = message.getShardName();
+ final boolean canReturnLocalShardState = !(message instanceof RemoteFindPrimary);
// First see if the there is a local replica for the shard
final ShardInformation info = localShards.get(shardName);
if (info != null) {
- sendResponse(info, message.isWaitUntilInitialized(), new Supplier<Object>() {
+ sendResponse(info, message.isWaitUntilReady(), true, new Supplier<Object>() {
@Override
public Object get() {
- return new PrimaryFound(info.getActorPath().toString()).toSerializable();
+ String primaryPath = info.getSerializedLeaderActor();
+ Object found = canReturnLocalShardState && info.isLeader() ?
+ new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
+ new RemotePrimaryShardFound(primaryPath);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
+ }
+
+ return found;
}
});
return;
}
- List<String> members = configuration.getMembersFromShardName(shardName);
+ for(Map.Entry<String, Address> entry: memberNameToAddress.entrySet()) {
+ if(!cluster.getCurrentMemberName().equals(entry.getKey())) {
+ String path = getShardManagerActorPathBuilder(entry.getValue()).toString();
- if(cluster.getCurrentMemberName() != null) {
- members.remove(cluster.getCurrentMemberName());
- }
+ LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}", persistenceId(),
+ shardName, path);
- /**
- * FIXME: Instead of sending remote shard actor path back to sender,
- * forward FindPrimary message to remote shard manager
- */
- // There is no way for us to figure out the primary (for now) so assume
- // that one of the remote nodes is a primary
- for(String memberName : members) {
- Address address = memberNameToAddress.get(memberName);
- if(address != null){
- String path =
- getShardActorPath(shardName, memberName);
- getSender().tell(new PrimaryFound(path).toSerializable(), getSelf());
+ getContext().actorSelection(path).forward(new RemoteFindPrimary(shardName,
+ message.isWaitUntilReady()), getContext());
return;
}
}
- getSender().tell(new PrimaryNotFound(shardName).toSerializable(), getSelf());
+
+ LOG.debug("{}: No shard found for {}", persistenceId(), shardName);
+
+ getSender().tell(new PrimaryNotFoundException(
+ String.format("No primary shard found for %s.", shardName)), getSelf());
+ }
+
+ private StringBuilder getShardManagerActorPathBuilder(Address address) {
+ StringBuilder builder = new StringBuilder();
+ builder.append(address.toString()).append("/user/").append(shardManagerIdentifierString);
+ return builder;
}
private String getShardActorPath(String shardName, String memberName) {
Address address = memberNameToAddress.get(memberName);
if(address != null) {
- StringBuilder builder = new StringBuilder();
- builder.append(address.toString())
- .append("/user/")
- .append(ShardManagerIdentifier.builder().type(type).build().toString())
- .append("/")
+ StringBuilder builder = getShardManagerActorPathBuilder(address);
+ builder.append("/")
.append(getShardIdentifier(memberName, shardName));
return builder.toString();
}
List<String> localShardActorNames = new ArrayList<>();
for(String shardName : memberShardNames){
ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
- Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
+ Map<String, String> peerAddresses = getPeerAddresses(shardName);
localShardActorNames.add(shardId.toString());
localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses));
}
* @param shardName
* @return
*/
- private Map<ShardIdentifier, String> getPeerAddresses(String shardName){
+ private Map<String, String> getPeerAddresses(String shardName){
- Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+ Map<String, String> peerAddresses = new HashMap<>();
- List<String> members =
- this.configuration.getMembersFromShardName(shardName);
+ List<String> members = this.configuration.getMembersFromShardName(shardName);
String currentMemberName = this.cluster.getCurrentMemberName();
for(String memberName : members){
if(!currentMemberName.equals(memberName)){
- ShardIdentifier shardId = getShardIdentifier(memberName,
- shardName);
- String path =
- getShardActorPath(shardName, currentMemberName);
- peerAddresses.put(shardId, path);
+ ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
+ String path = getShardActorPath(shardName, currentMemberName);
+ peerAddresses.put(shardId.toString(), path);
}
}
return peerAddresses;
return mBean;
}
- private class ShardInformation {
+ @VisibleForTesting
+ protected static class ShardInformation {
private final ShardIdentifier shardId;
private final String shardName;
private ActorRef actor;
private ActorPath actorPath;
- private final Map<ShardIdentifier, String> peerAddresses;
+ private final Map<String, String> peerAddresses;
+ private Optional<DataTree> localShardDataTree;
// flag that determines if the actor is ready for business
private boolean actorInitialized = false;
private boolean followerSyncStatus = false;
- private final List<Runnable> runnablesOnInitialized = Lists.newArrayList();
+ private final Set<OnShardInitialized> onShardInitializedSet = Sets.newHashSet();
private String role ;
+ private String leaderId;
private ShardInformation(String shardName, ShardIdentifier shardId,
- Map<ShardIdentifier, String> peerAddresses) {
+ Map<String, String> peerAddresses) {
this.shardName = shardName;
this.shardId = shardId;
this.peerAddresses = peerAddresses;
return shardId;
}
- Map<ShardIdentifier, String> getPeerAddresses() {
+ void setLocalDataTree(Optional<DataTree> localShardDataTree) {
+ this.localShardDataTree = localShardDataTree;
+ }
+
+ Optional<DataTree> getLocalShardDataTree() {
+ return localShardDataTree;
+ }
+
+ Map<String, String> getPeerAddresses() {
return peerAddresses;
}
- void updatePeerAddress(ShardIdentifier peerId, String peerAddress){
+ void updatePeerAddress(String peerId, String peerAddress, ActorRef sender){
LOG.info("updatePeerAddress for peer {} with address {}", peerId,
peerAddress);
if(peerAddresses.containsKey(peerId)){
peerId, peerAddress, actor.path());
}
- actor.tell(new PeerAddressResolved(peerId, peerAddress), getSelf());
+ actor.tell(new PeerAddressResolved(peerId.toString(), peerAddress), sender);
}
+
+ notifyOnShardInitializedCallbacks();
}
}
+ boolean isShardReady() {
+ return !RaftState.Candidate.name().equals(role) && !Strings.isNullOrEmpty(role);
+ }
+
+ boolean isShardReadyWithLeaderId() {
+ return isShardReady() && (isLeader() || peerAddresses.get(leaderId) != null);
+ }
+
boolean isShardInitialized() {
return getActor() != null && actorInitialized;
}
+ boolean isLeader() {
+ return Objects.equal(leaderId, shardId.toString());
+ }
+
+ String getSerializedLeaderActor() {
+ if(isLeader()) {
+ return Serialization.serializedActorPath(getActor());
+ } else {
+ return peerAddresses.get(leaderId);
+ }
+ }
+
void setActorInitialized() {
+ LOG.debug("Shard {} is initialized", shardId);
+
this.actorInitialized = true;
- for(Runnable runnable: runnablesOnInitialized) {
- runnable.run();
+ notifyOnShardInitializedCallbacks();
+ }
+
+ private void notifyOnShardInitializedCallbacks() {
+ if(onShardInitializedSet.isEmpty()) {
+ return;
+ }
+
+ boolean ready = isShardReadyWithLeaderId();
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId,
+ ready ? "ready" : "initialized", onShardInitializedSet.size());
}
- runnablesOnInitialized.clear();
+ Iterator<OnShardInitialized> iter = onShardInitializedSet.iterator();
+ while(iter.hasNext()) {
+ OnShardInitialized onShardInitialized = iter.next();
+ if(!(onShardInitialized instanceof OnShardReady) || ready) {
+ iter.remove();
+ onShardInitialized.getTimeoutSchedule().cancel();
+ onShardInitialized.getReplyRunnable().run();
+ }
+ }
}
- void addRunnableOnInitialized(Runnable runnable) {
- runnablesOnInitialized.add(runnable);
+ void addOnShardInitialized(OnShardInitialized onShardInitialized) {
+ onShardInitializedSet.add(onShardInitialized);
}
- public void setRole(String newRole) {
- this.role = newRole;
+ void removeOnShardInitialized(OnShardInitialized onShardInitialized) {
+ onShardInitializedSet.remove(onShardInitialized);
}
- public String getRole(){
- return this.role;
+ void setRole(String newRole) {
+ this.role = newRole;
+
+ notifyOnShardInitializedCallbacks();
}
- public void setFollowerSyncStatus(boolean syncStatus){
+ void setFollowerSyncStatus(boolean syncStatus){
this.followerSyncStatus = syncStatus;
}
- public boolean isInSync(){
+ boolean isInSync(){
if(RaftState.Follower.name().equals(this.role)){
return followerSyncStatus;
} else if(RaftState.Leader.name().equals(this.role)){
return false;
}
+ void setLeaderId(String leaderId) {
+ this.leaderId = leaderId;
+
+ notifyOnShardInitializedCallbacks();
+ }
}
private static class ShardManagerCreator implements Creator<ShardManager> {
}
}
+ private static class OnShardInitialized {
+ private final Runnable replyRunnable;
+ private Cancellable timeoutSchedule;
+
+ OnShardInitialized(Runnable replyRunnable) {
+ this.replyRunnable = replyRunnable;
+ }
+
+ Runnable getReplyRunnable() {
+ return replyRunnable;
+ }
+
+ Cancellable getTimeoutSchedule() {
+ return timeoutSchedule;
+ }
+
+ void setTimeoutSchedule(Cancellable timeoutSchedule) {
+ this.timeoutSchedule = timeoutSchedule;
+ }
+ }
+
+ private static class OnShardReady extends OnShardInitialized {
+ OnShardReady(Runnable replyRunnable) {
+ super(replyRunnable);
+ }
+ }
+
+ private static class ShardNotInitializedTimeout {
+ private final ActorRef sender;
+ private final ShardInformation shardInfo;
+ private final OnShardInitialized onShardInitialized;
+
+ ShardNotInitializedTimeout(ShardInformation shardInfo, OnShardInitialized onShardInitialized, ActorRef sender) {
+ this.sender = sender;
+ this.shardInfo = shardInfo;
+ this.onShardInitialized = onShardInitialized;
+ }
+
+ ActorRef getSender() {
+ return sender;
+ }
+
+ ShardInformation getShardInfo() {
+ return shardInfo;
+ }
+
+ OnShardInitialized getOnShardInitialized() {
+ return onShardInitialized;
+ }
+ }
+
static class SchemaContextModules implements Serializable {
private static final long serialVersionUID = -8884620101025936590L;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
public class ShardReadTransaction extends ShardTransaction {
private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
- private final DOMStoreReadTransaction transaction;
+ private final AbstractShardDataTreeTransaction<?> transaction;
- public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ public ShardReadTransaction(AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
final ActorRef sender = getSender();
final ActorRef self = getSelf();
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> future = transaction.read(DATASTORE_ROOT);
+ final Optional<NormalizedNode<?, ?>> result = transaction.getSnapshot().readNode(DATASTORE_ROOT);
- Futures.addCallback(future, new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
- @Override
- public void onSuccess(Optional<NormalizedNode<?, ?>> result) {
- byte[] serialized = SerializationUtils.serializeNormalizedNode(result.get());
- sender.tell(new CaptureSnapshotReply(serialized), self);
+ byte[] serialized = SerializationUtils.serializeNormalizedNode(result.get());
+ sender.tell(new CaptureSnapshotReply(serialized), self);
- self.tell(PoisonPill.getInstance(), self);
- }
-
- @Override
- public void onFailure(Throwable t) {
- sender.tell(new akka.actor.Status.Failure(t), self);
-
- self.tell(PoisonPill.getInstance(), self);
- }
- });
+ self.tell(PoisonPill.getInstance(), self);
}
@Override
- protected DOMStoreTransaction getDOMStoreTransaction() {
+ protected AbstractShardDataTreeTransaction<?> getDOMStoreTransaction() {
return transaction;
}
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
* Date: 8/6/14
*/
public class ShardReadWriteTransaction extends ShardWriteTransaction {
- private final DOMStoreReadWriteTransaction transaction;
-
- public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(transaction, shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
- this.transaction = transaction;
+ public ShardReadWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(transaction, shardActor, shardStats, transactionID, clientTxVersion);
}
@Override
public void handleReceive(Object message) throws Exception {
if (message instanceof ReadData) {
- readData(transaction, (ReadData) message, !SERIALIZED_REPLY);
+ readData((ReadData) message, !SERIALIZED_REPLY);
} else if (message instanceof DataExists) {
- dataExists(transaction, (DataExists) message, !SERIALIZED_REPLY);
+ dataExists((DataExists) message, !SERIALIZED_REPLY);
} else if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction, ReadData.fromSerializable(message), SERIALIZED_REPLY);
+ readData(ReadData.fromSerializable(message), SERIALIZED_REPLY);
} else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
- dataExists(transaction, DataExists.fromSerializable(message), SERIALIZED_REPLY);
-
+ dataExists(DataExists.fromSerializable(message), SERIALIZED_REPLY);
} else {
super.handleReceive(message);
}
*/
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.slf4j.Logger;
/**
* committed to the data store in the order the corresponding snapshot or log batch are received
* to preserve data store integrity.
*
- * @author Thomas Panetelis
+ * @author Thomas Pantelis
*/
-class ShardRecoveryCoordinator {
-
- private static final int TIME_OUT = 10;
-
- private final List<DOMStoreWriteTransaction> resultingTxList = Lists.newArrayList();
- private final SchemaContext schemaContext;
+class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
+ private static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
+ private final DataTree store;
private final String shardName;
- private final ExecutorService executor;
private final Logger log;
- private final String name;
+ private DataTreeModification transaction;
+ private int size;
- ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, Logger log,
- String name) {
- this.schemaContext = schemaContext;
+ ShardRecoveryCoordinator(ShardDataTree store, String shardName, Logger log) {
+ this.store = store.getDataTree();
this.shardName = shardName;
this.log = log;
- this.name = name;
-
- executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("ShardRecovery-" + shardName + "-%d").build());
- }
-
- /**
- * Submits a batch of journal log entries.
- *
- * @param logEntries the serialized journal log entries
- * @param resultingTx the write Tx to which to apply the entries
- */
- void submit(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
- LogRecoveryTask task = new LogRecoveryTask(logEntries, resultingTx);
- resultingTxList.add(resultingTx);
- executor.execute(task);
}
- /**
- * Submits a snapshot.
- *
- * @param snapshotBytes the serialized snapshot
- * @param resultingTx the write Tx to which to apply the entries
- */
- void submit(byte[] snapshotBytes, DOMStoreWriteTransaction resultingTx) {
- SnapshotRecoveryTask task = new SnapshotRecoveryTask(snapshotBytes, resultingTx);
- resultingTxList.add(resultingTx);
- executor.execute(task);
+ @Override
+ public void startLogRecoveryBatch(int maxBatchSize) {
+ log.debug("{}: starting log recovery batch with max size {}", shardName, maxBatchSize);
+ transaction = store.takeSnapshot().newModification();
+ size = 0;
}
- Collection<DOMStoreWriteTransaction> getTransactions() {
- // Shutdown the executor and wait for task completion.
- executor.shutdown();
-
+ @Override
+ public void appendRecoveredLogEntry(Payload payload) {
try {
- if(executor.awaitTermination(TIME_OUT, TimeUnit.MINUTES)) {
- return resultingTxList;
+ if (payload instanceof DataTreeCandidatePayload) {
+ DataTreeCandidates.applyToModification(transaction, ((DataTreeCandidatePayload)payload).getCandidate());
+ size++;
+ } else if (payload instanceof ModificationPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((ModificationPayload) payload).getModification()).apply(transaction);
+ size++;
+ } else if (payload instanceof CompositeModificationPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((CompositeModificationPayload) payload).getModification()).apply(transaction);
+ size++;
+ } else if (payload instanceof CompositeModificationByteStringPayload) {
+ MutableCompositeModification.fromSerializable(
+ ((CompositeModificationByteStringPayload) payload).getModification()).apply(transaction);
+ size++;
} else {
- log.error("{}: Recovery for shard {} timed out after {} minutes", name, shardName, TIME_OUT);
+ log.error("{}: Unknown payload {} received during recovery", shardName, payload);
}
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
+ } catch (IOException | ClassNotFoundException e) {
+ log.error("{}: Error extracting ModificationPayload", shardName, e);
}
-
- return Collections.emptyList();
}
- private static abstract class ShardRecoveryTask implements Runnable {
-
- final DOMStoreWriteTransaction resultingTx;
-
- ShardRecoveryTask(DOMStoreWriteTransaction resultingTx) {
- this.resultingTx = resultingTx;
- }
+ private void commitTransaction(DataTreeModification tx) throws DataValidationFailedException {
+ tx.ready();
+ store.validate(tx);
+ store.commit(store.prepare(tx));
}
- private class LogRecoveryTask extends ShardRecoveryTask {
-
- private final List<Object> logEntries;
-
- LogRecoveryTask(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
- super(resultingTx);
- this.logEntries = logEntries;
- }
-
- @Override
- public void run() {
- for(int i = 0; i < logEntries.size(); i++) {
- MutableCompositeModification.fromSerializable(
- logEntries.get(i)).apply(resultingTx);
- // Null out to GC quicker.
- logEntries.set(i, null);
- }
+ /**
+ * Applies the current batched log entries to the data store.
+ */
+ @Override
+ public void applyCurrentLogRecoveryBatch() {
+ log.debug("{}: Applying current log recovery batch with size {}", shardName, size);
+ try {
+ commitTransaction(transaction);
+ } catch (DataValidationFailedException e) {
+ log.error("{}: Failed to apply recovery batch", shardName, e);
}
+ transaction = null;
}
- private class SnapshotRecoveryTask extends ShardRecoveryTask {
-
- private final byte[] snapshotBytes;
-
- SnapshotRecoveryTask(byte[] snapshotBytes, DOMStoreWriteTransaction resultingTx) {
- super(resultingTx);
- this.snapshotBytes = snapshotBytes;
- }
-
- @Override
- public void run() {
- NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-
- // delete everything first
- resultingTx.delete(YangInstanceIdentifier.builder().build());
+ /**
+ * Applies a recovered snapshot to the data store.
+ *
+ * @param snapshotBytes the serialized snapshot
+ */
+ @Override
+ public void applyRecoverySnapshot(final byte[] snapshotBytes) {
+ log.debug("{}: Applying recovered snapshot", shardName);
- // Add everything from the remote node back
- resultingTx.write(YangInstanceIdentifier.builder().build(), node);
+ final NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+ final DataTreeModification tx = store.takeSnapshot().newModification();
+ tx.write(ROOT, node);
+ try {
+ commitTransaction(tx);
+ } catch (DataValidationFailedException e) {
+ log.error("{}: Failed to apply recovery snapshot", shardName, e);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import akka.actor.ActorRef;
+import java.util.concurrent.ExecutionException;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
+import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+
+/**
+ * Participates in raft snapshotting on behalf of a Shard actor.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardSnapshotCohort implements RaftActorSnapshotCohort {
+
+ private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
+
+ private int createSnapshotTransactionCounter;
+ private final ShardTransactionActorFactory transactionActorFactory;
+ private final ShardDataTree store;
+ private final Logger log;
+ private final String logId;
+
+ ShardSnapshotCohort(ShardTransactionActorFactory transactionActorFactory, ShardDataTree store,
+ Logger log, String logId) {
+ this.transactionActorFactory = transactionActorFactory;
+ this.store = Preconditions.checkNotNull(store);
+ this.log = log;
+ this.logId = logId;
+ }
+
+ @Override
+ public void createSnapshot(ActorRef actorRef) {
+ // Create a transaction actor. We are really going to treat the transaction as a worker
+ // so that this actor does not get block building the snapshot. THe transaction actor will
+ // after processing the CreateSnapshot message.
+
+ ShardTransactionIdentifier transactionID = new ShardTransactionIdentifier(
+ "createSnapshot" + ++createSnapshotTransactionCounter);
+
+ ActorRef createSnapshotTransaction = transactionActorFactory.newShardTransaction(
+ TransactionProxy.TransactionType.READ_ONLY, transactionID, "", DataStoreVersions.CURRENT_VERSION);
+
+ createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, actorRef);
+ }
+
+ @Override
+ public void applySnapshot(byte[] snapshotBytes) {
+ // Since this will be done only on Recovery or when this actor is a Follower
+ // we can safely commit everything in here. We not need to worry about event notifications
+ // as they would have already been disabled on the follower
+
+ log.info("{}: Applying snapshot", logId);
+
+ try {
+ ReadWriteShardDataTreeTransaction transaction = store.newReadWriteTransaction("snapshot-" + logId, null);
+
+ NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
+
+ // delete everything first
+ transaction.getSnapshot().delete(DATASTORE_ROOT);
+
+ // Add everything from the remote node back
+ transaction.getSnapshot().write(DATASTORE_ROOT, node);
+ syncCommitTransaction(transaction);
+ } catch (InterruptedException | ExecutionException e) {
+ log.error("{}: An exception occurred when applying snapshot", logId, e);
+ } finally {
+ log.info("{}: Done applying snapshot", logId);
+ }
+
+ }
+
+ void syncCommitTransaction(final ReadWriteShardDataTreeTransaction transaction)
+ throws ExecutionException, InterruptedException {
+ ShardDataTreeCohort commitCohort = store.finishTransaction(transaction);
+ commitCohort.preCommit().get();
+ commitCohort.commit().get();
+ }
+}
import akka.actor.ReceiveTimeout;
import akka.japi.Creator;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* The ShardTransaction Actor represents a remote transaction
protected static final boolean SERIALIZED_REPLY = true;
private final ActorRef shardActor;
- private final SchemaContext schemaContext;
private final ShardStats shardStats;
private final String transactionID;
private final short clientTxVersion;
- protected ShardTransaction(ActorRef shardActor, SchemaContext schemaContext,
- ShardStats shardStats, String transactionID, short clientTxVersion) {
+ protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, String transactionID,
+ short clientTxVersion) {
super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
this.shardActor = shardActor;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
this.transactionID = transactionID;
this.clientTxVersion = clientTxVersion;
}
- public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats,
- String transactionID, short txnClientVersion) {
- return Props.create(new ShardTransactionCreator(transaction, shardActor, schemaContext,
+ public static Props props(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
+ DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
+ return Props.create(new ShardTransactionCreator(type, transaction, shardActor,
datastoreContext, shardStats, transactionID, txnClientVersion));
}
- protected abstract DOMStoreTransaction getDOMStoreTransaction();
+ protected abstract AbstractShardDataTreeTransaction<?> getDOMStoreTransaction();
protected ActorRef getShardActor() {
return shardActor;
return transactionID;
}
- protected SchemaContext getSchemaContext() {
- return schemaContext;
- }
-
protected short getClientTxVersion() {
return clientTxVersion;
}
}
private void closeTransaction(boolean sendReply) {
- getDOMStoreTransaction().close();
+ getDOMStoreTransaction().abort();
if(sendReply && returnCloseTransactionReply()) {
getSender().tell(CloseTransactionReply.INSTANCE.toSerializable(), getSelf());
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
- protected void readData(DOMStoreReadTransaction transaction, ReadData message,
- final boolean returnSerialized) {
-
- final YangInstanceIdentifier path = message.getPath();
- try {
- final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future = transaction.read(path);
- Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
- ReadDataReply readDataReply = new ReadDataReply(optional.orNull(), clientTxVersion);
+ private boolean checkClosed(AbstractShardDataTreeTransaction<?> transaction) {
+ final boolean ret = transaction.isClosed();
+ if (ret) {
+ shardStats.incrementFailedReadTransactionsCount();
+ getSender().tell(new akka.actor.Status.Failure(new ReadFailedException("Transaction is closed")), getSelf());
+ }
+ return ret;
+ }
- sender().tell((returnSerialized ? readDataReply.toSerializable(): readDataReply), self());
+ protected void readData(AbstractShardDataTreeTransaction<?> transaction, ReadData message,
+ final boolean returnSerialized) {
- } catch (Exception e) {
- LOG.debug(String.format("Unexpected error reading path %s", path), e);
- shardStats.incrementFailedReadTransactionsCount();
- sender().tell(new akka.actor.Status.Failure(e), self());
+ if (checkClosed(transaction)) {
+ return;
}
+
+ final YangInstanceIdentifier path = message.getPath();
+ Optional<NormalizedNode<?, ?>> optional = transaction.getSnapshot().readNode(path);
+ ReadDataReply readDataReply = new ReadDataReply(optional.orNull(), clientTxVersion);
+ sender().tell((returnSerialized ? readDataReply.toSerializable(): readDataReply), self());
}
- protected void dataExists(DOMStoreReadTransaction transaction, DataExists message,
+ protected void dataExists(AbstractShardDataTreeTransaction<?> transaction, DataExists message,
final boolean returnSerialized) {
- final YangInstanceIdentifier path = message.getPath();
- try {
- Boolean exists = transaction.exists(path).checkedGet();
- DataExistsReply dataExistsReply = new DataExistsReply(exists);
- getSender().tell(returnSerialized ? dataExistsReply.toSerializable() :
- dataExistsReply, getSelf());
- } catch (ReadFailedException e) {
- getSender().tell(new akka.actor.Status.Failure(e),getSelf());
+ if (checkClosed(transaction)) {
+ return;
}
+ final YangInstanceIdentifier path = message.getPath();
+ boolean exists = transaction.getSnapshot().readNode(path).isPresent();
+ DataExistsReply dataExistsReply = DataExistsReply.create(exists);
+ getSender().tell(returnSerialized ? dataExistsReply.toSerializable() :
+ dataExistsReply, getSelf());
}
private static class ShardTransactionCreator implements Creator<ShardTransaction> {
private static final long serialVersionUID = 1L;
- final DOMStoreTransaction transaction;
+ final AbstractShardDataTreeTransaction<?> transaction;
final ActorRef shardActor;
- final SchemaContext schemaContext;
final DatastoreContext datastoreContext;
final ShardStats shardStats;
final String transactionID;
final short txnClientVersion;
+ final TransactionType type;
- ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, DatastoreContext datastoreContext,
- ShardStats shardStats, String transactionID, short txnClientVersion) {
- this.transaction = transaction;
+ ShardTransactionCreator(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
+ DatastoreContext datastoreContext, ShardStats shardStats, String transactionID, short txnClientVersion) {
+ this.transaction = Preconditions.checkNotNull(transaction);
this.shardActor = shardActor;
this.shardStats = shardStats;
- this.schemaContext = schemaContext;
this.datastoreContext = datastoreContext;
this.transactionID = transactionID;
this.txnClientVersion = txnClientVersion;
+ this.type = type;
}
@Override
public ShardTransaction create() throws Exception {
- ShardTransaction tx;
- if(transaction instanceof DOMStoreReadWriteTransaction) {
- tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
- } else if(transaction instanceof DOMStoreReadTransaction) {
- tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- schemaContext, shardStats, transactionID, txnClientVersion);
- } else {
- tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
+ final ShardTransaction tx;
+ switch (type) {
+ case READ_ONLY:
+ tx = new ShardReadTransaction(transaction, shardActor,
+ shardStats, transactionID, txnClientVersion);
+ break;
+ case READ_WRITE:
+ tx = new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction)transaction,
+ shardActor, shardStats, transactionID, txnClientVersion);
+ break;
+ case WRITE_ONLY:
+ tx = new ShardWriteTransaction((ReadWriteShardDataTreeTransaction)transaction,
+ shardActor, shardStats, transactionID, txnClientVersion);
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled transaction type " + type);
}
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
package org.opendaylight.controller.cluster.datastore;
+import com.google.common.base.Preconditions;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
*/
public class ShardTransactionChain extends AbstractUntypedActor {
- private final DOMStoreTransactionChain chain;
+ private final ShardDataTreeTransactionChain chain;
private final DatastoreContext datastoreContext;
- private final SchemaContext schemaContext;
private final ShardStats shardStats;
- public ShardTransactionChain(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, ShardStats shardStats) {
- this.chain = chain;
+ public ShardTransactionChain(ShardDataTreeTransactionChain chain, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
+ this.chain = Preconditions.checkNotNull(chain);
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
}
private ActorRef createTypedTransactionActor(CreateTransaction createTransaction) {
String transactionName = "shard-" + createTransaction.getTransactionId();
- if(createTransaction.getTransactionType() ==
- TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
- return getContext().actorOf(
- ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
- createTransaction.getVersion()), transactionName);
- } else if (createTransaction.getTransactionType() ==
- TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
- return getContext().actorOf(
- ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
- createTransaction.getVersion()), transactionName);
- } else if (createTransaction.getTransactionType() ==
- TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
- return getContext().actorOf(
- ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId(),
- createTransaction.getVersion()), transactionName);
- } else {
- throw new IllegalArgumentException (
- "CreateTransaction message has unidentified transaction type=" +
- createTransaction.getTransactionType());
+
+ final TransactionType type = TransactionType.fromInt(createTransaction.getTransactionType());
+ final AbstractShardDataTreeTransaction<?> transaction;
+ switch (type) {
+ case READ_ONLY:
+ transaction = chain.newReadOnlyTransaction(transactionName);
+ break;
+ case READ_WRITE:
+ case WRITE_ONLY:
+ transaction = chain.newReadWriteTransaction(transactionName);
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled transaction type " + type);
}
+
+ return getContext().actorOf(
+ ShardTransaction.props(type, transaction, getShardActor(),
+ datastoreContext, shardStats, createTransaction.getTransactionId(),
+ createTransaction.getVersion()), transactionName);
}
private void createTransaction(CreateTransaction createTransaction) {
createTransaction.getTransactionId()).toSerializable(), getSelf());
}
- public static Props props(DOMStoreTransactionChain chain, SchemaContext schemaContext,
+ public static Props props(ShardDataTreeTransactionChain chain, SchemaContext schemaContext,
DatastoreContext datastoreContext, ShardStats shardStats) {
- return Props.create(new ShardTransactionChainCreator(chain, schemaContext,
- datastoreContext, shardStats));
+ return Props.create(new ShardTransactionChainCreator(chain, datastoreContext, shardStats));
}
private static class ShardTransactionChainCreator implements Creator<ShardTransactionChain> {
private static final long serialVersionUID = 1L;
- final DOMStoreTransactionChain chain;
+ final ShardDataTreeTransactionChain chain;
final DatastoreContext datastoreContext;
- final SchemaContext schemaContext;
final ShardStats shardStats;
-
- ShardTransactionChainCreator(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, ShardStats shardStats) {
+ ShardTransactionChainCreator(ShardDataTreeTransactionChain chain, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
this.shardStats = shardStats;
}
@Override
public ShardTransactionChain create() throws Exception {
- return new ShardTransactionChain(chain, schemaContext, datastoreContext, shardStats);
+ return new ShardTransactionChain(chain, datastoreContext, shardStats);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import akka.actor.ActorRef;
+import akka.actor.UntypedActorContext;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+
+/**
+ * A factory for creating ShardTransaction actors.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardTransactionActorFactory {
+
+ private final ShardDataTree dataTree;
+ private final DatastoreContext datastoreContext;
+ private final String txnDispatcherPath;
+ private final ShardStats shardMBean;
+ private final UntypedActorContext actorContext;
+ private final ActorRef shardActor;
+
+ ShardTransactionActorFactory(ShardDataTree dataTree, DatastoreContext datastoreContext,
+ String txnDispatcherPath, ActorRef shardActor, UntypedActorContext actorContext, ShardStats shardMBean) {
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+ this.datastoreContext = datastoreContext;
+ this.txnDispatcherPath = txnDispatcherPath;
+ this.shardMBean = shardMBean;
+ this.actorContext = actorContext;
+ this.shardActor = shardActor;
+ }
+
+ ActorRef newShardTransaction(TransactionProxy.TransactionType type, ShardTransactionIdentifier transactionID,
+ String transactionChainID, short clientVersion) {
+ final AbstractShardDataTreeTransaction<?> transaction;
+ switch (type) {
+ case READ_ONLY:
+ transaction = dataTree.newReadOnlyTransaction(transactionID.toString(), transactionChainID);
+ break;
+ case READ_WRITE:
+ case WRITE_ONLY:
+ transaction = dataTree.newReadWriteTransaction(transactionID.toString(), transactionChainID);
+ break;
+ default:
+ throw new IllegalArgumentException("Unsupported transaction type " + type);
+ }
+
+ return actorContext.actorOf(ShardTransaction.props(type, transaction, shardActor, datastoreContext, shardMBean,
+ transactionID.getRemoteTransactionId(), clientVersion).withDispatcher(txnDispatcherPath),
+ transactionID.toString());
+ }
+}
/*
- *
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
+import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
public class ShardWriteTransaction extends ShardTransaction {
private final MutableCompositeModification compositeModification = new MutableCompositeModification();
- private final DOMStoreWriteTransaction transaction;
+ private int totalBatchedModificationsReceived;
+ private Exception lastBatchedModificationsException;
+ private final ReadWriteShardDataTreeTransaction transaction;
- public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID,
- short clientTxVersion) {
- super(shardActor, schemaContext, shardStats, transactionID, clientTxVersion);
+ public ShardWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
+ ShardStats shardStats, String transactionID, short clientTxVersion) {
+ super(shardActor, shardStats, transactionID, clientTxVersion);
this.transaction = transaction;
}
@Override
- protected DOMStoreTransaction getDOMStoreTransaction() {
+ protected ReadWriteShardDataTreeTransaction getDOMStoreTransaction() {
return transaction;
}
if (message instanceof BatchedModifications) {
batchedModifications((BatchedModifications)message);
} else if (message instanceof ReadyTransaction) {
- readyTransaction(transaction, !SERIALIZED_REPLY);
+ readyTransaction(!SERIALIZED_REPLY, false);
} else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, SERIALIZED_REPLY);
+ readyTransaction(SERIALIZED_REPLY, false);
} else if(WriteData.isSerializedType(message)) {
- writeData(transaction, WriteData.fromSerializable(message), SERIALIZED_REPLY);
+ writeData(WriteData.fromSerializable(message), SERIALIZED_REPLY);
} else if(MergeData.isSerializedType(message)) {
- mergeData(transaction, MergeData.fromSerializable(message), SERIALIZED_REPLY);
+ mergeData(MergeData.fromSerializable(message), SERIALIZED_REPLY);
} else if(DeleteData.isSerializedType(message)) {
- deleteData(transaction, DeleteData.fromSerializable(message), SERIALIZED_REPLY);
+ deleteData(DeleteData.fromSerializable(message), SERIALIZED_REPLY);
} else if (message instanceof GetCompositedModification) {
// This is here for testing only
}
private void batchedModifications(BatchedModifications batched) {
+ if (checkClosed()) {
+ if (batched.isReady()) {
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
+ return;
+ }
+
try {
for(Modification modification: batched.getModifications()) {
compositeModification.addModification(modification);
- modification.apply(transaction);
+ modification.apply(transaction.getSnapshot());
}
- getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ totalBatchedModificationsReceived++;
+ if(batched.isReady()) {
+ if(lastBatchedModificationsException != null) {
+ throw lastBatchedModificationsException;
+ }
+
+ if(totalBatchedModificationsReceived != batched.getTotalMessagesSent()) {
+ throw new IllegalStateException(String.format(
+ "The total number of batched messages received %d does not match the number sent %d",
+ totalBatchedModificationsReceived, batched.getTotalMessagesSent()));
+ }
+
+ readyTransaction(false, batched.isDoCommitOnReady());
+ } else {
+ getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ }
} catch (Exception e) {
+ lastBatchedModificationsException = e;
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+
+ if(batched.isReady()) {
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
}
}
- private void writeData(DOMStoreWriteTransaction transaction, WriteData message,
- boolean returnSerialized) {
+ protected final void dataExists(DataExists message, final boolean returnSerialized) {
+ super.dataExists(transaction, message, returnSerialized);
+ }
+
+ protected final void readData(ReadData message, final boolean returnSerialized) {
+ super.readData(transaction, message, returnSerialized);
+ }
+
+ private boolean checkClosed() {
+ if (transaction.isClosed()) {
+ getSender().tell(new akka.actor.Status.Failure(new IllegalStateException("Transaction is closed, no modifications allowed")), getSelf());
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private void writeData(WriteData message, boolean returnSerialized) {
LOG.debug("writeData at path : {}", message.getPath());
+ if (checkClosed()) {
+ return;
+ }
compositeModification.addModification(
new WriteModification(message.getPath(), message.getData()));
try {
- transaction.write(message.getPath(), message.getData());
+ transaction.getSnapshot().write(message.getPath(), message.getData());
WriteDataReply writeDataReply = WriteDataReply.INSTANCE;
getSender().tell(returnSerialized ? writeDataReply.toSerializable(message.getVersion()) :
writeDataReply, getSelf());
}
}
- private void mergeData(DOMStoreWriteTransaction transaction, MergeData message,
- boolean returnSerialized) {
+ private void mergeData(MergeData message, boolean returnSerialized) {
LOG.debug("mergeData at path : {}", message.getPath());
+ if (checkClosed()) {
+ return;
+ }
compositeModification.addModification(
new MergeModification(message.getPath(), message.getData()));
try {
- transaction.merge(message.getPath(), message.getData());
+ transaction.getSnapshot().merge(message.getPath(), message.getData());
MergeDataReply mergeDataReply = MergeDataReply.INSTANCE;
getSender().tell(returnSerialized ? mergeDataReply.toSerializable(message.getVersion()) :
mergeDataReply, getSelf());
}
}
- private void deleteData(DOMStoreWriteTransaction transaction, DeleteData message,
- boolean returnSerialized) {
+ private void deleteData(DeleteData message, boolean returnSerialized) {
LOG.debug("deleteData at path : {}", message.getPath());
+ if (checkClosed()) {
+ return;
+ }
compositeModification.addModification(new DeleteModification(message.getPath()));
try {
- transaction.delete(message.getPath());
+ transaction.getSnapshot().delete(message.getPath());
DeleteDataReply deleteDataReply = DeleteDataReply.INSTANCE;
getSender().tell(returnSerialized ? deleteDataReply.toSerializable(message.getVersion()) :
deleteDataReply, getSelf());
- }catch(Exception e){
+ } catch(Exception e) {
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
}
- private void readyTransaction(DOMStoreWriteTransaction transaction, boolean returnSerialized) {
+ private void readyTransaction(boolean returnSerialized, boolean doImmediateCommit) {
String transactionID = getTransactionID();
LOG.debug("readyTransaction : {}", transactionID);
- DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
+ ShardDataTreeCohort cohort = transaction.ready();
getShardActor().forward(new ForwardedReadyTransaction(transactionID, getClientTxVersion(),
- cohort, compositeModification, returnSerialized), getContext());
+ cohort, compositeModification, returnSerialized, doImmediateCommit), getContext());
// The shard will handle the commit from here so we're no longer needed - self-destruct.
getSelf().tell(PoisonPill.getInstance(), getSelf());
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
+ private static final Logger LOG = LoggerFactory.getLogger(SimpleShardDataTreeCohort.class);
+ private static final ListenableFuture<Boolean> TRUE_FUTURE = Futures.immediateFuture(Boolean.TRUE);
+ private static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
+ private final DataTreeModification transaction;
+ private final ShardDataTree dataTree;
+ private DataTreeCandidateTip candidate;
+
+ SimpleShardDataTreeCohort(final ShardDataTree dataTree, final DataTreeModification transaction) {
+ this.dataTree = Preconditions.checkNotNull(dataTree);
+ this.transaction = Preconditions.checkNotNull(transaction);
+ }
+
+ @Override
+ DataTreeCandidateTip getCandidate() {
+ return candidate;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ try {
+ dataTree.getDataTree().validate(transaction);
+ LOG.debug("Transaction {} validated", transaction);
+ return TRUE_FUTURE;
+ } catch (Exception e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ try {
+ candidate = dataTree.getDataTree().prepare(transaction);
+ /*
+ * FIXME: this is the place where we should be interacting with persistence, specifically by invoking
+ * persist on the candidate (which gives us a Future).
+ */
+ LOG.debug("Transaction {} prepared candidate {}", transaction, candidate);
+ return VOID_FUTURE;
+ } catch (Exception e) {
+ LOG.debug("Transaction {} failed to prepare", transaction, e);
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ // No-op, really
+ return VOID_FUTURE;
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ try {
+ dataTree.getDataTree().commit(candidate);
+ } catch (Exception e) {
+ LOG.error("Transaction {} failed to commit", transaction, e);
+ return Futures.immediateFailedFuture(e);
+ }
+
+ LOG.debug("Transaction {} committed, proceeding to notify", transaction);
+ dataTree.notifyListeners(candidate);
+ return VOID_FUTURE;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import akka.dispatch.Futures;
+import akka.dispatch.OnComplete;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import java.util.Arrays;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+/**
+ * A cohort proxy implementation for a single-shard transaction commit. If the transaction was a direct commit
+ * to the shard, this implementation elides the CanCommitTransaction and CommitTransaction messages to the
+ * shard as an optimization. Otherwise the 3-phase commit to the shard is delegated to a
+ * ThreePhaseCommitCohortProxy instance (this is for backwards compatibility with pre-Lithium versions).
+ *
+ * @author Thomas Pantelis
+ */
+class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
+ private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class);
+
+ private final ActorContext actorContext;
+ private final Future<Object> cohortFuture;
+ private final String transactionId;
+ private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
+ private final OperationCallback.Reference operationCallbackRef;
+
+ SingleCommitCohortProxy(ActorContext actorContext, Future<Object> cohortFuture, String transactionId,
+ OperationCallback.Reference operationCallbackRef) {
+ this.actorContext = actorContext;
+ this.cohortFuture = cohortFuture;
+ this.transactionId = transactionId;
+ this.operationCallbackRef = operationCallbackRef;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ LOG.debug("Tx {} canCommit", transactionId);
+
+ final SettableFuture<Boolean> returnFuture = SettableFuture.create();
+
+ cohortFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object cohortResponse) {
+ if(failure != null) {
+ operationCallbackRef.get().failure();
+ returnFuture.setException(failure);
+ return;
+ }
+
+ operationCallbackRef.get().success();
+
+ if(cohortResponse instanceof ActorSelection) {
+ handlePreLithiumActorCohort((ActorSelection)cohortResponse, returnFuture);
+ return;
+ }
+
+ LOG.debug("Tx {} successfully completed direct commit", transactionId);
+
+ // The Future was the result of a direct commit to the shard, essentially eliding the
+ // front-end 3PC coordination. We don't really care about the specific Future
+ // response object, only that it completed successfully. At this point the Tx is complete
+ // so return true. The subsequent preCommit and commit phases will be no-ops, ie return
+ // immediate success, to complete the 3PC for the front-end.
+ returnFuture.set(Boolean.TRUE);
+ }
+ }, actorContext.getClientDispatcher());
+
+ return returnFuture;
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegateCohort.preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegateCohort.abort();
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ return delegateCohort.commit();
+ }
+
+ @Override
+ List<Future<Object>> getCohortFutures() {
+ return Arrays.asList(cohortFuture);
+ }
+
+ private void handlePreLithiumActorCohort(ActorSelection actorSelection, final SettableFuture<Boolean> returnFuture) {
+ // Handle backwards compatibility. An ActorSelection response would be returned from a
+ // pre-Lithium version. In this case delegate to a ThreePhaseCommitCohortProxy.
+ delegateCohort = new ThreePhaseCommitCohortProxy(actorContext,
+ Arrays.asList(Futures.successful(actorSelection)), transactionId);
+ com.google.common.util.concurrent.Futures.addCallback(delegateCohort.canCommit(), new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean canCommit) {
+ returnFuture.set(canCommit);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ returnFuture.setException(t);
+ }
+ });
+ }
+}
import akka.actor.ActorSelection;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
/**
* ThreePhaseCommitCohortProxy represents a set of remote cohort proxies
*/
-public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCohort{
+public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<ActorSelection> {
private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
- private static final ListenableFuture<Void> IMMEDIATE_SUCCESS =
- com.google.common.util.concurrent.Futures.immediateFuture(null);
-
private final ActorContext actorContext;
private final List<Future<ActorSelection>> cohortFutures;
private volatile List<ActorSelection> cohorts;
private final String transactionId;
- private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
- @Override
- public void run() {
- }
-
- @Override
- public void success() {
- }
-
- @Override
- public void failure() {
- }
- };
public ThreePhaseCommitCohortProxy(ActorContext actorContext,
List<Future<ActorSelection>> cohortFutures, String transactionId) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} finishCanCommit", transactionId);
}
- // The last phase of canCommit is to invoke all the cohort actors asynchronously to perform
- // their canCommit processing. If any one fails then we'll fail canCommit.
- Future<Iterable<Object>> combinedFuture =
- invokeCohorts(new CanCommitTransaction(transactionId).toSerializable());
+ // For empty transactions return immediately
+ if(cohorts.size() == 0){
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: canCommit returning result: {}", transactionId, true);
+ }
+ returnFuture.set(Boolean.TRUE);
+ return;
+ }
- combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
+ final Object message = new CanCommitTransaction(transactionId).toSerializable();
+
+ final Iterator<ActorSelection> iterator = cohorts.iterator();
+
+ final OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
- public void onComplete(Throwable failure, Iterable<Object> responses) throws Throwable {
- if(failure != null) {
- if(LOG.isDebugEnabled()) {
+ public void onComplete(Throwable failure, Object response) throws Throwable {
+ if (failure != null) {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
}
returnFuture.setException(failure);
}
boolean result = true;
- for(Object response: responses) {
- if (response.getClass().equals(CanCommitTransactionReply.SERIALIZABLE_CLASS)) {
- CanCommitTransactionReply reply =
- CanCommitTransactionReply.fromSerializable(response);
- if (!reply.getCanCommit()) {
- result = false;
- break;
- }
- } else {
- LOG.error("Unexpected response type {}", response.getClass());
- returnFuture.setException(new IllegalArgumentException(
- String.format("Unexpected response type %s", response.getClass())));
- return;
+ if (response.getClass().equals(CanCommitTransactionReply.SERIALIZABLE_CLASS)) {
+ CanCommitTransactionReply reply =
+ CanCommitTransactionReply.fromSerializable(response);
+ if (!reply.getCanCommit()) {
+ result = false;
}
+ } else {
+ LOG.error("Unexpected response type {}", response.getClass());
+ returnFuture.setException(new IllegalArgumentException(
+ String.format("Unexpected response type %s", response.getClass())));
+ return;
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
+
+ if(iterator.hasNext() && result){
+ Future<Object> future = actorContext.executeOperationAsync(iterator.next(), message,
+ actorContext.getTransactionCommitOperationTimeout());
+ future.onComplete(this, actorContext.getClientDispatcher());
+ } else {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
+ }
+ returnFuture.set(Boolean.valueOf(result));
}
- returnFuture.set(Boolean.valueOf(result));
+
}
- }, actorContext.getClientDispatcher());
+ };
+
+ Future<Object> future = actorContext.executeOperationAsync(iterator.next(), message,
+ actorContext.getTransactionCommitOperationTimeout());
+ future.onComplete(onComplete, actorContext.getClientDispatcher());
}
private Future<Iterable<Object>> invokeCohorts(Object message) {
public ListenableFuture<Void> preCommit() {
// We don't need to do anything here - preCommit is done atomically with the commit phase
// by the shard.
- return IMMEDIATE_SUCCESS;
+ return IMMEDIATE_VOID_SUCCESS;
}
@Override
@Override
public ListenableFuture<Void> commit() {
- OperationCallback operationCallback = (cohortFutures.size() == 0) ? NO_OP_CALLBACK :
+ OperationCallback operationCallback = cohortFutures.isEmpty() ? OperationCallback.NO_OP_CALLBACK :
new TransactionRateLimitingCallback(actorContext);
return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
final Class<?> expectedResponseClass, final boolean propagateException) {
- return voidOperation(operationName, message, expectedResponseClass, propagateException, NO_OP_CALLBACK);
+ return voidOperation(operationName, message, expectedResponseClass, propagateException,
+ OperationCallback.NO_OP_CALLBACK);
}
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
}, actorContext.getClientDispatcher());
}
- @VisibleForTesting
+ @Override
List<Future<ActorSelection>> getCohortFutures() {
return Collections.unmodifiableList(cohortFutures);
}
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
import com.google.common.base.Preconditions;
import java.util.Collections;
import java.util.List;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-import scala.concurrent.Promise;
/**
* TransactionChainProxy acts as a proxy for a DOMStoreTransactionChain created on a remote shard
*/
public class TransactionChainProxy implements DOMStoreTransactionChain {
- private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
-
private interface State {
boolean isReady();
- List<Future<ActorSelection>> getPreviousReadyFutures();
+ List<Future<Object>> getPreviousReadyFutures();
}
private static class Allocated implements State {
}
@Override
- public List<Future<ActorSelection>> getPreviousReadyFutures() {
+ public List<Future<Object>> getPreviousReadyFutures() {
return transaction.getReadyFutures();
}
}
private static abstract class AbstractDefaultState implements State {
@Override
- public List<Future<ActorSelection>> getPreviousReadyFutures() {
+ public List<Future<Object>> getPreviousReadyFutures() {
return Collections.emptyList();
}
}
currentState = CLOSED_STATE;
// Send a close transaction chain request to each and every shard
- actorContext.broadcast(new CloseTransactionChain(transactionChainId));
+ actorContext.broadcast(new CloseTransactionChain(transactionChainId).toSerializable());
}
private ChainedTransactionProxy allocateWriteTransaction(TransactionProxy.TransactionType type) {
private void checkReadyState(State state) {
Preconditions.checkState(state.isReady(), "Previous transaction is not ready yet");
}
-
- private static class ChainedTransactionProxy extends TransactionProxy {
-
- /**
- * Stores the ready Futures from the previous Tx in the chain.
- */
- private final List<Future<ActorSelection>> previousReadyFutures;
-
- /**
- * Stores the ready Futures from this transaction when it is readied.
- */
- private volatile List<Future<ActorSelection>> readyFutures;
-
- private ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
- String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
- super(actorContext, transactionType, transactionChainId);
- this.previousReadyFutures = previousReadyFutures;
- }
-
- List<Future<ActorSelection>> getReadyFutures() {
- return readyFutures;
- }
-
- boolean isReady() {
- return readyFutures != null;
- }
-
- @Override
- protected void onTransactionReady(List<Future<ActorSelection>> readyFutures) {
- LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
- readyFutures.size(), getTransactionChainId());
- this.readyFutures = readyFutures;
- }
-
- /**
- * This method is overridden to ensure the previous Tx's ready operations complete
- * before we initiate the next Tx in the chain to avoid creation failures if the
- * previous Tx's ready operations haven't completed yet.
- */
- @Override
- protected Future<ActorSelection> sendFindPrimaryShardAsync(final String shardName) {
- // Check if there are any previous ready Futures, otherwise let the super class handle it.
- if(previousReadyFutures.isEmpty()) {
- return super.sendFindPrimaryShardAsync(shardName);
- }
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Waiting for {} previous ready futures for Tx {} on chain {}",
- previousReadyFutures.size(), getIdentifier(), getTransactionChainId());
- }
-
- // Combine the ready Futures into 1.
- Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
- previousReadyFutures, getActorContext().getClientDispatcher());
-
- // Add a callback for completion of the combined Futures.
- final Promise<ActorSelection> returnPromise = akka.dispatch.Futures.promise();
- OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
- @Override
- public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
- if(failure != null) {
- // A Ready Future failed so fail the returned Promise.
- returnPromise.failure(failure);
- } else {
- LOG.debug("Previous Tx readied - sending FindPrimaryShard for {} on chain {}",
- getIdentifier(), getTransactionChainId());
-
- // Send the FindPrimaryShard message and use the resulting Future to complete the
- // returned Promise.
- returnPromise.completeWith(ChainedTransactionProxy.super.sendFindPrimaryShardAsync(shardName));
- }
- }
- };
-
- combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
-
- return returnPromise.future();
- }
- }
}
import akka.actor.ActorSelection;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.List;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import scala.concurrent.Future;
void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture);
- List<Future<Object>> getRecordedOperationFutures();
+ boolean supportsDirectCommit();
+
+ Future<Object> directCommit();
}
/*
* Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
public class TransactionContextImpl extends AbstractTransactionContext {
private static final Logger LOG = LoggerFactory.getLogger(TransactionContextImpl.class);
+ private final String transactionChainId;
private final ActorContext actorContext;
- private final String transactionPath;
private final ActorSelection actor;
private final boolean isTxActorLocal;
private final short remoteTransactionVersion;
private final OperationCompleter operationCompleter;
private BatchedModifications batchedModifications;
+ private int totalBatchedModificationsSent;
- protected TransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
- ActorContext actorContext, SchemaContext schemaContext,
- boolean isTxActorLocal, short remoteTransactionVersion, OperationCompleter operationCompleter) {
+ protected TransactionContextImpl(ActorSelection actor, TransactionIdentifier identifier,
+ String transactionChainId, ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
+ short remoteTransactionVersion, OperationCompleter operationCompleter) {
super(identifier);
- this.transactionPath = transactionPath;
this.actor = actor;
+ this.transactionChainId = transactionChainId;
this.actorContext = actorContext;
this.isTxActorLocal = isTxActorLocal;
this.remoteTransactionVersion = remoteTransactionVersion;
return actor;
}
+ protected ActorContext getActorContext() {
+ return actorContext;
+ }
+
protected short getRemoteTransactionVersion() {
return remoteTransactionVersion;
}
@Override
public void closeTransaction() {
- LOG.debug("Tx {} closeTransaction called", identifier);
+ LOG.debug("Tx {} closeTransaction called", getIdentifier());
actorContext.sendOperationAsync(getActor(), CloseTransaction.INSTANCE.toSerializable());
}
@Override
- public Future<ActorSelection> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
- identifier, recordedOperationFutures.size());
+ public boolean supportsDirectCommit() {
+ return true;
+ }
- // Send the remaining batched modifications if any.
+ @Override
+ public Future<Object> directCommit() {
+ LOG.debug("Tx {} directCommit called", getIdentifier());
- sendBatchedModifications();
+ // Send the remaining batched modifications, if any, with the ready flag set.
- // Send the ReadyTransaction message to the Tx actor.
+ return sendBatchedModifications(true, true);
+ }
- final Future<Object> replyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
+ @Override
+ public Future<ActorSelection> readyTransaction() {
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
- // Combine all the previously recorded put/merge/delete operation reply Futures and the
- // ReadyTransactionReply Future into one Future. If any one fails then the combined
- // Future will fail. We need all prior operations and the ready operation to succeed
- // in order to attempt commit.
+ // Send the remaining batched modifications, if any, with the ready flag set.
- List<Future<Object>> futureList =
- Lists.newArrayListWithCapacity(recordedOperationFutures.size() + 1);
- futureList.addAll(recordedOperationFutures);
- futureList.add(replyFuture);
+ Future<Object> lastModificationsFuture = sendBatchedModifications(true, false);
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
- actorContext.getClientDispatcher());
+ return transformReadyReply(lastModificationsFuture);
+ }
- // Transform the combined Future into a Future that returns the cohort actor path from
- // the ReadyTransactionReply. That's the end result of the ready operation.
+ protected Future<ActorSelection> transformReadyReply(final Future<Object> readyReplyFuture) {
+ // Transform the last reply Future into a Future that returns the cohort actor path from
+ // the last reply message. That's the end result of the ready operation.
- return combinedFutures.transform(new Mapper<Iterable<Object>, ActorSelection>() {
+ return readyReplyFuture.transform(new Mapper<Object, ActorSelection>() {
@Override
- public ActorSelection checkedApply(Iterable<Object> notUsed) {
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
- identifier);
-
- // At this point all the Futures succeeded and we need to extract the cohort
- // actor path from the ReadyTransactionReply. For the recorded operations, they
- // don't return any data so we're only interested that they completed
- // successfully. We could be paranoid and verify the correct reply types but
- // that really should never happen so it's not worth the overhead of
- // de-serializing each reply.
-
- // Note the Future get call here won't block as it's complete.
- Object serializedReadyReply = replyFuture.value().get().get();
- if (serializedReadyReply instanceof ReadyTransactionReply) {
- return actorContext.actorSelection(((ReadyTransactionReply)serializedReadyReply).getCohortPath());
-
- } else if(serializedReadyReply.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
- String cohortPath = reply.getCohortPath();
-
- // In Helium we used to return the local path of the actor which represented
- // a remote ThreePhaseCommitCohort. The local path would then be converted to
- // a remote path using this resolvePath method. To maintain compatibility with
- // a Helium node we need to continue to do this conversion.
- // At some point in the future when upgrades from Helium are not supported
- // we could remove this code to resolvePath and just use the cohortPath as the
- // resolved cohortPath
- if(TransactionContextImpl.this.remoteTransactionVersion <
- DataStoreVersions.HELIUM_1_VERSION) {
- cohortPath = actorContext.resolvePath(transactionPath, cohortPath);
- }
-
- return actorContext.actorSelection(cohortPath);
-
- } else {
- // Throwing an exception here will fail the Future.
- throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
- identifier, serializedReadyReply.getClass()));
+ public ActorSelection checkedApply(Object serializedReadyReply) {
+ LOG.debug("Tx {} readyTransaction", getIdentifier());
+
+ // At this point the ready operation succeeded and we need to extract the cohort
+ // actor path from the reply.
+ if(ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
+ ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
+ return actorContext.actorSelection(extractCohortPathFrom(readyTxReply));
}
+
+ // Throwing an exception here will fail the Future.
+ throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
+ getIdentifier(), serializedReadyReply.getClass()));
}
}, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
}
+ protected String extractCohortPathFrom(ReadyTransactionReply readyTxReply) {
+ return readyTxReply.getCohortPath();
+ }
+
+ private BatchedModifications newBatchedModifications() {
+ return new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion, transactionChainId);
+ }
+
private void batchModification(Modification modification) {
if(batchedModifications == null) {
- batchedModifications = new BatchedModifications(remoteTransactionVersion);
+ batchedModifications = newBatchedModifications();
}
batchedModifications.addModification(modification);
}
}
- private void sendBatchedModifications() {
- if(batchedModifications != null) {
- LOG.debug("Tx {} sending {} batched modifications", identifier,
- batchedModifications.getModifications().size());
+ protected Future<Object> sendBatchedModifications() {
+ return sendBatchedModifications(false, false);
+ }
+
+ protected Future<Object> sendBatchedModifications(boolean ready, boolean doCommitOnReady) {
+ Future<Object> sent = null;
+ if(ready || (batchedModifications != null && !batchedModifications.getModifications().isEmpty())) {
+ if(batchedModifications == null) {
+ batchedModifications = newBatchedModifications();
+ }
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
+ batchedModifications.getModifications().size(), ready);
+ }
+
+ batchedModifications.setReady(ready);
+ batchedModifications.setDoCommitOnReady(doCommitOnReady);
+ batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
+ sent = executeOperationAsync(batchedModifications);
- recordedOperationFutures.add(executeOperationAsync(batchedModifications));
- batchedModifications = null;
+ if(ready) {
+ batchedModifications = null;
+ } else {
+ batchedModifications = newBatchedModifications();
+ }
}
+
+ return sent;
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ LOG.debug("Tx {} deleteData called path = {}", getIdentifier(), path);
batchModification(new DeleteModification(path));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ LOG.debug("Tx {} mergeData called path = {}", getIdentifier(), path);
batchModification(new MergeModification(path, data));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ LOG.debug("Tx {} writeData called path = {}", getIdentifier(), path);
batchModification(new WriteModification(path, data));
}
@Override
- public void readData(
- final YangInstanceIdentifier path,final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture ) {
+ public void readData(final YangInstanceIdentifier path,
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture ) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
+ LOG.debug("Tx {} readData called path = {}", getIdentifier(), path);
- // Send the remaining batched modifications if any.
+ // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
+ // public API contract.
sendBatchedModifications();
- // If there were any previous recorded put/merge/delete operation reply Futures then we
- // must wait for them to successfully complete. This is necessary to honor the read
- // uncommitted semantics of the public API contract. If any one fails then fail the read.
-
- if(recordedOperationFutures.isEmpty()) {
- finishReadData(path, returnFuture);
- } else {
- LOG.debug("Tx {} readData: verifying {} previous recorded operations",
- identifier, recordedOperationFutures.size());
-
- // Note: we make a copy of recordedOperationFutures to be on the safe side in case
- // Futures#sequence accesses the passed List on a different thread, as
- // recordedOperationFutures is not synchronized.
-
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
- Lists.newArrayList(recordedOperationFutures),
- actorContext.getClientDispatcher());
-
- OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
- @Override
- public void onComplete(Throwable failure, Iterable<Object> notUsed)
- throws Throwable {
- if(failure != null) {
- LOG.debug("Tx {} readData: a recorded operation failed: {}",
- identifier, failure);
- returnFuture.setException(new ReadFailedException(
- "The read could not be performed because a previous put, merge,"
- + "or delete operation failed", failure));
- } else {
- finishReadData(path, returnFuture);
- }
- }
- };
-
- combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
- }
-
- }
-
- private void finishReadData(final YangInstanceIdentifier path,
- final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
-
- LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object readResponse) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+ LOG.debug("Tx {} read operation failed: {}", getIdentifier(), failure);
returnFuture.setException(new ReadFailedException(
"Error reading data for path " + path, failure));
} else {
- LOG.debug("Tx {} read operation succeeded", identifier, failure);
+ LOG.debug("Tx {} read operation succeeded", getIdentifier(), failure);
if (readResponse instanceof ReadDataReply) {
ReadDataReply reply = (ReadDataReply) readResponse;
@Override
public void dataExists(final YangInstanceIdentifier path, final SettableFuture<Boolean> returnFuture) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ LOG.debug("Tx {} dataExists called path = {}", getIdentifier(), path);
- // Send the remaining batched modifications if any.
+ // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
+ // public API contract.
sendBatchedModifications();
- // If there were any previous recorded put/merge/delete operation reply Futures then we
- // must wait for them to successfully complete. This is necessary to honor the read
- // uncommitted semantics of the public API contract. If any one fails then fail this
- // request.
-
- if(recordedOperationFutures.isEmpty()) {
- finishDataExists(path, returnFuture);
- } else {
- LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
- identifier, recordedOperationFutures.size());
-
- // Note: we make a copy of recordedOperationFutures to be on the safe side in case
- // Futures#sequence accesses the passed List on a different thread, as
- // recordedOperationFutures is not synchronized.
-
- Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
- Lists.newArrayList(recordedOperationFutures),
- actorContext.getClientDispatcher());
- OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
- @Override
- public void onComplete(Throwable failure, Iterable<Object> notUsed)
- throws Throwable {
- if(failure != null) {
- LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
- identifier, failure);
- returnFuture.setException(new ReadFailedException(
- "The data exists could not be performed because a previous "
- + "put, merge, or delete operation failed", failure));
- } else {
- finishDataExists(path, returnFuture);
- }
- }
- };
-
- combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
- }
- }
-
- private void finishDataExists(final YangInstanceIdentifier path,
- final SettableFuture<Boolean> returnFuture) {
-
- LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object response) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+ LOG.debug("Tx {} dataExists operation failed: {}", getIdentifier(), failure);
returnFuture.setException(new ReadFailedException(
"Error checking data exists for path " + path, failure));
} else {
- LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+ LOG.debug("Tx {} dataExists operation succeeded", getIdentifier(), failure);
if (response instanceof DataExistsReply) {
returnFuture.set(Boolean.valueOf(((DataExistsReply) response).exists()));
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
+ * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
+ * retry task after a short delay.
+ * <p>
+ * The end result from a completed CreateTransaction message is a TransactionContext that is
+ * used to perform transaction operations. Transaction operations that occur before the
+ * CreateTransaction completes are cache and executed once the CreateTransaction completes,
+ * successfully or not.
+ */
+final class TransactionFutureCallback extends OnComplete<Object> {
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionFutureCallback.class);
+
+ /**
+ * Time interval in between transaction create retries.
+ */
+ private static final FiniteDuration CREATE_TX_TRY_INTERVAL = FiniteDuration.create(1, TimeUnit.SECONDS);
+
+ /**
+ * The list of transaction operations to execute once the CreateTransaction completes.
+ */
+ @GuardedBy("txOperationsOnComplete")
+ private final List<TransactionOperation> txOperationsOnComplete = Lists.newArrayList();
+ private final TransactionProxy proxy;
+ private final String shardName;
+
+ /**
+ * The TransactionContext resulting from the CreateTransaction reply.
+ */
+ private volatile TransactionContext transactionContext;
+
+ /**
+ * The target primary shard.
+ */
+ private volatile ActorSelection primaryShard;
+ private volatile int createTxTries;
+
+ TransactionFutureCallback(final TransactionProxy proxy, final String shardName) {
+ this.proxy = Preconditions.checkNotNull(proxy);
+ this.shardName = shardName;
+ createTxTries = (int) (proxy.getActorContext().getDatastoreContext().
+ getShardLeaderElectionTimeout().duration().toMillis() /
+ CREATE_TX_TRY_INTERVAL.toMillis());
+ }
+
+ String getShardName() {
+ return shardName;
+ }
+
+ TransactionContext getTransactionContext() {
+ return transactionContext;
+ }
+
+ private TransactionType getTransactionType() {
+ return proxy.getTransactionType();
+ }
+
+ private TransactionIdentifier getIdentifier() {
+ return proxy.getIdentifier();
+ }
+
+ private ActorContext getActorContext() {
+ return proxy.getActorContext();
+ }
+
+ private Semaphore getOperationLimiter() {
+ return proxy.getOperationLimiter();
+ }
+
+ /**
+ * Sets the target primary shard and initiates a CreateTransaction try.
+ */
+ void setPrimaryShard(ActorSelection primaryShard) {
+ this.primaryShard = primaryShard;
+
+ if (getTransactionType() == TransactionType.WRITE_ONLY &&
+ getActorContext().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
+ LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
+ getIdentifier(), primaryShard);
+
+ // For write-only Tx's we prepare the transaction modifications directly on the shard actor
+ // to avoid the overhead of creating a separate transaction actor.
+ // FIXME: can't assume the shard version is LITHIUM_VERSION - need to obtain it somehow.
+ executeTxOperatonsOnComplete(proxy.createValidTransactionContext(this.primaryShard,
+ this.primaryShard.path().toString(), DataStoreVersions.LITHIUM_VERSION));
+ } else {
+ tryCreateTransaction();
+ }
+ }
+
+ /**
+ * Adds a TransactionOperation to be executed after the CreateTransaction completes.
+ */
+ private void addTxOperationOnComplete(TransactionOperation operation) {
+ boolean invokeOperation = true;
+ synchronized(txOperationsOnComplete) {
+ if(transactionContext == null) {
+ LOG.debug("Tx {} Adding operation on complete", getIdentifier());
+
+ invokeOperation = false;
+ txOperationsOnComplete.add(operation);
+ }
+ }
+
+ if(invokeOperation) {
+ operation.invoke(transactionContext);
+ }
+ }
+
+ void enqueueTransactionOperation(final TransactionOperation op) {
+
+ if (transactionContext != null) {
+ op.invoke(transactionContext);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ addTxOperationOnComplete(op);
+ }
+ }
+
+ /**
+ * Performs a CreateTransaction try async.
+ */
+ private void tryCreateTransaction() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(), primaryShard);
+ }
+
+ Object serializedCreateMessage = new CreateTransaction(getIdentifier().toString(),
+ getTransactionType().ordinal(), proxy.getTransactionChainId()).toSerializable();
+
+ Future<Object> createTxFuture = getActorContext().executeOperationAsync(primaryShard, serializedCreateMessage);
+
+ createTxFuture.onComplete(this, getActorContext().getClientDispatcher());
+ }
+
+ @Override
+ public void onComplete(Throwable failure, Object response) {
+ if(failure instanceof NoShardLeaderException) {
+ // There's no leader for the shard yet - schedule and try again, unless we're out
+ // of retries. Note: createTxTries is volatile as it may be written by different
+ // threads however not concurrently, therefore decrementing it non-atomically here
+ // is ok.
+ if(--createTxTries > 0) {
+ LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
+ getIdentifier(), shardName);
+
+ getActorContext().getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
+ new Runnable() {
+ @Override
+ public void run() {
+ tryCreateTransaction();
+ }
+ }, getActorContext().getClientDispatcher());
+ return;
+ }
+ }
+
+ createTransactionContext(failure, response);
+ }
+
+ void createTransactionContext(Throwable failure, Object response) {
+ // Mainly checking for state violation here to perform a volatile read of "initialized" to
+ // ensure updates to operationLimter et al are visible to this thread (ie we're doing
+ // "piggy-back" synchronization here).
+ proxy.ensureInitializied();
+
+ // Create the TransactionContext from the response or failure. Store the new
+ // TransactionContext locally until we've completed invoking the
+ // TransactionOperations. This avoids thread timing issues which could cause
+ // out-of-order TransactionOperations. Eg, on a modification operation, if the
+ // TransactionContext is non-null, then we directly call the TransactionContext.
+ // However, at the same time, the code may be executing the cached
+ // TransactionOperations. So to avoid thus timing, we don't publish the
+ // TransactionContext until after we've executed all cached TransactionOperations.
+ TransactionContext localTransactionContext;
+ if(failure != null) {
+ LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
+
+ localTransactionContext = new NoOpTransactionContext(failure, getIdentifier(), getOperationLimiter());
+ } else if (CreateTransactionReply.SERIALIZABLE_CLASS.equals(response.getClass())) {
+ localTransactionContext = createValidTransactionContext(
+ CreateTransactionReply.fromSerializable(response));
+ } else {
+ IllegalArgumentException exception = new IllegalArgumentException(String.format(
+ "Invalid reply type %s for CreateTransaction", response.getClass()));
+
+ localTransactionContext = new NoOpTransactionContext(exception, getIdentifier(), getOperationLimiter());
+ }
+
+ executeTxOperatonsOnComplete(localTransactionContext);
+ }
+
+ private void executeTxOperatonsOnComplete(TransactionContext localTransactionContext) {
+ while(true) {
+ // Access to txOperationsOnComplete and transactionContext must be protected and atomic
+ // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
+ // issues and ensure no TransactionOperation is missed and that they are processed
+ // in the order they occurred.
+
+ // We'll make a local copy of the txOperationsOnComplete list to handle re-entrancy
+ // in case a TransactionOperation results in another transaction operation being
+ // queued (eg a put operation from a client read Future callback that is notified
+ // synchronously).
+ Collection<TransactionOperation> operationsBatch = null;
+ synchronized(txOperationsOnComplete) {
+ if(txOperationsOnComplete.isEmpty()) {
+ // We're done invoking the TransactionOperations so we can now publish the
+ // TransactionContext.
+ transactionContext = localTransactionContext;
+ break;
+ }
+
+ operationsBatch = new ArrayList<>(txOperationsOnComplete);
+ txOperationsOnComplete.clear();
+ }
+
+ // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking.
+ // A slight down-side is that we need to re-acquire the lock below but this should
+ // be negligible.
+ for(TransactionOperation oper: operationsBatch) {
+ oper.invoke(localTransactionContext);
+ }
+ }
+ }
+
+ private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
+ LOG.debug("Tx {} Received {}", getIdentifier(), reply);
+
+ return proxy.createValidTransactionContext(getActorContext().actorSelection(reply.getTransactionPath()),
+ reply.getTransactionPath(), reply.getVersion());
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+/**
+ * Abstract superclass for transaction operations which should be executed
+ * on a {@link TransactionContext} at a later point in time.
+ */
+abstract class TransactionOperation {
+ /**
+ * Execute the delayed operation.
+ *
+ * @param transactionContext
+ */
+ protected abstract void invoke(TransactionContext transactionContext);
+}
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.controller.cluster.datastore.compat.PreLithiumTransactionContextImpl;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
import scala.concurrent.Promise;
-import scala.concurrent.duration.FiniteDuration;
/**
* TransactionProxy acts as a proxy for one or more transactions that were created on a remote shard
* shards will be executed.
* </p>
*/
-public class TransactionProxy implements DOMStoreReadWriteTransaction {
+public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIdentifier> implements DOMStoreReadWriteTransaction {
public static enum TransactionType {
READ_ONLY,
WRITE_ONLY,
- READ_WRITE
+ READ_WRITE;
+
+ // Cache all values
+ private static final TransactionType[] VALUES = values();
+
+ public static TransactionType fromInt(final int type) {
+ try {
+ return VALUES[type];
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("In TransactionType enum value " + type, e);
+ }
+ }
+ }
+
+ private static enum TransactionState {
+ OPEN,
+ READY,
+ CLOSED,
}
static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER =
private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
- /**
- * Time interval in between transaction create retries.
- */
- private static final FiniteDuration CREATE_TX_TRY_INTERVAL =
- FiniteDuration.create(1, TimeUnit.SECONDS);
-
- /**
- * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
- * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
- * trickery to clean up its internal thread when the bundle is unloaded.
- */
- private static final FinalizableReferenceQueue phantomReferenceQueue =
- new FinalizableReferenceQueue();
-
- /**
- * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
- * necessary because PhantomReferences need a hard reference so they're not garbage collected.
- * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
- * and thus becomes eligible for garbage collection.
- */
- private static final Map<TransactionProxyCleanupPhantomReference,
- TransactionProxyCleanupPhantomReference> phantomReferenceCache =
- new ConcurrentHashMap<>();
-
- /**
- * A PhantomReference that closes remote transactions for a TransactionProxy when it's
- * garbage collected. This is used for read-only transactions as they're not explicitly closed
- * by clients. So the only way to detect that a transaction is no longer in use and it's safe
- * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
- * but TransactionProxy instances should generally be short-lived enough to avoid being moved
- * to the old generation space and thus should be cleaned up in a timely manner as the GC
- * runs on the young generation (eden, swap1...) space much more frequently.
- */
- private static class TransactionProxyCleanupPhantomReference
- extends FinalizablePhantomReference<TransactionProxy> {
-
- private final List<ActorSelection> remoteTransactionActors;
- private final AtomicBoolean remoteTransactionActorsMB;
- private final ActorContext actorContext;
- private final TransactionIdentifier identifier;
-
- protected TransactionProxyCleanupPhantomReference(TransactionProxy referent) {
- super(referent, phantomReferenceQueue);
-
- // Note we need to cache the relevant fields from the TransactionProxy as we can't
- // have a hard reference to the TransactionProxy instance itself.
-
- remoteTransactionActors = referent.remoteTransactionActors;
- remoteTransactionActorsMB = referent.remoteTransactionActorsMB;
- actorContext = referent.actorContext;
- identifier = referent.identifier;
- }
-
- @Override
- public void finalizeReferent() {
- LOG.trace("Cleaning up {} Tx actors for TransactionProxy {}",
- remoteTransactionActors.size(), identifier);
-
- phantomReferenceCache.remove(this);
-
- // Access the memory barrier volatile to ensure all previous updates to the
- // remoteTransactionActors list are visible to this thread.
-
- if(remoteTransactionActorsMB.get()) {
- for(ActorSelection actor : remoteTransactionActors) {
- LOG.trace("Sending CloseTransaction to {}", actor);
- actorContext.sendOperationAsync(actor, CloseTransaction.INSTANCE.toSerializable());
- }
- }
- }
- }
-
/**
* Stores the remote Tx actors for each requested data store path to be used by the
* PhantomReference to close the remote Tx's. This is only used for read-only Tx's. The
* remoteTransactionActors list so they will be visible to the thread accessing the
* PhantomReference.
*/
- private List<ActorSelection> remoteTransactionActors;
- private volatile AtomicBoolean remoteTransactionActorsMB;
+ List<ActorSelection> remoteTransactionActors;
+ volatile AtomicBoolean remoteTransactionActorsMB;
/**
* Stores the create transaction results per shard.
private final Map<String, TransactionFutureCallback> txFutureCallbackMap = new HashMap<>();
private final TransactionType transactionType;
- private final ActorContext actorContext;
- private final TransactionIdentifier identifier;
+ final ActorContext actorContext;
private final String transactionChainId;
private final SchemaContext schemaContext;
- private boolean inReadyState;
+ private TransactionState state = TransactionState.OPEN;
private volatile boolean initialized;
private Semaphore operationLimiter;
this(actorContext, transactionType, "");
}
- public TransactionProxy(ActorContext actorContext, TransactionType transactionType,
- String transactionChainId) {
+ public TransactionProxy(ActorContext actorContext, TransactionType transactionType, String transactionChainId) {
+ super(createIdentifier(actorContext));
this.actorContext = Preconditions.checkNotNull(actorContext,
"actorContext should not be null");
this.transactionType = Preconditions.checkNotNull(transactionType,
"schemaContext should not be null");
this.transactionChainId = transactionChainId;
- String memberName = actorContext.getCurrentMemberName();
- if(memberName == null){
- memberName = "UNKNOWN-MEMBER";
- }
-
- this.identifier = new TransactionIdentifier(memberName, counter.getAndIncrement());
-
- LOG.debug("Created txn {} of type {} on chain {}", identifier, transactionType, transactionChainId);
+ LOG.debug("Created txn {} of type {} on chain {}", getIdentifier(), transactionType, transactionChainId);
}
- @VisibleForTesting
- List<Future<Object>> getRecordedOperationFutures() {
- List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
- }
+ private static TransactionIdentifier createIdentifier(ActorContext actorContext) {
+ String memberName = actorContext.getCurrentMemberName();
+ if (memberName == null) {
+ memberName = "UNKNOWN-MEMBER";
}
- return recordedOperationFutures;
+ return new TransactionIdentifier(memberName, counter.getAndIncrement());
}
@VisibleForTesting
return false;
}
+ private static boolean isRootPath(YangInstanceIdentifier path) {
+ return !path.getPathArguments().iterator().hasNext();
+ }
+
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
- LOG.debug("Tx {} read {}", identifier, path);
-
- throttleOperation();
+ LOG.debug("Tx {} read {}", getIdentifier(), path);
final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture = SettableFuture.create();
- TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.readData(path, proxyFuture);
- }
- });
+ if(isRootPath(path)){
+ readAllData(path, proxyFuture);
+ } else {
+ throttleOperation();
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.readData(path, proxyFuture);
+ }
+ });
+
+ }
return MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
+ private void readAllData(final YangInstanceIdentifier path,
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture) {
+ Set<String> allShardNames = actorContext.getConfiguration().getAllShardNames();
+ List<SettableFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
+
+ for(String shardName : allShardNames){
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> subProxyFuture = SettableFuture.create();
+
+ throttleOperation();
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(shardName);
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.readData(path, subProxyFuture);
+ }
+ });
+
+ futures.add(subProxyFuture);
+ }
+
+ final ListenableFuture<List<Optional<NormalizedNode<?, ?>>>> future = Futures.allAsList(futures);
+
+ future.addListener(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ proxyFuture.set(NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.builder().build(),
+ future.get(), actorContext.getSchemaContext()));
+ } catch (DataValidationFailedException | InterruptedException | ExecutionException e) {
+ proxyFuture.setException(e);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ }
+
@Override
public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Exists operation on write-only transaction is not allowed");
- LOG.debug("Tx {} exists {}", identifier, path);
+ LOG.debug("Tx {} exists {}", getIdentifier(), path);
throttleOperation();
private void checkModificationState() {
Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
"Modification operation on read-only transaction is not allowed");
- Preconditions.checkState(!inReadyState,
+ Preconditions.checkState(state == TransactionState.OPEN,
"Transaction is sealed - further modifications are not allowed");
}
}
}
+ final void ensureInitializied() {
+ Preconditions.checkState(initialized, "Transaction %s was not propertly initialized.", getIdentifier());
+ }
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkModificationState();
- LOG.debug("Tx {} write {}", identifier, path);
+ LOG.debug("Tx {} write {}", getIdentifier(), path);
throttleOperation();
checkModificationState();
- LOG.debug("Tx {} merge {}", identifier, path);
+ LOG.debug("Tx {} merge {}", getIdentifier(), path);
throttleOperation();
checkModificationState();
- LOG.debug("Tx {} delete {}", identifier, path);
+ LOG.debug("Tx {} delete {}", getIdentifier(), path);
throttleOperation();
});
}
- @Override
- public DOMStoreThreePhaseCommitCohort ready() {
+ private boolean seal(final TransactionState newState) {
+ if (state == TransactionState.OPEN) {
+ state = newState;
+ return true;
+ } else {
+ return false;
+ }
+ }
- checkModificationState();
+ @Override
+ public AbstractThreePhaseCommitCohort<?> ready() {
+ Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
+ "Read-only transactions cannot be readied");
- inReadyState = true;
+ final boolean success = seal(TransactionState.READY);
+ Preconditions.checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state);
- LOG.debug("Tx {} Readying {} transactions for commit", identifier,
+ LOG.debug("Tx {} Readying {} transactions for commit", getIdentifier(),
txFutureCallbackMap.size());
- if(txFutureCallbackMap.size() == 0) {
- onTransactionReady(Collections.<Future<ActorSelection>>emptyList());
+ if (txFutureCallbackMap.isEmpty()) {
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
return NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
}
throttleOperation(txFutureCallbackMap.size());
- List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
+ final boolean isSingleShard = txFutureCallbackMap.size() == 1;
+ return isSingleShard ? createSingleCommitCohort() : createMultiCommitCohort();
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ private AbstractThreePhaseCommitCohort<Object> createSingleCommitCohort() {
+ TransactionFutureCallback txFutureCallback = txFutureCallbackMap.values().iterator().next();
+
+ LOG.debug("Tx {} Readying transaction for shard {} on chain {}", getIdentifier(),
+ txFutureCallback.getShardName(), transactionChainId);
+
+ final OperationCallback.Reference operationCallbackRef =
+ new OperationCallback.Reference(OperationCallback.NO_OP_CALLBACK);
+ final TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ final Future future;
+ if (transactionContext != null) {
+ // avoid the creation of a promise and a TransactionOperation
+ future = getReadyOrDirectCommitFuture(transactionContext, operationCallbackRef);
+ } else {
+ final Promise promise = akka.dispatch.Futures.promise();
+ txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ promise.completeWith(getReadyOrDirectCommitFuture(transactionContext, operationCallbackRef));
+ }
+ });
+ future = promise.future();
+ }
+
+ return new SingleCommitCohortProxy(actorContext, future, getIdentifier().toString(), operationCallbackRef);
+ }
+
+ private Future<?> getReadyOrDirectCommitFuture(TransactionContext transactionContext,
+ OperationCallback.Reference operationCallbackRef) {
+ if(transactionContext.supportsDirectCommit()) {
+ TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback(actorContext);
+ operationCallbackRef.set(rateLimitingCallback);
+ rateLimitingCallback.run();
+ return transactionContext.directCommit();
+ } else {
+ return transactionContext.readyTransaction();
+ }
+ }
+ private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort() {
+ List<Future<ActorSelection>> cohortFutures = new ArrayList<>(txFutureCallbackMap.size());
for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- LOG.debug("Tx {} Readying transaction for shard {} chain {}", identifier,
+ LOG.debug("Tx {} Readying transaction for shard {} on chain {}", getIdentifier(),
txFutureCallback.getShardName(), transactionChainId);
final TransactionContext transactionContext = txFutureCallback.getTransactionContext();
cohortFutures.add(future);
}
- onTransactionReady(cohortFutures);
-
- return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures,
- identifier.toString());
- }
-
- /**
- * Method for derived classes to be notified when the transaction has been readied.
- *
- * @param cohortFutures the cohort Futures for each shard transaction.
- */
- protected void onTransactionReady(List<Future<ActorSelection>> cohortFutures) {
- }
-
- @Override
- public Object getIdentifier() {
- return this.identifier;
+ return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures, getIdentifier().toString());
}
@Override
public void close() {
+ if (!seal(TransactionState.CLOSED)) {
+ if (state == TransactionState.CLOSED) {
+ // Idempotent no-op as per AutoCloseable recommendation
+ return;
+ }
+
+ throw new IllegalStateException(String.format("Transaction %s is ready, it cannot be closed",
+ getIdentifier()));
+ }
+
for (TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
@Override
return ShardStrategyFactory.getStrategy(path).findShard(path);
}
- protected Future<ActorSelection> sendFindPrimaryShardAsync(String shardName) {
+ protected Future<PrimaryShardInfo> sendFindPrimaryShardAsync(String shardName) {
return actorContext.findPrimaryShardAsync(shardName);
}
+ final TransactionType getTransactionType() {
+ return transactionType;
+ }
+
+ final Semaphore getOperationLimiter() {
+ return operationLimiter;
+ }
+
private TransactionFutureCallback getOrCreateTxFutureCallback(YangInstanceIdentifier path) {
String shardName = shardNameFromIdentifier(path);
+ return getOrCreateTxFutureCallback(shardName);
+ }
+
+ private TransactionFutureCallback getOrCreateTxFutureCallback(String shardName) {
TransactionFutureCallback txFutureCallback = txFutureCallbackMap.get(shardName);
if(txFutureCallback == null) {
- Future<ActorSelection> findPrimaryFuture = sendFindPrimaryShardAsync(shardName);
+ Future<PrimaryShardInfo> findPrimaryFuture = sendFindPrimaryShardAsync(shardName);
- final TransactionFutureCallback newTxFutureCallback = new TransactionFutureCallback(shardName);
+ final TransactionFutureCallback newTxFutureCallback = new TransactionFutureCallback(this, shardName);
txFutureCallback = newTxFutureCallback;
txFutureCallbackMap.put(shardName, txFutureCallback);
- findPrimaryFuture.onComplete(new OnComplete<ActorSelection>() {
+ findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
@Override
- public void onComplete(Throwable failure, ActorSelection primaryShard) {
+ public void onComplete(Throwable failure, PrimaryShardInfo primaryShardInfo) {
if(failure != null) {
- newTxFutureCallback.onComplete(failure, null);
+ newTxFutureCallback.createTransactionContext(failure, null);
} else {
- newTxFutureCallback.setPrimaryShard(primaryShard);
+ newTxFutureCallback.setPrimaryShard(primaryShardInfo.getPrimaryShardActor());
}
}
}, actorContext.getClientDispatcher());
return txFutureCallback;
}
- public String getTransactionChainId() {
+ String getTransactionChainId() {
return transactionChainId;
}
return actorContext;
}
- /**
- * Interfaces for transaction operations to be invoked later.
- */
- private static interface TransactionOperation {
- void invoke(TransactionContext transactionContext);
- }
-
- /**
- * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
- * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
- * retry task after a short delay.
- * <p>
- * The end result from a completed CreateTransaction message is a TransactionContext that is
- * used to perform transaction operations. Transaction operations that occur before the
- * CreateTransaction completes are cache and executed once the CreateTransaction completes,
- * successfully or not.
- */
- private class TransactionFutureCallback extends OnComplete<Object> {
-
- /**
- * The list of transaction operations to execute once the CreateTransaction completes.
- */
- @GuardedBy("txOperationsOnComplete")
- private final List<TransactionOperation> txOperationsOnComplete = Lists.newArrayList();
-
- /**
- * The TransactionContext resulting from the CreateTransaction reply.
- */
- private volatile TransactionContext transactionContext;
-
- /**
- * The target primary shard.
- */
- private volatile ActorSelection primaryShard;
-
- private volatile int createTxTries = (int) (actorContext.getDatastoreContext().
- getShardLeaderElectionTimeout().duration().toMillis() /
- CREATE_TX_TRY_INTERVAL.toMillis());
-
- private final String shardName;
-
- TransactionFutureCallback(String shardName) {
- this.shardName = shardName;
- }
-
- String getShardName() {
- return shardName;
- }
-
- TransactionContext getTransactionContext() {
- return transactionContext;
- }
-
-
- /**
- * Sets the target primary shard and initiates a CreateTransaction try.
- */
- void setPrimaryShard(ActorSelection primaryShard) {
- LOG.debug("Tx {} Primary shard found - trying create transaction", identifier);
-
- this.primaryShard = primaryShard;
- tryCreateTransaction();
- }
-
- /**
- * Adds a TransactionOperation to be executed after the CreateTransaction completes.
- */
- void addTxOperationOnComplete(TransactionOperation operation) {
- boolean invokeOperation = true;
- synchronized(txOperationsOnComplete) {
- if(transactionContext == null) {
- LOG.debug("Tx {} Adding operation on complete {}", identifier);
-
- invokeOperation = false;
- txOperationsOnComplete.add(operation);
- }
- }
-
- if(invokeOperation) {
- operation.invoke(transactionContext);
- }
- }
-
- void enqueueTransactionOperation(final TransactionOperation op) {
-
- if (transactionContext != null) {
- op.invoke(transactionContext);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- addTxOperationOnComplete(op);
- }
- }
-
- /**
- * Performs a CreateTransaction try async.
- */
- private void tryCreateTransaction() {
- Object serializedCreateMessage = new CreateTransaction(identifier.toString(),
- TransactionProxy.this.transactionType.ordinal(),
- getTransactionChainId()).toSerializable();
+ TransactionContext createValidTransactionContext(ActorSelection transactionActor,
+ String transactionPath, short remoteTransactionVersion) {
- Future<Object> createTxFuture = actorContext.executeOperationAsync(primaryShard, serializedCreateMessage);
+ if (transactionType == TransactionType.READ_ONLY) {
+ // Read-only Tx's aren't explicitly closed by the client so we create a PhantomReference
+ // to close the remote Tx's when this instance is no longer in use and is garbage
+ // collected.
- createTxFuture.onComplete(this, actorContext.getClientDispatcher());
- }
+ if(remoteTransactionActorsMB == null) {
+ remoteTransactionActors = Lists.newArrayList();
+ remoteTransactionActorsMB = new AtomicBoolean();
- @Override
- public void onComplete(Throwable failure, Object response) {
- if(failure instanceof NoShardLeaderException) {
- // There's no leader for the shard yet - schedule and try again, unless we're out
- // of retries. Note: createTxTries is volatile as it may be written by different
- // threads however not concurrently, therefore decrementing it non-atomically here
- // is ok.
- if(--createTxTries > 0) {
- LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
- identifier, shardName);
-
- actorContext.getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
- new Runnable() {
- @Override
- public void run() {
- tryCreateTransaction();
- }
- }, actorContext.getClientDispatcher());
- return;
- }
+ TransactionProxyCleanupPhantomReference.track(TransactionProxy.this);
}
- // Mainly checking for state violation here to perform a volatile read of "initialized" to
- // ensure updates to operationLimter et al are visible to this thread (ie we're doing
- // "piggy-back" synchronization here).
- Preconditions.checkState(initialized, "Tx was not propertly initialized.");
-
- // Create the TransactionContext from the response or failure. Store the new
- // TransactionContext locally until we've completed invoking the
- // TransactionOperations. This avoids thread timing issues which could cause
- // out-of-order TransactionOperations. Eg, on a modification operation, if the
- // TransactionContext is non-null, then we directly call the TransactionContext.
- // However, at the same time, the code may be executing the cached
- // TransactionOperations. So to avoid thus timing, we don't publish the
- // TransactionContext until after we've executed all cached TransactionOperations.
- TransactionContext localTransactionContext;
- if(failure != null) {
- LOG.debug("Tx {} Creating NoOpTransaction because of error: {}", identifier,
- failure.getMessage());
-
- localTransactionContext = new NoOpTransactionContext(failure, identifier, operationLimiter);
- } else if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
- localTransactionContext = createValidTransactionContext(
- CreateTransactionReply.fromSerializable(response));
- } else {
- IllegalArgumentException exception = new IllegalArgumentException(String.format(
- "Invalid reply type %s for CreateTransaction", response.getClass()));
-
- localTransactionContext = new NoOpTransactionContext(exception, identifier, operationLimiter);
- }
-
- executeTxOperatonsOnComplete(localTransactionContext);
- }
-
- private void executeTxOperatonsOnComplete(TransactionContext localTransactionContext) {
- while(true) {
- // Access to txOperationsOnComplete and transactionContext must be protected and atomic
- // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
- // issues and ensure no TransactionOperation is missed and that they are processed
- // in the order they occurred.
-
- // We'll make a local copy of the txOperationsOnComplete list to handle re-entrancy
- // in case a TransactionOperation results in another transaction operation being
- // queued (eg a put operation from a client read Future callback that is notified
- // synchronously).
- Collection<TransactionOperation> operationsBatch = null;
- synchronized(txOperationsOnComplete) {
- if(txOperationsOnComplete.isEmpty()) {
- // We're done invoking the TransactionOperations so we can now publish the
- // TransactionContext.
- transactionContext = localTransactionContext;
- break;
- }
-
- operationsBatch = new ArrayList<>(txOperationsOnComplete);
- txOperationsOnComplete.clear();
- }
-
- // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking.
- // A slight down-side is that we need to re-acquire the lock below but this should
- // be negligible.
- for(TransactionOperation oper: operationsBatch) {
- oper.invoke(localTransactionContext);
- }
- }
- }
-
- private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
- String transactionPath = reply.getTransactionPath();
-
- LOG.debug("Tx {} Received {}", identifier, reply);
-
- ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
-
- if (transactionType == TransactionType.READ_ONLY) {
- // Read-only Tx's aren't explicitly closed by the client so we create a PhantomReference
- // to close the remote Tx's when this instance is no longer in use and is garbage
- // collected.
-
- if(remoteTransactionActorsMB == null) {
- remoteTransactionActors = Lists.newArrayList();
- remoteTransactionActorsMB = new AtomicBoolean();
+ // Add the actor to the remoteTransactionActors list for access by the
+ // cleanup PhantonReference.
+ remoteTransactionActors.add(transactionActor);
- TransactionProxyCleanupPhantomReference cleanup =
- new TransactionProxyCleanupPhantomReference(TransactionProxy.this);
- phantomReferenceCache.put(cleanup, cleanup);
- }
-
- // Add the actor to the remoteTransactionActors list for access by the
- // cleanup PhantonReference.
- remoteTransactionActors.add(transactionActor);
-
- // Write to the memory barrier volatile to publish the above update to the
- // remoteTransactionActors list for thread visibility.
- remoteTransactionActorsMB.set(true);
- }
-
- // TxActor is always created where the leader of the shard is.
- // Check if TxActor is created in the same node
- boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
-
- if(reply.getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
- return new TransactionContextImpl(transactionPath, transactionActor, identifier,
- actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
- } else {
- return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, identifier,
- actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
- }
- }
- }
-
- private static class NoOpDOMStoreThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
- static NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
-
- private static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS =
- com.google.common.util.concurrent.Futures.immediateFuture(null);
- private static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS =
- com.google.common.util.concurrent.Futures.immediateFuture(Boolean.TRUE);
-
- private NoOpDOMStoreThreePhaseCommitCohort() {
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- return IMMEDIATE_BOOLEAN_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return IMMEDIATE_VOID_SUCCESS;
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return IMMEDIATE_VOID_SUCCESS;
+ // Write to the memory barrier volatile to publish the above update to the
+ // remoteTransactionActors list for thread visibility.
+ remoteTransactionActorsMB.set(true);
}
- @Override
- public ListenableFuture<Void> commit() {
- return IMMEDIATE_VOID_SUCCESS;
+ // TxActor is always created where the leader of the shard is.
+ // Check if TxActor is created in the same node
+ boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
+
+ if(remoteTransactionVersion < DataStoreVersions.LITHIUM_VERSION) {
+ return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, getIdentifier(),
+ transactionChainId, actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion,
+ operationCompleter);
+ } else {
+ return new TransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
+ actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import com.google.common.base.FinalizablePhantomReference;
+import com.google.common.base.FinalizableReferenceQueue;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A PhantomReference that closes remote transactions for a TransactionProxy when it's
+ * garbage collected. This is used for read-only transactions as they're not explicitly closed
+ * by clients. So the only way to detect that a transaction is no longer in use and it's safe
+ * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
+ * but TransactionProxy instances should generally be short-lived enough to avoid being moved
+ * to the old generation space and thus should be cleaned up in a timely manner as the GC
+ * runs on the young generation (eden, swap1...) space much more frequently.
+ */
+final class TransactionProxyCleanupPhantomReference
+ extends FinalizablePhantomReference<TransactionProxy> {
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionProxyCleanupPhantomReference.class);
+ /**
+ * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
+ * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
+ * trickery to clean up its internal thread when the bundle is unloaded.
+ */
+ private static final FinalizableReferenceQueue phantomReferenceQueue =
+ new FinalizableReferenceQueue();
+
+ /**
+ * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
+ * necessary because PhantomReferences need a hard reference so they're not garbage collected.
+ * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
+ * and thus becomes eligible for garbage collection.
+ */
+ private static final Map<TransactionProxyCleanupPhantomReference,
+ TransactionProxyCleanupPhantomReference> phantomReferenceCache =
+ new ConcurrentHashMap<>();
+
+ private final List<ActorSelection> remoteTransactionActors;
+ private final AtomicBoolean remoteTransactionActorsMB;
+ private final ActorContext actorContext;
+ private final TransactionIdentifier identifier;
+
+ private TransactionProxyCleanupPhantomReference(TransactionProxy referent) {
+ super(referent, phantomReferenceQueue);
+
+ // Note we need to cache the relevant fields from the TransactionProxy as we can't
+ // have a hard reference to the TransactionProxy instance itself.
+
+ remoteTransactionActors = referent.remoteTransactionActors;
+ remoteTransactionActorsMB = referent.remoteTransactionActorsMB;
+ actorContext = referent.actorContext;
+ identifier = referent.getIdentifier();
+ }
+
+ static void track(TransactionProxy referent) {
+ final TransactionProxyCleanupPhantomReference ret = new TransactionProxyCleanupPhantomReference(referent);
+ phantomReferenceCache.put(ret, ret);
+ }
+
+ @Override
+ public void finalizeReferent() {
+ LOG.trace("Cleaning up {} Tx actors for TransactionProxy {}",
+ remoteTransactionActors.size(), identifier);
+
+ phantomReferenceCache.remove(this);
+
+ // Access the memory barrier volatile to ensure all previous updates to the
+ // remoteTransactionActors list are visible to this thread.
+
+ if(remoteTransactionActorsMB.get()) {
+ for(ActorSelection actor : remoteTransactionActors) {
+ LOG.trace("Sending CloseTransaction to {}", actor);
+ actorContext.sendOperationAsync(actor, CloseTransaction.INSTANCE.toSerializable());
+ }
+ }
+ }
+}
\ No newline at end of file
Preconditions.checkState(timerContext != null, "Call run before success");
timerContext.stop();
+ double newRateLimit = calculateNewRateLimit(commitTimer, actorContext.getDatastoreContext());
+
+ LOG.debug("Data Store {} commit rateLimit adjusted to {}", actorContext.getDataStoreType(), newRateLimit);
+
+ actorContext.setTxCreationLimit(newRateLimit);
+ }
+
+ @Override
+ public void failure() {
+ // This would mean we couldn't get a transaction completed in 30 seconds which is
+ // the default transaction commit timeout. Using the timeout information to figure out the rate limit is
+ // not going to be useful - so we leave it as it is
+ }
+
+ private static double calculateNewRateLimit(Timer commitTimer, DatastoreContext context) {
+ if(commitTimer == null) {
+ // This can happen in unit tests.
+ return 0;
+ }
+
Snapshot timerSnapshot = commitTimer.getSnapshot();
double newRateLimit = 0;
- long commitTimeoutInSeconds = actorContext.getDatastoreContext()
- .getShardTransactionCommitTimeoutInSeconds();
+ long commitTimeoutInSeconds = context.getShardTransactionCommitTimeoutInSeconds();
long commitTimeoutInNanos = TimeUnit.SECONDS.toNanos(commitTimeoutInSeconds);
// Find the time that it takes for transactions to get executed in every 10th percentile
if(percentileTimeInNanos > 0) {
// Figure out the rate limit for the i*10th percentile in nanos
- double percentileRateLimit = ((double) commitTimeoutInNanos / percentileTimeInNanos);
+ double percentileRateLimit = (commitTimeoutInNanos / percentileTimeInNanos);
// Add the percentileRateLimit to the total rate limit
newRateLimit += percentileRateLimit;
}
// Compute the rate limit per second
- newRateLimit = newRateLimit/(commitTimeoutInSeconds*10);
-
- LOG.debug("Data Store {} commit rateLimit adjusted to {}", actorContext.getDataStoreType(), newRateLimit);
-
- actorContext.setTxCreationLimit(newRateLimit);
+ return newRateLimit/(commitTimeoutInSeconds*10);
}
- @Override
- public void failure() {
- // This would mean we couldn't get a transaction completed in 30 seconds which is
- // the default transaction commit timeout. Using the timeout information to figure out the rate limit is
- // not going to be useful - so we leave it as it is
+ public static void adjustRateLimitForUnusedTransaction(ActorContext actorContext) {
+ // Unused transactions in one data store can artificially limit the rate for other data stores
+ // if the first data store's rate is still at a lower initial rate since the front-end creates
+ // transactions in each data store up-front even though the client may not actually submit changes.
+ // So we may have to adjust the rate for data stores with unused transactions.
+
+ // First calculate the current rate for the data store. If it's 0 then there have been no
+ // actual transactions committed to the data store.
+
+ double newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(COMMIT),
+ actorContext.getDatastoreContext());
+ if(newRateLimit == 0.0) {
+ // Since we have no rate data for unused Tx's data store, adjust to the rate from another
+ // data store that does have rate data.
+ for(String datastoreType: DatastoreContext.getGlobalDatastoreTypes()) {
+ if(datastoreType.equals(actorContext.getDataStoreType())) {
+ continue;
+ }
+
+ newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(datastoreType, COMMIT),
+ actorContext.getDatastoreContext());
+ if(newRateLimit > 0.0) {
+ LOG.debug("On unused Tx - data Store {} commit rateLimit adjusted to {}",
+ actorContext.getDataStoreType(), newRateLimit);
+
+ actorContext.setTxCreationLimit(newRateLimit);
+ break;
+ }
+ }
+ }
}
}
\ No newline at end of file
package org.opendaylight.controller.cluster.datastore.compat;
import akka.actor.ActorSelection;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.OperationCompleter;
import org.opendaylight.controller.cluster.datastore.TransactionContextImpl;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
/**
* Implementation of TransactionContextImpl used when talking to a pre-Lithium controller that doesn't
* @author Thomas Pantelis
*/
public class PreLithiumTransactionContextImpl extends TransactionContextImpl {
+ private static final Logger LOG = LoggerFactory.getLogger(PreLithiumTransactionContextImpl.class);
+
+ private final String transactionPath;
public PreLithiumTransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
- ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
+ String transactionChainId, ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
short remoteTransactionVersion, OperationCompleter operationCompleter) {
- super(transactionPath, actor, identifier, actorContext, schemaContext, isTxActorLocal,
- remoteTransactionVersion, operationCompleter);
+ super(actor, identifier, transactionChainId, actorContext, schemaContext, isTxActorLocal,
+ remoteTransactionVersion, operationCompleter);
+ this.transactionPath = transactionPath;
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- recordedOperationFutures.add(executeOperationAsync(
- new DeleteData(path, getRemoteTransactionVersion())));
+ executeOperationAsync(new DeleteData(path, getRemoteTransactionVersion()));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordedOperationFutures.add(executeOperationAsync(
- new MergeData(path, data, getRemoteTransactionVersion())));
+ executeOperationAsync(new MergeData(path, data, getRemoteTransactionVersion()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- recordedOperationFutures.add(executeOperationAsync(
- new WriteData(path, data, getRemoteTransactionVersion())));
+ executeOperationAsync(new WriteData(path, data, getRemoteTransactionVersion()));
+ }
+
+ @Override
+ public Future<ActorSelection> readyTransaction() {
+ LOG.debug("Tx {} readyTransaction called", getIdentifier());
+
+ // Send the ReadyTransaction message to the Tx actor.
+
+ Future<Object> lastReplyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
+
+ return transformReadyReply(lastReplyFuture);
+ }
+
+ @Override
+ protected String extractCohortPathFrom(ReadyTransactionReply readyTxReply) {
+ // In base Helium we used to return the local path of the actor which represented
+ // a remote ThreePhaseCommitCohort. The local path would then be converted to
+ // a remote path using this resolvePath method. To maintain compatibility with
+ // a Helium node we need to continue to do this conversion.
+ // At some point in the future when upgrades from Helium are not supported
+ // we could remove this code to resolvePath and just use the cohortPath as the
+ // resolved cohortPath
+ if(getRemoteTransactionVersion() < DataStoreVersions.HELIUM_1_VERSION) {
+ return getActorContext().resolvePath(transactionPath, readyTxReply.getCohortPath());
+ }
+
+ return readyTxReply.getCohortPath();
+ }
+
+ @Override
+ public boolean supportsDirectCommit() {
+ return false;
+ }
+
+ @Override
+ public Future<Object> directCommit() {
+ throw new UnsupportedOperationException("directCommit is not supported for " + getClass());
}
}
public class ShardTransactionIdentifier {
private final String remoteTransactionId;
+ private final String stringRepresentation;
public ShardTransactionIdentifier(String remoteTransactionId) {
this.remoteTransactionId = Preconditions.checkNotNull(remoteTransactionId,
"remoteTransactionId should not be null");
+
+ stringRepresentation = new StringBuilder(remoteTransactionId.length() + 6).append("shard-").
+ append(remoteTransactionId).toString();
}
public String getRemoteTransactionId() {
}
@Override public String toString() {
- final StringBuilder sb = new StringBuilder();
- sb.append("shard-").append(remoteTransactionId);
- return sb.toString();
+ return stringRepresentation;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+/**
+ * JMX bean for general datastore info.
+ *
+ * @author Thomas Pantelis
+ */
+public interface DatastoreInfoMXBean {
+ double getTransactionCreationRateLimit();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+
+/**
+ * Implementation of DatastoreInfoMXBean.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
+
+ private final ActorContext actorContext;
+
+ public DatastoreInfoMXBeanImpl(String mxBeanType, ActorContext actorContext) {
+ super("GeneralRuntimeInfo", mxBeanType, null);
+ this.actorContext = actorContext;
+ }
+
+
+ @Override
+ public double getTransactionCreationRateLimit() {
+ return actorContext.getTxCreationLimit();
+ }
+}
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+import akka.actor.ActorRef;
+import akka.pattern.Patterns;
+import akka.util.Timeout;
+import com.google.common.base.Stopwatch;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
+import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
-import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStats;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Await;
/**
* Maintains statistics for a shard.
public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
public static String JMX_CATEGORY_SHARD = "Shards";
+ private static final Logger LOG = LoggerFactory.getLogger(ShardStats.class);
+
+ private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+
+ private static final Cache<String, OnDemandRaftState> onDemandRaftStateCache =
+ CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.SECONDS).build();
+
private long committedTransactionsCount;
private long readOnlyTransactionCount;
private long readWriteTransactionCount;
- private String leader;
-
- private String raftState;
-
- private long lastLogTerm = -1L;
-
- private long lastLogIndex = -1L;
-
- private long currentTerm = -1L;
-
- private long commitIndex = -1L;
-
- private long lastApplied = -1L;
-
private long lastCommittedTransactionTime;
private long failedTransactionsCount;
private QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
- private long dataSize = 0;
+ private boolean followerInitialSyncStatus = false;
- private final SimpleDateFormat sdf =
- new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+ private ActorRef shardActor;
- private boolean followerInitialSyncStatus = false;
+ private String statRetrievalError;
+
+ private String statRetrievalTime;
+
+ private long leadershipChangeCount;
+
+ private long lastLeadershipChangeTime;
public ShardStats(final String shardName, final String mxBeanType) {
super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor());
}
+ public void setShardActor(ActorRef shardActor) {
+ this.shardActor = shardActor;
+ }
+
+ private OnDemandRaftState getOnDemandRaftState() {
+ String name = getShardName();
+ OnDemandRaftState state = onDemandRaftStateCache.getIfPresent(name);
+ if(state == null) {
+ statRetrievalError = null;
+ statRetrievalTime = null;
+
+ if(shardActor != null) {
+ Timeout timeout = new Timeout(10, TimeUnit.SECONDS);
+ try {
+ Stopwatch timer = Stopwatch.createStarted();
+
+ state = (OnDemandRaftState) Await.result(Patterns.ask(shardActor,
+ GetOnDemandRaftState.INSTANCE, timeout), timeout.duration());
+
+ statRetrievalTime = timer.stop().toString();
+ onDemandRaftStateCache.put(name, state);
+ } catch (Exception e) {
+ statRetrievalError = e.toString();
+ }
+ }
+
+ state = state != null ? state : OnDemandRaftState.builder().build();
+ }
+
+ return state;
+ }
+
@Override
public String getShardName() {
return getMBeanName();
@Override
public String getLeader() {
- return leader;
+ return getOnDemandRaftState().getLeader();
}
@Override
public String getRaftState() {
- return raftState;
+ return getOnDemandRaftState().getRaftState();
}
@Override
@Override
public long getLastLogIndex() {
- return lastLogIndex;
+ return getOnDemandRaftState().getLastLogIndex();
}
@Override
public long getLastLogTerm() {
- return lastLogTerm;
+ return getOnDemandRaftState().getLastLogTerm();
}
@Override
public long getCurrentTerm() {
- return currentTerm;
+ return getOnDemandRaftState().getCurrentTerm();
}
@Override
public long getCommitIndex() {
- return commitIndex;
+ return getOnDemandRaftState().getCommitIndex();
}
@Override
public long getLastApplied() {
- return lastApplied;
+ return getOnDemandRaftState().getLastApplied();
}
@Override
- public String getLastCommittedTransactionTime() {
+ public long getLastIndex() {
+ return getOnDemandRaftState().getLastIndex();
+ }
+
+ @Override
+ public long getLastTerm() {
+ return getOnDemandRaftState().getLastTerm();
+ }
+
+ @Override
+ public long getSnapshotIndex() {
+ return getOnDemandRaftState().getSnapshotIndex();
+ }
+
+ @Override
+ public long getSnapshotTerm() {
+ return getOnDemandRaftState().getSnapshotTerm();
+ }
- return sdf.format(new Date(lastCommittedTransactionTime));
+ @Override
+ public long getReplicatedToAllIndex() {
+ return getOnDemandRaftState().getReplicatedToAllIndex();
+ }
+
+ @Override
+ public String getVotedFor() {
+ return getOnDemandRaftState().getVotedFor();
+ }
+
+ @Override
+ public boolean isSnapshotCaptureInitiated() {
+ return getOnDemandRaftState().isSnapshotCaptureInitiated();
+ }
+
+ @Override
+ public String getLastCommittedTransactionTime() {
+ return DATE_FORMAT.format(new Date(lastCommittedTransactionTime));
}
@Override
return ++abortTransactionsCount;
}
- public void setLeader(final String leader) {
- this.leader = leader;
- }
-
- public void setRaftState(final String raftState) {
- this.raftState = raftState;
- }
-
- public void setLastLogTerm(final long lastLogTerm) {
- this.lastLogTerm = lastLogTerm;
- }
-
- public void setLastLogIndex(final long lastLogIndex) {
- this.lastLogIndex = lastLogIndex;
- }
-
- public void setCurrentTerm(final long currentTerm) {
- this.currentTerm = currentTerm;
- }
-
- public void setCommitIndex(final long commitIndex) {
- this.commitIndex = commitIndex;
- }
-
- public void setLastApplied(final long lastApplied) {
- this.lastApplied = lastApplied;
- }
-
public void setLastCommittedTransactionTime(final long lastCommittedTransactionTime) {
this.lastCommittedTransactionTime = lastCommittedTransactionTime;
}
- public void setInMemoryJournalDataSize(long dataSize){
- this.dataSize = dataSize;
- }
-
@Override
public long getInMemoryJournalDataSize(){
- return dataSize;
- }
-
- @Override
- public ThreadExecutorStats getDataStoreExecutorStats() {
- // FIXME: this particular thing does not work, as it really is DS-specific
- return null;
- }
-
- @Override
- public ThreadExecutorStats getNotificationMgrExecutorStats() {
- return notificationExecutorStatsBean.toThreadExecutorStats();
- }
-
- @Override
- public List<ListenerNotificationQueueStats> getCurrentNotificationMgrListenerQueueStats() {
- return notificationManagerStatsBean.getCurrentListenerQueueStats();
+ return getOnDemandRaftState().getInMemoryJournalDataSize();
}
@Override
- public int getMaxNotificationMgrListenerQueueSize() {
- return notificationManagerStatsBean.getMaxListenerQueueSize();
+ public long getInMemoryJournalLogSize() {
+ return getOnDemandRaftState().getInMemoryJournalLogSize();
}
/**
public boolean getFollowerInitialSyncStatus() {
return followerInitialSyncStatus;
}
+
+ @Override
+ public List<FollowerInfo> getFollowerInfo() {
+ return getOnDemandRaftState().getFollowerInfoList();
+ }
+
+ @Override
+ public String getPeerAddresses() {
+ StringBuilder builder = new StringBuilder();
+ int i = 0;
+ for(Map.Entry<String, String> e: getOnDemandRaftState().getPeerAddresses().entrySet()) {
+ if(i++ > 0) {
+ builder.append(", ");
+ }
+
+ builder.append(e.getKey()).append(": ").append(e.getValue());
+ }
+
+ return builder.toString();
+ }
+
+ @Override
+ public String getStatRetrievalTime() {
+ getOnDemandRaftState();
+ return statRetrievalTime;
+ }
+
+ @Override
+ public String getStatRetrievalError() {
+ getOnDemandRaftState();
+ return statRetrievalError;
+ }
+
+ @Override
+ public long getLeadershipChangeCount() {
+ return leadershipChangeCount;
+ }
+
+ public void incrementLeadershipChangeCount() {
+ leadershipChangeCount++;
+ lastLeadershipChangeTime = System.currentTimeMillis();
+ }
+
+ @Override
+ public String getLastLeadershipChangeTime() {
+ return DATE_FORMAT.format(new Date(lastLeadershipChangeTime));
+ }
}
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
import java.util.List;
-
-import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStats;
-import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
+import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
/**
* @author: syedbahm
String getShardName();
+ String getStatRetrievalTime();
+
+ String getStatRetrievalError();
+
long getCommittedTransactionsCount();
long getReadOnlyTransactionCount();
long getLastApplied();
+ long getLastIndex();
+
+ long getLastTerm();
+
+ long getSnapshotIndex();
+
+ long getSnapshotTerm();
+
+ long getReplicatedToAllIndex();
+
String getLastCommittedTransactionTime();
long getFailedTransactionsCount();
String getRaftState();
- ThreadExecutorStats getDataStoreExecutorStats();
+ String getVotedFor();
- ThreadExecutorStats getNotificationMgrExecutorStats();
-
- List<ListenerNotificationQueueStats> getCurrentNotificationMgrListenerQueueStats();
-
- int getMaxNotificationMgrListenerQueueSize();
+ boolean isSnapshotCaptureInitiated();
void resetTransactionCounters();
long getInMemoryJournalDataSize();
+ long getInMemoryJournalLogSize();
+
boolean getFollowerInitialSyncStatus();
+
+ List<FollowerInfo> getFollowerInfo();
+
+ String getPeerAddresses();
+
+ long getLeadershipChangeCount();
+
+ String getLastLeadershipChangeTime();
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Serializable;
-
-public class ActorNotInitialized implements Serializable {
- private static final long serialVersionUID = 1L;
-}
*/
package org.opendaylight.controller.cluster.datastore.messages;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
/**
public class BatchedModifications extends MutableCompositeModification implements SerializableMessage {
private static final long serialVersionUID = 1L;
+ private boolean ready;
+ private boolean doCommitOnReady;
+ private int totalMessagesSent;
+ private String transactionID;
+ private String transactionChainID;
+
public BatchedModifications() {
}
- public BatchedModifications(short version) {
+ public BatchedModifications(String transactionID, short version, String transactionChainID) {
super(version);
+ this.transactionID = Preconditions.checkNotNull(transactionID, "transactionID can't be null");
+ this.transactionChainID = transactionChainID != null ? transactionChainID : "";
+ }
+
+ public boolean isReady() {
+ return ready;
+ }
+
+ public void setReady(boolean ready) {
+ this.ready = ready;
+ }
+
+ public boolean isDoCommitOnReady() {
+ return doCommitOnReady;
+ }
+
+ public void setDoCommitOnReady(boolean doCommitOnReady) {
+ this.doCommitOnReady = doCommitOnReady;
+ }
+
+ public int getTotalMessagesSent() {
+ return totalMessagesSent;
+ }
+
+ public void setTotalMessagesSent(int totalMessagesSent) {
+ this.totalMessagesSent = totalMessagesSent;
+ }
+
+ public String getTransactionID() {
+ return transactionID;
+ }
+
+ public String getTransactionChainID() {
+ return transactionChainID;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ super.readExternal(in);
+ transactionID = in.readUTF();
+ transactionChainID = in.readUTF();
+ ready = in.readBoolean();
+ totalMessagesSent = in.readInt();
+ doCommitOnReady = in.readBoolean();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal(out);
+ out.writeUTF(transactionID);
+ out.writeUTF(transactionChainID);
+ out.writeBoolean(ready);
+ out.writeInt(totalMessagesSent);
+ out.writeBoolean(doCommitOnReady);
}
@Override
public Object toSerializable() {
return this;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("BatchedModifications [transactionID=").append(transactionID).append(", transactionChainID=")
+ .append(transactionChainID).append(", ready=").append(ready).append(", totalMessagesSent=")
+ .append(totalMessagesSent).append(", modifications size=").append(getModifications().size())
+ .append("]");
+ return builder.toString();
+ }
}
this.numBatched = numBatched;
}
-
public int getNumBatched() {
return numBatched;
}
public Object toSerializable() {
return this;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("BatchedModificationsReply [numBatched=").append(numBatched).append("]");
+ return builder.toString();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+public final class CloseDataTreeChangeListenerRegistration implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static final CloseDataTreeChangeListenerRegistration INSTANCE = new CloseDataTreeChangeListenerRegistration();
+
+ private CloseDataTreeChangeListenerRegistration() {
+ }
+
+ public static CloseDataTreeChangeListenerRegistration getInstance() {
+ return INSTANCE;
+ }
+
+ private Object readResolve() throws ObjectStreamException {
+ return INSTANCE;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+public final class CloseDataTreeChangeListenerRegistrationReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static final CloseDataTreeChangeListenerRegistrationReply INSTANCE = new CloseDataTreeChangeListenerRegistrationReply();
+
+ private CloseDataTreeChangeListenerRegistrationReply() {
+ // Use getInstance() instead
+ }
+
+ public static CloseDataTreeChangeListenerRegistrationReply getInstance() {
+ return INSTANCE;
+ }
+
+ private Object readResolve() throws ObjectStreamException {
+ return INSTANCE;
+ }
+}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-public class DataExistsReply implements SerializableMessage{
+public class DataExistsReply implements SerializableMessage {
public static final Class<ShardTransactionMessages.DataExistsReply> SERIALIZABLE_CLASS =
ShardTransactionMessages.DataExistsReply.class;
+ private static final DataExistsReply TRUE = new DataExistsReply(true, null);
+ private static final DataExistsReply FALSE = new DataExistsReply(false, null);
+ private static final ShardTransactionMessages.DataExistsReply SERIALIZABLE_TRUE =
+ ShardTransactionMessages.DataExistsReply.newBuilder().setExists(true).build();
+ private static final ShardTransactionMessages.DataExistsReply SERIALIZABLE_FALSE =
+ ShardTransactionMessages.DataExistsReply.newBuilder().setExists(false).build();
+
private final boolean exists;
- public DataExistsReply(final boolean exists) {
+ private DataExistsReply(final boolean exists, final Void dummy) {
this.exists = exists;
}
+ /**
+ * @deprecated Use {@link #create(boolean)} instead.
+ * @param exists
+ */
+ @Deprecated
+ public DataExistsReply(final boolean exists) {
+ this(exists, null);
+ }
+
+ public static DataExistsReply create(final boolean exists) {
+ return exists ? TRUE : FALSE;
+ }
+
public boolean exists() {
return exists;
}
- @Override public Object toSerializable() {
- return ShardTransactionMessages.DataExistsReply.newBuilder()
- .setExists(exists).build();
+ @Override
+ public Object toSerializable() {
+ return exists ? SERIALIZABLE_TRUE : SERIALIZABLE_FALSE;
}
- public static DataExistsReply fromSerializable(final Object serializable){
+ public static DataExistsReply fromSerializable(final Object serializable) {
ShardTransactionMessages.DataExistsReply o = (ShardTransactionMessages.DataExistsReply) serializable;
- return new DataExistsReply(o.getExists());
+ return create(o.getExists());
}
-
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * A message about a DataTree having been changed. The message is not
+ * serializable on purpose. For delegating the change across cluster nodes,
+ * this needs to be intercepted by a local agent and forwarded as
+ * a {@link DataTreeDelta}.
+ */
+public final class DataTreeChanged {
+ private final Collection<DataTreeCandidate> changes;
+
+ public DataTreeChanged(final Collection<DataTreeCandidate> changes) {
+ this.changes = Preconditions.checkNotNull(changes);
+ }
+
+ /**
+ * Return the data changes.
+ *
+ * @return Change events
+ */
+ public Collection<DataTreeCandidate> getChanges() {
+ return changes;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+public final class DataTreeChangedReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply();
+
+ private DataTreeChangedReply() {
+ // Use getInstance() instead
+ }
+
+ public static DataTreeChangedReply getInstance() {
+ return INSTANCE;
+ }
+
+ private Object readResolve() throws ObjectStreamException {
+ return INSTANCE;
+ }
+}
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
+import java.io.Serializable;
/**
* The FindPrimary message is used to locate the primary of any given shard
*
*/
-public class FindPrimary implements SerializableMessage{
- public static final Class<FindPrimary> SERIALIZABLE_CLASS = FindPrimary.class;
+public class FindPrimary implements Serializable {
+ private static final long serialVersionUID = 1L;
private final String shardName;
- private final boolean waitUntilInitialized;
+ private final boolean waitUntilReady;
- public FindPrimary(String shardName, boolean waitUntilInitialized){
+ public FindPrimary(String shardName, boolean waitUntilReady) {
Preconditions.checkNotNull(shardName, "shardName should not be null");
this.shardName = shardName;
- this.waitUntilInitialized = waitUntilInitialized;
+ this.waitUntilReady = waitUntilReady;
}
public String getShardName() {
return shardName;
}
- public boolean isWaitUntilInitialized() {
- return waitUntilInitialized;
+ public boolean isWaitUntilReady() {
+ return waitUntilReady;
}
@Override
- public Object toSerializable() {
- return this;
- }
-
- public static FindPrimary fromSerializable(Object message){
- return (FindPrimary) message;
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append(getClass().getName()).append(" [shardName=").append(shardName).append(", waitUntilReady=")
+ .append(waitUntilReady).append("]");
+ return builder.toString();
}
}
*/
package org.opendaylight.controller.cluster.datastore.messages;
+import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
/**
* Transaction ReadyTransaction message that is forwarded to the local Shard from the ShardTransaction.
*/
public class ForwardedReadyTransaction {
private final String transactionID;
- private final DOMStoreThreePhaseCommitCohort cohort;
+ private final ShardDataTreeCohort cohort;
private final Modification modification;
private final boolean returnSerialized;
- private final int txnClientVersion;
+ private final boolean doImmediateCommit;
+ private final short txnClientVersion;
- public ForwardedReadyTransaction(String transactionID, int txnClientVersion,
- DOMStoreThreePhaseCommitCohort cohort, Modification modification,
- boolean returnSerialized) {
+ public ForwardedReadyTransaction(String transactionID, short txnClientVersion,
+ ShardDataTreeCohort cohort, Modification modification,
+ boolean returnSerialized, boolean doImmediateCommit) {
this.transactionID = transactionID;
this.cohort = cohort;
this.modification = modification;
this.returnSerialized = returnSerialized;
this.txnClientVersion = txnClientVersion;
+ this.doImmediateCommit = doImmediateCommit;
}
public String getTransactionID() {
return transactionID;
}
- public DOMStoreThreePhaseCommitCohort getCohort() {
+ public ShardDataTreeCohort getCohort() {
return cohort;
}
return returnSerialized;
}
- public int getTxnClientVersion() {
+ public short getTxnClientVersion() {
return txnClientVersion;
}
+
+ public boolean isDoImmediateCommit() {
+ return doImmediateCommit;
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
+import org.apache.commons.lang3.ObjectUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+
+/**
+ * Local message sent in reply to FindPrimaryShard to indicate the primary shard is local to the caller.
+ *
+ * @author Thomas Pantelis
+ */
+public class LocalPrimaryShardFound {
+
+ private final String primaryPath;
+ private final DataTree localShardDataTree;
+
+ public LocalPrimaryShardFound(@Nonnull String primaryPath, @Nonnull DataTree localShardDataTree) {
+ this.primaryPath = Preconditions.checkNotNull(primaryPath);
+ this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+ }
+
+ public @Nonnull String getPrimaryPath() {
+ return primaryPath;
+ }
+
+ public @Nonnull DataTree getLocalShardDataTree() {
+ return localShardDataTree;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("LocalPrimaryShardFound [primaryPath=").append(primaryPath).append(", localShardDataTree=")
+ .append(ObjectUtils.identityToString(localShardDataTree)).append("]");
+ return builder.toString();
+ }
+}
package org.opendaylight.controller.cluster.datastore.messages;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
public class PeerAddressResolved {
- private final ShardIdentifier peerId;
+ private final String peerId;
private final String peerAddress;
- public PeerAddressResolved(ShardIdentifier peerId, String peerAddress) {
+ public PeerAddressResolved(String peerId, String peerAddress) {
this.peerId = peerId;
this.peerAddress = peerAddress;
}
- public ShardIdentifier getPeerId() {
+ public String getPeerId() {
return peerId;
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-
-public class PrimaryFound implements SerializableMessage {
- public static final Class<PrimaryFound> SERIALIZABLE_CLASS = PrimaryFound.class;
- private final String primaryPath;
-
- public PrimaryFound(final String primaryPath) {
- this.primaryPath = primaryPath;
- }
-
- public String getPrimaryPath() {
- return primaryPath;
- }
-
- @Override
- public boolean equals(final Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- PrimaryFound that = (PrimaryFound) o;
-
- if (!primaryPath.equals(that.primaryPath)) {
- return false;
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- return primaryPath.hashCode();
- }
-
- @Override
- public String toString() {
- return "PrimaryFound{" +
- "primaryPath='" + primaryPath + '\'' +
- '}';
- }
-
-
- @Override
- public Object toSerializable() {
- return this;
- }
-
- public static PrimaryFound fromSerializable(final Object message){
- return (PrimaryFound) message;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import com.google.common.base.Preconditions;
-
-public class PrimaryNotFound implements SerializableMessage {
- public static final Class<PrimaryNotFound> SERIALIZABLE_CLASS = PrimaryNotFound.class;
-
- private final String shardName;
-
- public PrimaryNotFound(final String shardName){
-
- Preconditions.checkNotNull(shardName, "shardName should not be null");
-
- this.shardName = shardName;
- }
-
- @Override
- public boolean equals(final Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- PrimaryNotFound that = (PrimaryNotFound) o;
-
- if (shardName != null ? !shardName.equals(that.shardName) : that.shardName != null) {
- return false;
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- return shardName != null ? shardName.hashCode() : 0;
- }
-
- @Override
- public Object toSerializable() {
- return this;
- }
-
- public static PrimaryNotFound fromSerializable(final Object message){
- return (PrimaryNotFound) message;
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorSelection;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+
+/**
+ * Local message DTO that contains information about the primary shard.
+ *
+ * @author Thomas Pantelis
+ */
+public class PrimaryShardInfo {
+ private final ActorSelection primaryShardActor;
+ private final Optional<DataTree> localShardDataTree;
+
+ public PrimaryShardInfo(@Nonnull ActorSelection primaryShardActor, @Nonnull Optional<DataTree> localShardDataTree) {
+ this.primaryShardActor = Preconditions.checkNotNull(primaryShardActor);
+ this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+ }
+
+ /**
+ * Returns an ActorSelection representing the primary shard actor.
+ */
+ public @Nonnull ActorSelection getPrimaryShardActor() {
+ return primaryShardActor;
+ }
+
+ /**
+ * Returns an Optional whose value contains the primary shard's DataTree if the primary shard is local
+ * to the caller. Otherwise the Optional value is absent.
+ */
+ public @Nonnull Optional<DataTree> getLocalShardDataTree() {
+ return localShardDataTree;
+ }
+}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+@Deprecated
public class ReadyTransaction implements SerializableMessage{
public static final Class<ShardTransactionMessages.ReadyTransaction> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadyTransaction.class;
package org.opendaylight.controller.cluster.datastore.messages;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-public class ReadyTransactionReply implements SerializableMessage {
+public class ReadyTransactionReply extends VersionedExternalizableMessage {
+ private static final long serialVersionUID = 1L;
+
public static final Class<ShardTransactionMessages.ReadyTransactionReply> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadyTransactionReply.class;
- private final String cohortPath;
+ private String cohortPath;
+
+ public ReadyTransactionReply() {
+ }
public ReadyTransactionReply(String cohortPath) {
+ this(cohortPath, DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public ReadyTransactionReply(String cohortPath, short version) {
+ super(version);
this.cohortPath = cohortPath;
}
}
@Override
- public ShardTransactionMessages.ReadyTransactionReply toSerializable() {
- return ShardTransactionMessages.ReadyTransactionReply.newBuilder()
- .setActorPath(cohortPath)
- .build();
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ super.readExternal(in);
+ cohortPath = in.readUTF();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal(out);
+ out.writeUTF(cohortPath);
+ }
+
+ @Override
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
+ return this;
+ } else {
+ return ShardTransactionMessages.ReadyTransactionReply.newBuilder().setActorPath(cohortPath).build();
+ }
}
public static ReadyTransactionReply fromSerializable(Object serializable) {
- ShardTransactionMessages.ReadyTransactionReply o =
- (ShardTransactionMessages.ReadyTransactionReply) serializable;
+ if(serializable instanceof ReadyTransactionReply) {
+ return (ReadyTransactionReply)serializable;
+ } else {
+ ShardTransactionMessages.ReadyTransactionReply o =
+ (ShardTransactionMessages.ReadyTransactionReply) serializable;
+ return new ReadyTransactionReply(o.getActorPath(), DataStoreVersions.HELIUM_2_VERSION);
+ }
+ }
- return new ReadyTransactionReply(o.getActorPath());
+ public static boolean isSerializedType(Object message) {
+ return message instanceof ReadyTransactionReply ||
+ message instanceof ShardTransactionMessages.ReadyTransactionReply;
}
}
package org.opendaylight.controller.cluster.datastore.messages;
import akka.actor.ActorPath;
+import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import akka.serialization.Serialization;
import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
ListenerRegistrationMessages.RegisterChangeListener.class;
private final YangInstanceIdentifier path;
- private final ActorPath dataChangeListenerPath;
+ private final ActorRef dataChangeListener;
private final AsyncDataBroker.DataChangeScope scope;
public RegisterChangeListener(YangInstanceIdentifier path,
- ActorPath dataChangeListenerPath,
+ ActorRef dataChangeListener,
AsyncDataBroker.DataChangeScope scope) {
this.path = path;
- this.dataChangeListenerPath = dataChangeListenerPath;
+ this.dataChangeListener = dataChangeListener;
this.scope = scope;
}
}
public ActorPath getDataChangeListenerPath() {
- return dataChangeListenerPath;
+ return dataChangeListener.path();
}
public ListenerRegistrationMessages.RegisterChangeListener toSerializable() {
return ListenerRegistrationMessages.RegisterChangeListener.newBuilder()
.setInstanceIdentifierPath(InstanceIdentifierUtils.toSerializable(path))
- .setDataChangeListenerActorPath(dataChangeListenerPath.toString())
+ .setDataChangeListenerActorPath(Serialization.serializedActorPath(dataChangeListener))
.setDataChangeScope(scope.ordinal()).build();
}
- public static RegisterChangeListener fromSerializable(ActorSystem actorSystem,Object serializable){
+ public static RegisterChangeListener fromSerializable(ActorSystem actorSystem, Object serializable){
ListenerRegistrationMessages.RegisterChangeListener o = (ListenerRegistrationMessages.RegisterChangeListener) serializable;
return new RegisterChangeListener(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPath()),
- actorSystem.actorFor(o.getDataChangeListenerActorPath()).path(),
+ actorSystem.provider().resolveActorRef(o.getDataChangeListenerActorPath()),
AsyncDataBroker.DataChangeScope.values()[o.getDataChangeScope()]);
}
package org.opendaylight.controller.cluster.datastore.messages;
import akka.actor.ActorPath;
+import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import akka.serialization.Serialization;
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
public class RegisterChangeListenerReply implements SerializableMessage{
public static final Class<ListenerRegistrationMessages.RegisterChangeListenerReply> SERIALIZABLE_CLASS =
ListenerRegistrationMessages.RegisterChangeListenerReply.class;
- private final ActorPath listenerRegistrationPath;
+ private final ActorRef listenerRegistration;
- public RegisterChangeListenerReply(final ActorPath listenerRegistrationPath) {
- this.listenerRegistrationPath = listenerRegistrationPath;
+ public RegisterChangeListenerReply(final ActorRef listenerRegistration) {
+ this.listenerRegistration = listenerRegistration;
}
public ActorPath getListenerRegistrationPath() {
- return listenerRegistrationPath;
+ return listenerRegistration.path();
}
@Override
public ListenerRegistrationMessages.RegisterChangeListenerReply toSerializable() {
return ListenerRegistrationMessages.RegisterChangeListenerReply.newBuilder()
- .setListenerRegistrationPath(listenerRegistrationPath.toString()).build();
+ .setListenerRegistrationPath(Serialization.serializedActorPath(listenerRegistration)).build();
}
public static RegisterChangeListenerReply fromSerializable(final ActorSystem actorSystem,final Object serializable){
ListenerRegistrationMessages.RegisterChangeListenerReply o = (ListenerRegistrationMessages.RegisterChangeListenerReply) serializable;
return new RegisterChangeListenerReply(
- actorSystem.actorFor(o.getListenerRegistrationPath()).path()
+ actorSystem.provider().resolveActorRef(o.getListenerRegistrationPath())
);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorRef;
+import com.google.common.base.Preconditions;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Request a {@link org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener} registration be made on the shard
+ * leader.
+ */
+public final class RegisterDataTreeChangeListener implements Externalizable {
+ private static final long serialVersionUID = 1L;
+ private ActorRef dataTreeChangeListenerPath;
+ private YangInstanceIdentifier path;
+
+ public RegisterDataTreeChangeListener(final YangInstanceIdentifier path, final ActorRef dataTreeChangeListenerPath) {
+ this.path = Preconditions.checkNotNull(path);
+ this.dataTreeChangeListenerPath = Preconditions.checkNotNull(dataTreeChangeListenerPath);
+ }
+
+ public YangInstanceIdentifier getPath() {
+ return path;
+ }
+
+ public ActorRef getDataTreeChangeListenerPath() {
+ return dataTreeChangeListenerPath;
+ }
+
+ @Override
+ public void writeExternal(final ObjectOutput out) throws IOException {
+ out.writeObject(dataTreeChangeListenerPath);
+ SerializationUtils.serializePath(path, out);
+ }
+
+ @Override
+ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+ dataTreeChangeListenerPath = (ActorRef) in.readObject();
+ path = SerializationUtils.deserializePath(in);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorPath;
+import akka.actor.ActorRef;
+import com.google.common.base.Preconditions;
+
+import java.io.Serializable;
+
+/**
+ * Successful reply to a {@link RegisterDataTreeChangeListener} request.
+ */
+public final class RegisterDataTreeChangeListenerReply implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private final ActorRef listenerRegistrationPath;
+
+ public RegisterDataTreeChangeListenerReply(final ActorRef listenerRegistrationPath) {
+ this.listenerRegistrationPath = Preconditions.checkNotNull(listenerRegistrationPath);
+ }
+
+ public ActorPath getListenerRegistrationPath() {
+ return listenerRegistrationPath.path();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+/**
+ * A remote message sent to locate the primary shard.
+ *
+ * @author Thomas Pantelis
+ */
+public class RemoteFindPrimary extends FindPrimary {
+ private static final long serialVersionUID = 1L;
+
+ public RemoteFindPrimary(String shardName, boolean waitUntilReady) {
+ super(shardName, waitUntilReady);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.Serializable;
+
+/**
+ * Local or remote message sent in reply to FindPrimaryShard to indicate the primary shard is remote to the caller.
+ */
+public class RemotePrimaryShardFound implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final String primaryPath;
+
+ public RemotePrimaryShardFound(final String primaryPath) {
+ this.primaryPath = primaryPath;
+ }
+
+ public String getPrimaryPath() {
+ return primaryPath;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("RemotePrimaryShardFound [primaryPath=").append(primaryPath).append("]");
+ return builder.toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+
+/**
+ * A local message derived from LeaderStateChanged containing additional Shard-specific info that is sent
+ * when some state of the shard leader has changed. This message is used by the ShardManager to maintain
+ * current Shard information.
+ *
+ * @author Thomas Pantelis
+ */
+public class ShardLeaderStateChanged extends LeaderStateChanged {
+
+ private final Optional<DataTree> localShardDataTree;
+
+ public ShardLeaderStateChanged(@Nonnull String memberId, @Nonnull String leaderId,
+ @Nonnull Optional<DataTree> localShardDataTree) {
+ super(memberId, leaderId);
+ this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+ }
+
+ public @Nonnull Optional<DataTree> getLocalShardDataTree() {
+ return localShardDataTree;
+ }
+}
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
/**
* Abstract base class for a versioned Externalizable message.
public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage {
private static final long serialVersionUID = 1L;
- private short version;
+ private short version = DataStoreVersions.CURRENT_VERSION;
public VersionedExternalizableMessage() {
}
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* DeleteModification store all the parameters required to delete a path from the data tree
transaction.delete(getPath());
}
+ @Override
+ public void apply(DataTreeModification transaction) {
+ transaction.delete(getPath());
+ }
+
@Override
public byte getType() {
return DELETE;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* MergeModification stores all the parameters required to merge data into the specified path
transaction.merge(getPath(), getData());
}
+ @Override
+ public void apply(final DataTreeModification transaction) {
+ transaction.merge(getPath(), getData());
+ }
+
@Override
public byte getType() {
return MERGE;
import java.io.Externalizable;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* Represents a modification to the data store.
*/
void apply(DOMStoreWriteTransaction transaction);
+ /**
+ * Apply the modification to the specified transaction
+ *
+ * @param transaction
+ */
+ void apply(DataTreeModification transaction);
+
byte getType();
@Deprecated
out.write(serializedPayload);
}
- @SuppressWarnings("rawtypes")
@Override
@Deprecated
public <T> Map<GeneratedExtension, T> encode() {
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* MutableCompositeModification is just a mutable version of a
}
}
+ @Override
+ public void apply(DataTreeModification transaction) {
+ for (Modification modification : modifications) {
+ modification.apply(transaction);
+ }
+ }
+
@Override
public byte getType() {
return COMPOSITE;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* WriteModification stores all the parameters required to write data to the specified path
transaction.write(getPath(), data);
}
+ @Override
+ public void apply(final DataTreeModification transaction) {
+ transaction.write(getPath(), data);
+ }
+
public NormalizedNode<?, ?> getData() {
return data;
}
import akka.actor.PoisonPill;
import akka.dispatch.Futures;
import akka.dispatch.Mapper;
+import akka.dispatch.OnComplete;
import akka.pattern.AskTimeoutException;
import akka.util.Timeout;
-import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import org.opendaylight.controller.cluster.datastore.Configuration;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
-import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
+import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
+import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*/
public class ActorContext {
private static final Logger LOG = LoggerFactory.getLogger(ActorContext.class);
- private static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
private static final String METRIC_RATE = "rate";
- private static final String DOMAIN = "org.opendaylight.controller.cluster.datastore";
private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
new Mapper<Throwable, Throwable>() {
@Override
private Timeout operationTimeout;
private final String selfAddressHostPort;
private RateLimiter txRateLimiter;
- private final MetricRegistry metricRegistry = new MetricRegistry();
- private final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(DOMAIN).build();
private final int transactionOutstandingOperationLimit;
private Timeout transactionCommitOperationTimeout;
+ private Timeout shardInitializationTimeout;
private final Dispatchers dispatchers;
- private final Cache<String, Future<ActorSelection>> primaryShardActorSelectionCache;
+ private Cache<String, Future<PrimaryShardInfo>> primaryShardInfoCache;
private volatile SchemaContext schemaContext;
private volatile boolean updated;
+ private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DatastoreContext.METRICS_DOMAIN).getMetricsRegistry();
public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
ClusterWrapper clusterWrapper, Configuration configuration) {
this.dispatchers = new Dispatchers(actorSystem.dispatchers());
setCachedProperties();
- primaryShardActorSelectionCache = CacheBuilder.newBuilder()
- .expireAfterWrite(datastoreContext.getShardLeaderElectionTimeout().duration().toMillis(), TimeUnit.MILLISECONDS)
- .build();
-
- operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(), TimeUnit.SECONDS);
- operationTimeout = new Timeout(operationDuration);
- transactionCommitOperationTimeout = new Timeout(Duration.create(getDatastoreContext().getShardTransactionCommitTimeoutInSeconds(),
- TimeUnit.SECONDS));
Address selfAddress = clusterWrapper.getSelfAddress();
if (selfAddress != null && !selfAddress.host().isEmpty()) {
}
transactionOutstandingOperationLimit = new CommonConfig(this.getActorSystem().settings().config()).getMailBoxCapacity();
- jmxReporter.start();
-
}
private void setCachedProperties() {
transactionCommitOperationTimeout = new Timeout(Duration.create(
datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS));
+
+ shardInitializationTimeout = new Timeout(datastoreContext.getShardInitializationTimeout().duration().$times(2));
+
+ primaryShardInfoCache = CacheBuilder.newBuilder()
+ .expireAfterWrite(datastoreContext.getShardLeaderElectionTimeout().duration().toMillis(), TimeUnit.MILLISECONDS)
+ .build();
}
public DatastoreContext getDatastoreContext() {
return schemaContext;
}
- /**
- * Finds the primary shard for the given shard name
- *
- * @param shardName
- * @return
- */
- public Optional<ActorSelection> findPrimaryShard(String shardName) {
- String path = findPrimaryPathOrNull(shardName);
- if (path == null){
- return Optional.absent();
- }
- return Optional.of(actorSystem.actorSelection(path));
- }
-
- public Future<ActorSelection> findPrimaryShardAsync(final String shardName) {
- Future<ActorSelection> ret = primaryShardActorSelectionCache.getIfPresent(shardName);
+ public Future<PrimaryShardInfo> findPrimaryShardAsync(final String shardName) {
+ Future<PrimaryShardInfo> ret = primaryShardInfoCache.getIfPresent(shardName);
if(ret != null){
return ret;
}
Future<Object> future = executeOperationAsync(shardManager,
- new FindPrimary(shardName, true).toSerializable(),
- datastoreContext.getShardInitializationTimeout());
+ new FindPrimary(shardName, true), shardInitializationTimeout);
- return future.transform(new Mapper<Object, ActorSelection>() {
+ return future.transform(new Mapper<Object, PrimaryShardInfo>() {
@Override
- public ActorSelection checkedApply(Object response) throws Exception {
- if(PrimaryFound.SERIALIZABLE_CLASS.isInstance(response)) {
- PrimaryFound found = PrimaryFound.fromSerializable(response);
-
- LOG.debug("Primary found {}", found.getPrimaryPath());
- ActorSelection actorSelection = actorSystem.actorSelection(found.getPrimaryPath());
- primaryShardActorSelectionCache.put(shardName, Futures.successful(actorSelection));
- return actorSelection;
- } else if(response instanceof ActorNotInitialized) {
- throw new NotInitializedException(
- String.format("Found primary shard %s but it's not initialized yet. " +
- "Please try again later", shardName));
- } else if(response instanceof PrimaryNotFound) {
- throw new PrimaryNotFoundException(
- String.format("No primary shard found for %S.", shardName));
+ public PrimaryShardInfo checkedApply(Object response) throws Exception {
+ if(response instanceof RemotePrimaryShardFound) {
+ LOG.debug("findPrimaryShardAsync received: {}", response);
+ return onPrimaryShardFound(shardName, ((RemotePrimaryShardFound)response).getPrimaryPath(), null);
+ } else if(response instanceof LocalPrimaryShardFound) {
+ LOG.debug("findPrimaryShardAsync received: {}", response);
+ LocalPrimaryShardFound found = (LocalPrimaryShardFound)response;
+ return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getLocalShardDataTree());
+ } else if(response instanceof NotInitializedException) {
+ throw (NotInitializedException)response;
+ } else if(response instanceof PrimaryNotFoundException) {
+ throw (PrimaryNotFoundException)response;
+ } else if(response instanceof NoShardLeaderException) {
+ throw (NoShardLeaderException)response;
}
throw new UnknownMessageException(String.format(
}, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
}
+ private PrimaryShardInfo onPrimaryShardFound(String shardName, String primaryActorPath,
+ DataTree localShardDataTree) {
+ ActorSelection actorSelection = actorSystem.actorSelection(primaryActorPath);
+ PrimaryShardInfo info = new PrimaryShardInfo(actorSelection, Optional.fromNullable(localShardDataTree));
+ primaryShardInfoCache.put(shardName, Futures.successful(info));
+ return info;
+ }
+
/**
* Finds a local shard given its shard name and return it's ActorRef
*
*/
public Future<ActorRef> findLocalShardAsync( final String shardName) {
Future<Object> future = executeOperationAsync(shardManager,
- new FindLocalShard(shardName, true), datastoreContext.getShardInitializationTimeout());
+ new FindLocalShard(shardName, true), shardInitializationTimeout);
return future.map(new Mapper<Object, ActorRef>() {
@Override
LocalShardFound found = (LocalShardFound)response;
LOG.debug("Local shard found {}", found.getPath());
return found.getPath();
- } else if(response instanceof ActorNotInitialized) {
- throw new NotInitializedException(
- String.format("Found local shard for %s but it's not initialized yet.",
- shardName));
+ } else if(response instanceof NotInitializedException) {
+ throw (NotInitializedException)response;
} else if(response instanceof LocalShardNotFound) {
throw new LocalShardNotFoundException(
String.format("Local shard for %s does not exist.", shardName));
}, getClientDispatcher());
}
- private String findPrimaryPathOrNull(String shardName) {
- Object result = executeOperation(shardManager, new FindPrimary(shardName, false).toSerializable());
-
- if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
- PrimaryFound found = PrimaryFound.fromSerializable(result);
-
- LOG.debug("Primary found {}", found.getPrimaryPath());
- return found.getPrimaryPath();
-
- } else if (result.getClass().equals(ActorNotInitialized.class)){
- throw new NotInitializedException(
- String.format("Found primary shard[%s] but its not initialized yet. Please try again later", shardName)
- );
-
- } else {
- return null;
- }
- }
-
-
/**
* Executes an operation on a local actor and wait for it's response
*
*
* @param message
*/
- public void broadcast(Object message){
- for(String shardName : configuration.getAllShardNames()){
-
- Optional<ActorSelection> primary = findPrimaryShard(shardName);
- if (primary.isPresent()) {
- primary.get().tell(message, ActorRef.noSender());
- } else {
- LOG.warn("broadcast failed to send message {} to shard {}. Primary not found",
- message.getClass().getSimpleName(), shardName);
- }
+ public void broadcast(final Object message){
+ for(final String shardName : configuration.getAllShardNames()){
+
+ Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
+ primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
+ @Override
+ public void onComplete(Throwable failure, PrimaryShardInfo primaryShardInfo) {
+ if(failure != null) {
+ LOG.warn("broadcast failed to send message {} to shard {}: {}",
+ message.getClass().getSimpleName(), shardName, failure);
+ } else {
+ primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
+ }
+ }
+ }, getClientDispatcher());
}
}
* @return
*/
public Timer getOperationTimer(String operationName){
- final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, datastoreContext.getDataStoreType(), operationName, METRIC_RATE);
+ return getOperationTimer(datastoreContext.getDataStoreType(), operationName);
+ }
+
+ public Timer getOperationTimer(String dataStoreType, String operationName){
+ final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
+ operationName, METRIC_RATE);
return metricRegistry.timer(rate);
}
return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
}
+ public Configuration getConfiguration() {
+ return configuration;
+ }
+
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout){
return ask(actorRef, message, timeout);
}
}
@VisibleForTesting
- Cache<String, Future<ActorSelection>> getPrimaryShardActorSelectionCache() {
- return primaryShardActorSelectionCache;
+ Cache<String, Future<PrimaryShardInfo>> getPrimaryShardInfoCache() {
+ return primaryShardInfoCache;
}
}
private static final Context NO_OP_CONTEXT = new NoOpContext();
- private final Class expectedMessageClass;
+ private final Class<?> expectedMessageClass;
private final long expectedArrivalInterval;
* @param expectedArrivalIntervalInMillis The expected arrival interval between two instances of the expected
* message
*/
- public MessageTracker(Class expectedMessageClass, long expectedArrivalIntervalInMillis){
+ public MessageTracker(Class<?> expectedMessageClass, long expectedArrivalIntervalInMillis){
this.expectedMessageClass = expectedMessageClass;
this.expectedArrivalInterval = expectedArrivalIntervalInMillis;
}
}
public static class MessageProcessingTime {
- private final Class messageClass;
+ private final Class<?> messageClass;
private final long elapsedTimeInNanos;
- MessageProcessingTime(Class messageClass, long elapsedTimeInNanos){
+ MessageProcessingTime(Class<?> messageClass, long elapsedTimeInNanos){
this.messageClass = messageClass;
this.elapsedTimeInNanos = elapsedTimeInNanos;
}
'}';
}
- public Class getMessageClass() {
+ public Class<?> getMessageClass() {
return messageClass;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Optional;
+import java.util.List;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class NormalizedNodeAggregator {
+ private final YangInstanceIdentifier rootIdentifier;
+ private final List<Optional<NormalizedNode<?, ?>>> nodes;
+ private final DataTree dataTree;
+
+ private NormalizedNodeAggregator(YangInstanceIdentifier rootIdentifier, List<Optional<NormalizedNode<?, ?>>> nodes,
+ SchemaContext schemaContext) {
+ this.rootIdentifier = rootIdentifier;
+ this.nodes = nodes;
+ this.dataTree = InMemoryDataTreeFactory.getInstance().create();
+ this.dataTree.setSchemaContext(schemaContext);
+ }
+
+ /**
+ * Combine data from all the nodes in the list into a tree with root as rootIdentifier
+ *
+ * @param nodes
+ * @param schemaContext
+ * @return
+ * @throws DataValidationFailedException
+ */
+ public static Optional<NormalizedNode<?,?>> aggregate(YangInstanceIdentifier rootIdentifier,
+ List<Optional<NormalizedNode<?, ?>>> nodes,
+ SchemaContext schemaContext) throws DataValidationFailedException {
+ return new NormalizedNodeAggregator(rootIdentifier, nodes, schemaContext).aggregate();
+ }
+
+ private Optional<NormalizedNode<?,?>> aggregate() throws DataValidationFailedException {
+ return combine().getRootNode();
+ }
+
+ private NormalizedNodeAggregator combine() throws DataValidationFailedException {
+ DataTreeModification mod = dataTree.takeSnapshot().newModification();
+
+ for (Optional<NormalizedNode<?,?>> node : nodes) {
+ if (node.isPresent()) {
+ mod.merge(rootIdentifier, node.get());
+ }
+ }
+
+ dataTree.validate(mod);
+ final DataTreeCandidate candidate = dataTree.prepare(mod);
+ dataTree.commit(candidate);
+
+ return this;
+ }
+
+ private Optional<NormalizedNode<?, ?>> getRootNode() {
+ return dataTree.takeSnapshot().readNode(rootIdentifier);
+ }
+}
}
leaf shard-journal-recovery-log-batch-size {
- default 5000;
+ default 1000;
type non-zero-uint32-type;
description "The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.";
}
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.actor.Props;
+import akka.dispatch.Dispatchers;
import akka.japi.Creator;
import akka.testkit.TestActorRef;
import com.google.common.base.Function;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
}
protected Props newShardProps() {
- return Shard.props(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ return Shard.props(shardID, Collections.<String,String>emptyMap(),
newDatastoreContext(), SCHEMA_CONTEXT);
}
Creator<Shard> creator = new Creator<Shard>() {
@Override
public Shard create() throws Exception {
- return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ return new Shard(shardID, Collections.<String,String>emptyMap(),
newDatastoreContext(), SCHEMA_CONTEXT) {
@Override
protected void onRecoveryComplete() {
};
TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- Props.create(new DelegatingShardCreator(creator)), "testRecovery");
+ Props.create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()), "testRecovery");
assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
- protected void verifyLastLogIndex(TestActorRef<Shard> shard, long expectedValue) {
+ protected void verifyLastApplied(TestActorRef<Shard> shard, long expectedValue) {
+ long lastApplied = -1;
for(int i = 0; i < 20 * 5; i++) {
- long lastLogIndex = shard.underlyingActor().getShardMBean().getLastLogIndex();
- if(lastLogIndex == expectedValue) {
- break;
+ lastApplied = shard.underlyingActor().getShardMBean().getLastApplied();
+ if(lastApplied == expectedValue) {
+ return;
}
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
}
- assertEquals("Last log index", expectedValue, shard.underlyingActor().getShardMBean().getLastLogIndex());
+ Assert.fail(String.format("Expected last applied: %d, Actual: %d", expectedValue, lastApplied));
}
- protected NormalizedNode<?, ?> readStore(final InMemoryDOMDataStore store) throws ReadFailedException {
- DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
- transaction.read(YangInstanceIdentifier.builder().build());
-
- Optional<NormalizedNode<?, ?>> optional = read.checkedGet();
-
- NormalizedNode<?, ?> normalizedNode = optional.get();
-
- transaction.close();
-
- return normalizedNode;
- }
-
- protected DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(final String cohortName,
- final InMemoryDOMDataStore dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ protected ShardDataTreeCohort setupMockWriteTransaction(final String cohortName,
+ final ShardDataTree dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
final MutableCompositeModification modification) {
return setupMockWriteTransaction(cohortName, dataStore, path, data, modification, null);
}
- protected DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(final String cohortName,
- final InMemoryDOMDataStore dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ protected ShardDataTreeCohort setupMockWriteTransaction(final String cohortName,
+ final ShardDataTree dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
final MutableCompositeModification modification,
- final Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit) {
+ final Function<ShardDataTreeCohort, ListenableFuture<Void>> preCommit) {
- DOMStoreWriteTransaction tx = dataStore.newWriteOnlyTransaction();
- tx.write(path, data);
- DOMStoreThreePhaseCommitCohort cohort = createDelegatingMockCohort(cohortName, tx.ready(), preCommit);
+ ReadWriteShardDataTreeTransaction tx = dataStore.newReadWriteTransaction("setup-mock-" + cohortName, null);
+ tx.getSnapshot().write(path, data);
+ ShardDataTreeCohort cohort = createDelegatingMockCohort(cohortName, dataStore.finishTransaction(tx), preCommit);
modification.addModification(new WriteModification(path, data));
return cohort;
}
- protected DOMStoreThreePhaseCommitCohort createDelegatingMockCohort(final String cohortName,
- final DOMStoreThreePhaseCommitCohort actual) {
+ protected ShardDataTreeCohort createDelegatingMockCohort(final String cohortName,
+ final ShardDataTreeCohort actual) {
return createDelegatingMockCohort(cohortName, actual, null);
}
- protected DOMStoreThreePhaseCommitCohort createDelegatingMockCohort(final String cohortName,
- final DOMStoreThreePhaseCommitCohort actual,
- final Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit) {
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, cohortName);
+ protected ShardDataTreeCohort createDelegatingMockCohort(final String cohortName,
+ final ShardDataTreeCohort actual,
+ final Function<ShardDataTreeCohort, ListenableFuture<Void>> preCommit) {
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, cohortName);
doAnswer(new Answer<ListenableFuture<Boolean>>() {
@Override
doAnswer(new Answer<ListenableFuture<Void>>() {
@Override
public ListenableFuture<Void> answer(final InvocationOnMock invocation) throws Throwable {
- return actual.preCommit();
+ if(preCommit != null) {
+ return preCommit.apply(actual);
+ } else {
+ return actual.preCommit();
+ }
}
}).when(cohort).preCommit();
}
}).when(cohort).abort();
+ doAnswer(new Answer<DataTreeCandidateTip>() {
+ @Override
+ public DataTreeCandidateTip answer(final InvocationOnMock invocation) {
+ return actual.getCandidate();
+ }
+ }).when(cohort).getCandidate();
+
return cohort;
}
public static NormalizedNode<?,?> readStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id)
throws ExecutionException, InterruptedException {
- return readStore(shard.underlyingActor().getDataStore(), id);
+ return readStore(shard.underlyingActor().getDataStore().getDataTree(), id);
}
- public static NormalizedNode<?,?> readStore(final InMemoryDOMDataStore store, final YangInstanceIdentifier id)
- throws ExecutionException, InterruptedException {
- DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
-
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
- transaction.read(id);
+ public static NormalizedNode<?,?> readStore(final DataTree store, final YangInstanceIdentifier id) {
+ DataTreeSnapshot transaction = store.takeSnapshot();
- Optional<NormalizedNode<?, ?>> optional = future.get();
+ Optional<NormalizedNode<?, ?>> optional = transaction.readNode(id);
NormalizedNode<?, ?> node = optional.isPresent()? optional.get() : null;
- transaction.close();
-
return node;
}
public static void writeToStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws ExecutionException, InterruptedException {
+ final NormalizedNode<?,?> node) throws InterruptedException, ExecutionException {
writeToStore(shard.underlyingActor().getDataStore(), id, node);
}
- public static void writeToStore(final InMemoryDOMDataStore store, final YangInstanceIdentifier id,
- final NormalizedNode<?,?> node) throws ExecutionException, InterruptedException {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+ public static void writeToStore(final ShardDataTree store, final YangInstanceIdentifier id,
+ final NormalizedNode<?,?> node) throws InterruptedException, ExecutionException {
+ ReadWriteShardDataTreeTransaction transaction = store.newReadWriteTransaction("writeToStore", null);
- transaction.write(id, node);
+ transaction.getSnapshot().write(id, node);
+ ShardDataTreeCohort cohort = transaction.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit();
+ }
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- commitCohort.preCommit().get();
- commitCohort.commit().get();
+ public static void writeToStore(final DataTree store, final YangInstanceIdentifier id,
+ final NormalizedNode<?,?> node) throws DataValidationFailedException {
+ DataTreeModification transaction = store.takeSnapshot().newModification();
+
+ transaction.write(id, node);
+ transaction.ready();
+ store.validate(transaction);
+ final DataTreeCandidate candidate = store.prepare(transaction);
+ store.commit(candidate);
}
@SuppressWarnings("serial")
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import akka.actor.Props;
import akka.dispatch.Futures;
import akka.testkit.JavaTestKit;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.CheckedFuture;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.Before;
import org.opendaylight.controller.cluster.datastore.TransactionProxyTest.TestException;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static ActorSystem system;
- private final Configuration configuration = new MockConfiguration();
+ private final Configuration configuration = new MockConfiguration() {
+ @Override
+ public Map<String, ShardStrategy> getModuleNameToShardStrategyMap() {
+ return ImmutableMap.<String, ShardStrategy>builder().put(
+ "junk", new ShardStrategy() {
+ @Override
+ public String findShard(YangInstanceIdentifier path) {
+ return "junk";
+ }
+ }).build();
+ }
+
+ @Override
+ public Optional<String> getModuleNameFromNameSpace(String nameSpace) {
+ return TestModel.JUNK_QNAME.getNamespace().toASCIIString().equals(nameSpace) ?
+ Optional.of("junk") : Optional.<String>absent();
+ }
+ };
@Mock
protected ActorContext mockActorContext;
protected final String memberName = "mock-member";
- protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder().operationTimeoutInSeconds(2).
- shardBatchedModificationCount(1);
+ protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder().operationTimeoutInSeconds(2);
@BeforeClass
public static void setUpClass() throws IOException {
doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
+ Timer timer = new MetricRegistry().timer("test");
+ doReturn(timer).when(mockActorContext).getOperationTimer(any(String.class));
+
ShardStrategyFactory.setConfiguration(configuration);
}
return argThat(matcher);
}
- protected Future<Object> readySerializedTxReply(String path) {
- return Futures.successful((Object)new ReadyTransactionReply(path).toSerializable());
- }
-
protected Future<Object> readyTxReply(String path) {
return Futures.successful((Object)new ReadyTransactionReply(path));
}
}
protected Future<Object> dataExistsSerializedReply(boolean exists) {
- return Futures.successful(new DataExistsReply(exists).toSerializable());
+ return Futures.successful(DataExistsReply.create(exists).toSerializable());
}
protected Future<DataExistsReply> dataExistsReply(boolean exists) {
- return Futures.successful(new DataExistsReply(exists));
+ return Futures.successful(DataExistsReply.create(exists));
}
protected Future<BatchedModificationsReply> batchedModificationsReply(int count) {
return Futures.successful(new BatchedModificationsReply(count));
}
- protected Future<Object> incompleteFuture(){
+ protected Future<Object> incompleteFuture() {
return mock(Future.class);
}
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
+ protected void expectBatchedModificationsReady(ActorRef actorRef) {
+ expectBatchedModificationsReady(actorRef, false);
+ }
+
+ protected void expectBatchedModificationsReady(ActorRef actorRef, boolean doCommitOnReady) {
+ doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
+ readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+ }
+
protected void expectBatchedModifications(int count) {
doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
any(ActorSelection.class), isA(BatchedModifications.class));
any(ActorSelection.class), isA(BatchedModifications.class));
}
- protected void expectReadyTransaction(ActorRef actorRef) {
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
- }
-
protected void expectFailedBatchedModifications(ActorRef actorRef) {
doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
protected ActorRef setupActorContextWithoutInitialCreateTransaction(ActorSystem actorSystem) {
+ return setupActorContextWithoutInitialCreateTransaction(actorSystem, DefaultShardStrategy.DEFAULT_SHARD);
+ }
+
+ protected Future<PrimaryShardInfo> primaryShardInfoReply(ActorSystem actorSystem, ActorRef actorRef) {
+ return Futures.successful(new PrimaryShardInfo(actorSystem.actorSelection(actorRef.path()),
+ Optional.<DataTree>absent()));
+ }
+
+ protected ActorRef setupActorContextWithoutInitialCreateTransaction(ActorSystem actorSystem, String shardName) {
ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
log.info("Created mock shard actor {}", actorRef);
doReturn(actorSystem.actorSelection(actorRef.path())).
when(mockActorContext).actorSelection(actorRef.path().toString());
- doReturn(Futures.successful(actorSystem.actorSelection(actorRef.path()))).
- when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+ doReturn(primaryShardInfoReply(actorSystem, actorRef)).
+ when(mockActorContext).findPrimaryShardAsync(eq(shardName));
doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
}
protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem,
- TransactionType type, int transactionVersion) {
- ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem);
+ TransactionType type, int transactionVersion, String shardName) {
+ ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName);
return setupActorContextWithInitialCreateTransaction(actorSystem, type, transactionVersion,
memberName, shardActorRef);
protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem,
TransactionType type, int transactionVersion, String prefix, ActorRef shardActorRef) {
- ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
- log.info("Created mock shard Tx actor {}", txActorRef);
+ ActorRef txActorRef;
+ if(type == TransactionType.WRITE_ONLY && transactionVersion >= DataStoreVersions.LITHIUM_VERSION &&
+ dataStoreContextBuilder.build().isWriteOnlyTransactionOptimizationsEnabled()) {
+ txActorRef = shardActorRef;
+ } else {
+ txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+ log.info("Created mock shard Tx actor {}", txActorRef);
- doReturn(actorSystem.actorSelection(txActorRef.path())).when(mockActorContext).actorSelection(
- txActorRef.path().toString());
+ doReturn(actorSystem.actorSelection(txActorRef.path())).
+ when(mockActorContext).actorSelection(txActorRef.path().toString());
- doReturn(Futures.successful(createTransactionReply(txActorRef, transactionVersion))).when(mockActorContext).
- executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
- eqCreateTransaction(prefix, type));
+ doReturn(Futures.successful(createTransactionReply(txActorRef, transactionVersion))).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(prefix, type));
+ }
return txActorRef;
}
protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type) {
- return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION);
+ return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
+ DefaultShardStrategy.DEFAULT_SHARD);
}
+ protected ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type,
+ String shardName) {
+ return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
+ shardName);
+ }
protected void propagateReadFailedExceptionCause(CheckedFuture<?, ReadFailedException> future)
throws Throwable {
return captured;
}
- protected void verifyOneBatchedModification(ActorRef actorRef, Modification expected) {
+ protected void verifyOneBatchedModification(ActorRef actorRef, Modification expected, boolean expIsReady) {
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), expected);
+ verifyBatchedModifications(batchedModifications.get(0), expIsReady, expIsReady, expected);
+ }
+
+ protected void verifyBatchedModifications(Object message, boolean expIsReady, Modification... expected) {
+ verifyBatchedModifications(message, expIsReady, false, expected);
}
- protected void verifyBatchedModifications(Object message, Modification... expected) {
+ protected void verifyBatchedModifications(Object message, boolean expIsReady, boolean expIsDoCommitOnReady,
+ Modification... expected) {
assertEquals("Message type", BatchedModifications.class, message.getClass());
BatchedModifications batchedModifications = (BatchedModifications)message;
assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
+ assertEquals("isReady", expIsReady, batchedModifications.isReady());
+ assertEquals("isDoCommitOnReady", expIsDoCommitOnReady, batchedModifications.isDoCommitOnReady());
for(int i = 0; i < batchedModifications.getModifications().size(); i++) {
Modification actual = batchedModifications.getModifications().get(i);
assertEquals("Modification type", expected[i].getClass(), actual.getClass());
}
}
- protected void verifyCohortFutures(ThreePhaseCommitCohortProxy proxy,
+ protected void verifyCohortFutures(AbstractThreePhaseCommitCohort<?> proxy,
Object... expReplies) throws Exception {
assertEquals("getReadyOperationFutures size", expReplies.length,
proxy.getCohortFutures().size());
- int i = 0;
- for( Future<ActorSelection> future: proxy.getCohortFutures()) {
+ List<Object> futureResults = new ArrayList<>();
+ for( Future<?> future: proxy.getCohortFutures()) {
assertNotNull("Ready operation Future is null", future);
+ try {
+ futureResults.add(Await.result(future, Duration.create(5, TimeUnit.SECONDS)));
+ } catch(Exception e) {
+ futureResults.add(e);
+ }
+ }
+
+ for(int i = 0; i < expReplies.length; i++) {
+ Object expReply = expReplies[i];
+ boolean found = false;
+ Iterator<?> iter = futureResults.iterator();
+ while(iter.hasNext()) {
+ Object actual = iter.next();
+ if(CommitTransactionReply.SERIALIZABLE_CLASS.isInstance(expReply) &&
+ CommitTransactionReply.SERIALIZABLE_CLASS.isInstance(actual)) {
+ found = true;
+ } else if(expReply instanceof ActorSelection && Objects.equal(expReply, actual)) {
+ found = true;
+ } else if(expReply instanceof Class && ((Class<?>)expReply).isInstance(actual)) {
+ found = true;
+ }
- Object expReply = expReplies[i++];
- if(expReply instanceof ActorSelection) {
- ActorSelection actual = Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- assertEquals("Cohort actor path", expReply, actual);
- } else {
- try {
- Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- fail("Expected exception from ready operation Future");
- } catch(Exception e) {
- assertTrue(String.format("Expected exception type %s. Actual %s",
- expReply, e.getClass()), ((Class<?>)expReply).isInstance(e));
+ if(found) {
+ iter.remove();
+ break;
}
}
+
+ if(!found) {
+ fail(String.format("No cohort Future response found for %s. Actual: %s", expReply, futureResults));
+ }
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Unit tests for DOMConcurrentDataCommitCoordinator.
+ *
+ * @author Thomas Pantelis
+ */
+public class ConcurrentDOMDataBrokerTest {
+
+ private final DOMDataWriteTransaction transaction = mock(DOMDataWriteTransaction.class);
+ private final DOMStoreThreePhaseCommitCohort mockCohort1 = mock(DOMStoreThreePhaseCommitCohort.class);
+ private final DOMStoreThreePhaseCommitCohort mockCohort2 = mock(DOMStoreThreePhaseCommitCohort.class);
+ private final ThreadPoolExecutor futureExecutor =
+ new ThreadPoolExecutor(0, 1, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
+ private ConcurrentDOMDataBroker coordinator;
+
+ @Before
+ public void setup() {
+ doReturn("tx").when(transaction).getIdentifier();
+
+ DOMStore store = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.sameThreadExecutor());
+
+ coordinator = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL, store), futureExecutor);
+ }
+
+ @After
+ public void tearDown() {
+ futureExecutor.shutdownNow();
+ }
+
+ @Test
+ public void testSuccessfulSubmitAsync() throws Throwable {
+ testSuccessfulSubmit(true);
+ }
+
+ @Test
+ public void testSuccessfulSubmitSync() throws Throwable {
+ testSuccessfulSubmit(false);
+ }
+
+ private void testSuccessfulSubmit(final boolean doAsync) throws Throwable {
+ final CountDownLatch asyncCanCommitContinue = new CountDownLatch(1);
+ Answer<ListenableFuture<Boolean>> asyncCanCommit = new Answer<ListenableFuture<Boolean>>() {
+ @Override
+ public ListenableFuture<Boolean> answer(final InvocationOnMock invocation) {
+ final SettableFuture<Boolean> future = SettableFuture.create();
+ if(doAsync) {
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue,
+ 10, TimeUnit.SECONDS);
+ future.set(true);
+ }
+ }.start();
+ } else {
+ future.set(true);
+ }
+
+ return future;
+ }
+ };
+
+ doAnswer(asyncCanCommit).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).commit();
+
+ doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).commit();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2));
+
+ final CountDownLatch doneLatch = new CountDownLatch(1);
+ final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
+ Futures.addCallback(future, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ doneLatch.countDown();
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ caughtEx.set(t);
+ doneLatch.countDown();
+ }
+ });
+
+ asyncCanCommitContinue.countDown();
+
+ assertEquals("Submit complete", true, doneLatch.await(5, TimeUnit.SECONDS));
+
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Task count", doAsync ? 1 : 0, futureExecutor.getTaskCount());
+
+ InOrder inOrder = inOrder(mockCohort1, mockCohort2);
+ inOrder.verify(mockCohort1).canCommit();
+ inOrder.verify(mockCohort2).canCommit();
+ inOrder.verify(mockCohort1).preCommit();
+ inOrder.verify(mockCohort2).preCommit();
+ inOrder.verify(mockCohort1).commit();
+ inOrder.verify(mockCohort2).commit();
+ }
+
+ @Test
+ public void testSubmitWithNegativeCanCommitResponse() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ doReturn(Futures.immediateFuture(false)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(false)).when(mockCohort3).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+
+ assertFailure(future, null, mockCohort1, mockCohort2, mockCohort3);
+ }
+
+ private void assertFailure(final CheckedFuture<Void, TransactionCommitFailedException> future,
+ final Exception expCause, final DOMStoreThreePhaseCommitCohort... mockCohorts)
+ throws Exception {
+ try {
+ future.checkedGet(5, TimeUnit.SECONDS);
+ fail("Expected TransactionCommitFailedException");
+ } catch (TransactionCommitFailedException e) {
+ if(expCause != null) {
+ assertSame("Expected cause", expCause, e.getCause());
+ }
+
+ InOrder inOrder = inOrder((Object[])mockCohorts);
+ for(DOMStoreThreePhaseCommitCohort c: mockCohorts) {
+ inOrder.verify(c).abort();
+ }
+ } catch (TimeoutException e) {
+ throw e;
+ }
+ }
+
+ @Test
+ public void testSubmitWithCanCommitException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2);
+ }
+
+ @Test
+ public void testSubmitWithPreCommitException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
+ IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(true)).when(mockCohort3).canCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2"))).
+ when(mockCohort3).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+ }
+
+ @Test
+ public void testSubmitWithCommitException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).commit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).preCommit();
+ IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).commit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(true)).when(mockCohort3).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).preCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2"))).
+ when(mockCohort3).commit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+ }
+
+ @Test
+ public void testSubmitWithAbortException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock abort error"))).
+ when(mockCohort1).abort();
+
+ IllegalStateException cause = new IllegalStateException("mock canCommit error");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2);
+ }
+
+ @Test
+ public void testCreateReadWriteTransaction(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+ dataBroker.newReadWriteTransaction();
+
+ verify(domStore, never()).newReadWriteTransaction();
+ }
+
+
+ @Test
+ public void testCreateWriteOnlyTransaction(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+ dataBroker.newWriteOnlyTransaction();
+
+ verify(domStore, never()).newWriteOnlyTransaction();
+ }
+
+ @Test
+ public void testCreateReadOnlyTransaction(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+ dataBroker.newReadOnlyTransaction();
+
+ verify(domStore, never()).newReadOnlyTransaction();
+ }
+
+ @Test
+ public void testLazySubTransactionCreationForReadWriteTransactions(){
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadWriteTransaction storeTxn = mock(DOMStoreReadWriteTransaction.class);
+
+ doReturn(storeTxn).when(operationalDomStore).newReadWriteTransaction();
+ doReturn(storeTxn).when(configDomStore).newReadWriteTransaction();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor);
+ DOMDataReadWriteTransaction dataTxn = dataBroker.newReadWriteTransaction();
+
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+
+ verify(configDomStore, never()).newReadWriteTransaction();
+ verify(operationalDomStore, times(1)).newReadWriteTransaction();
+
+ dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ verify(configDomStore, times(1)).newReadWriteTransaction();
+ verify(operationalDomStore, times(1)).newReadWriteTransaction();
+
+ }
+
+ @Test
+ public void testLazySubTransactionCreationForWriteOnlyTransactions(){
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreWriteTransaction storeTxn = mock(DOMStoreWriteTransaction.class);
+
+ doReturn(storeTxn).when(operationalDomStore).newWriteOnlyTransaction();
+ doReturn(storeTxn).when(configDomStore).newWriteOnlyTransaction();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor);
+ DOMDataWriteTransaction dataTxn = dataBroker.newWriteOnlyTransaction();
+
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ verify(configDomStore, never()).newWriteOnlyTransaction();
+ verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
+
+ dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ verify(configDomStore, times(1)).newWriteOnlyTransaction();
+ verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
+
+ }
+
+
+ @Test
+ public void testLazySubTransactionCreationForReadOnlyTransactions(){
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadTransaction storeTxn = mock(DOMStoreReadTransaction.class);
+
+ doReturn(storeTxn).when(operationalDomStore).newReadOnlyTransaction();
+ doReturn(storeTxn).when(configDomStore).newReadOnlyTransaction();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor);
+ DOMDataReadOnlyTransaction dataTxn = dataBroker.newReadOnlyTransaction();
+
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+ dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+
+ verify(configDomStore, never()).newReadOnlyTransaction();
+ verify(operationalDomStore, times(1)).newReadOnlyTransaction();
+
+ dataTxn.read(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build());
+
+ verify(configDomStore, times(1)).newReadOnlyTransaction();
+ verify(operationalDomStore, times(1)).newReadOnlyTransaction();
+
+ }
+
+ @Test
+ public void testSubmitWithOnlyOneSubTransaction() throws InterruptedException {
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadWriteTransaction mockStoreReadWriteTransaction = mock(DOMStoreReadWriteTransaction.class);
+ DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
+
+ doReturn(mockStoreReadWriteTransaction).when(operationalDomStore).newReadWriteTransaction();
+ doReturn(mockCohort).when(mockStoreReadWriteTransaction).ready();
+ doReturn(Futures.immediateFuture(false)).when(mockCohort).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort).abort();
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor) {
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction, Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ commitCohorts.addAll(cohorts);
+ latch.countDown();
+ return super.submit(transaction, cohorts);
+ }
+ };
+ DOMDataReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
+
+ domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build());
+
+ domDataReadWriteTransaction.submit();
+
+ latch.await(10, TimeUnit.SECONDS);
+
+ assertTrue(commitCohorts.size() == 1);
+ }
+
+ @Test
+ public void testSubmitWithOnlyTwoSubTransactions() throws InterruptedException {
+ DOMStore configDomStore = mock(DOMStore.class);
+ DOMStore operationalDomStore = mock(DOMStore.class);
+ DOMStoreReadWriteTransaction operationalTransaction = mock(DOMStoreReadWriteTransaction.class);
+ DOMStoreReadWriteTransaction configTransaction = mock(DOMStoreReadWriteTransaction.class);
+ DOMStoreThreePhaseCommitCohort mockCohortOperational = mock(DOMStoreThreePhaseCommitCohort.class);
+ DOMStoreThreePhaseCommitCohort mockCohortConfig = mock(DOMStoreThreePhaseCommitCohort.class);
+
+ doReturn(operationalTransaction).when(operationalDomStore).newReadWriteTransaction();
+ doReturn(configTransaction).when(configDomStore).newReadWriteTransaction();
+
+ doReturn(mockCohortOperational).when(operationalTransaction).ready();
+ doReturn(Futures.immediateFuture(false)).when(mockCohortOperational).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohortOperational).abort();
+
+ doReturn(mockCohortConfig).when(configTransaction).ready();
+ doReturn(Futures.immediateFuture(false)).when(mockCohortConfig).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohortConfig).abort();
+
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList();
+
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ operationalDomStore, LogicalDatastoreType.CONFIGURATION, configDomStore), futureExecutor) {
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction, Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+ commitCohorts.addAll(cohorts);
+ latch.countDown();
+ return super.submit(transaction, cohorts);
+ }
+ };
+ DOMDataReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
+
+ domDataReadWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ domDataReadWriteTransaction.merge(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+
+ domDataReadWriteTransaction.submit();
+
+ latch.await(10, TimeUnit.SECONDS);
+
+ assertTrue(commitCohorts.size() == 2);
+ }
+
+ @Test
+ public void testCreateTransactionChain(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+
+ dataBroker.createTransactionChain(mock(TransactionChainListener.class));
+
+ verify(domStore, times(2)).createTransactionChain();
+
+ }
+
+ @Test
+ public void testCreateTransactionOnChain(){
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+
+ DOMStoreReadWriteTransaction operationalTransaction = mock(DOMStoreReadWriteTransaction.class);
+ DOMStoreTransactionChain mockChain = mock(DOMStoreTransactionChain.class);
+
+ doReturn(mockChain).when(domStore).createTransactionChain();
+ doReturn(operationalTransaction).when(mockChain).newWriteOnlyTransaction();
+
+ DOMTransactionChain transactionChain = dataBroker.createTransactionChain(mock(TransactionChainListener.class));
+
+ DOMDataWriteTransaction domDataWriteTransaction = transactionChain.newWriteOnlyTransaction();
+
+ verify(mockChain, never()).newWriteOnlyTransaction();
+
+ domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.builder().build(), mock(NormalizedNode.class));
+ }
+
+ @Test
+ public void testEmptyTransactionSubmitSucceeds() throws ExecutionException, InterruptedException {
+ DOMStore domStore = mock(DOMStore.class);
+ ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL,
+ domStore, LogicalDatastoreType.CONFIGURATION, domStore), futureExecutor);
+
+ CheckedFuture<Void, TransactionCommitFailedException> submit1 = dataBroker.newWriteOnlyTransaction().submit();
+
+ assertNotNull(submit1);
+
+ submit1.get();
+
+ CheckedFuture<Void, TransactionCommitFailedException> submit2 = dataBroker.newReadWriteTransaction().submit();
+
+ assertNotNull(submit2);
+
+ submit2.get();
+ }
+
+}
+++ /dev/null
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Arrays;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.InOrder;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-
-/**
- * Unit tests for DOMConcurrentDataCommitCoordinator.
- *
- * @author Thomas Pantelis
- */
-public class DOMConcurrentDataCommitCoordinatorTest {
-
- private final DOMDataWriteTransaction transaction = mock(DOMDataWriteTransaction.class);
- private final DOMStoreThreePhaseCommitCohort mockCohort1 = mock(DOMStoreThreePhaseCommitCohort.class);
- private final DOMStoreThreePhaseCommitCohort mockCohort2 = mock(DOMStoreThreePhaseCommitCohort.class);
- private final ThreadPoolExecutor futureExecutor =
- new ThreadPoolExecutor(0, 1, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
- private ConcurrentDOMDataBroker coordinator;
-
- @Before
- public void setup() {
- doReturn("tx").when(transaction).getIdentifier();
-
- DOMStore store = new InMemoryDOMDataStore("OPER",
- MoreExecutors.sameThreadExecutor());
-
- coordinator = new ConcurrentDOMDataBroker(ImmutableMap.of(LogicalDatastoreType.OPERATIONAL, store), futureExecutor);
- }
-
- @After
- public void tearDown() {
- futureExecutor.shutdownNow();
- }
-
- @Test
- public void testSuccessfulSubmitAsync() throws Throwable {
- testSuccessfulSubmit(true);
- }
-
- @Test
- public void testSuccessfulSubmitSync() throws Throwable {
- testSuccessfulSubmit(false);
- }
-
- private void testSuccessfulSubmit(final boolean doAsync) throws Throwable {
- final CountDownLatch asyncCanCommitContinue = new CountDownLatch(1);
- Answer<ListenableFuture<Boolean>> asyncCanCommit = new Answer<ListenableFuture<Boolean>>() {
- @Override
- public ListenableFuture<Boolean> answer(final InvocationOnMock invocation) {
- final SettableFuture<Boolean> future = SettableFuture.create();
- if(doAsync) {
- new Thread() {
- @Override
- public void run() {
- Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue,
- 10, TimeUnit.SECONDS);
- future.set(true);
- }
- }.start();
- } else {
- future.set(true);
- }
-
- return future;
- }
- };
-
- doAnswer(asyncCanCommit).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).commit();
-
- doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).preCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).commit();
-
- CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
- transaction, Arrays.asList(mockCohort1, mockCohort2));
-
- final CountDownLatch doneLatch = new CountDownLatch(1);
- final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
- Futures.addCallback(future, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void result) {
- doneLatch.countDown();
- }
-
- @Override
- public void onFailure(final Throwable t) {
- caughtEx.set(t);
- doneLatch.countDown();
- }
- });
-
- asyncCanCommitContinue.countDown();
-
- assertEquals("Submit complete", true, doneLatch.await(5, TimeUnit.SECONDS));
-
- if(caughtEx.get() != null) {
- throw caughtEx.get();
- }
-
- assertEquals("Task count", doAsync ? 1 : 0, futureExecutor.getTaskCount());
-
- InOrder inOrder = inOrder(mockCohort1, mockCohort2);
- inOrder.verify(mockCohort1).canCommit();
- inOrder.verify(mockCohort2).canCommit();
- inOrder.verify(mockCohort1).preCommit();
- inOrder.verify(mockCohort2).preCommit();
- inOrder.verify(mockCohort1).commit();
- inOrder.verify(mockCohort2).commit();
- }
-
- @Test
- public void testSubmitWithNegativeCanCommitResponse() throws Exception {
- doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
-
- doReturn(Futures.immediateFuture(false)).when(mockCohort2).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
-
- DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
- doReturn(Futures.immediateFuture(false)).when(mockCohort3).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
-
- CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
- transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
-
- assertFailure(future, null, mockCohort1, mockCohort2, mockCohort3);
- }
-
- private void assertFailure(final CheckedFuture<Void, TransactionCommitFailedException> future,
- final Exception expCause, final DOMStoreThreePhaseCommitCohort... mockCohorts)
- throws Exception {
- try {
- future.checkedGet(5, TimeUnit.SECONDS);
- fail("Expected TransactionCommitFailedException");
- } catch (TransactionCommitFailedException e) {
- if(expCause != null) {
- assertSame("Expected cause", expCause, e.getCause());
- }
-
- InOrder inOrder = inOrder((Object[])mockCohorts);
- for(DOMStoreThreePhaseCommitCohort c: mockCohorts) {
- inOrder.verify(c).abort();
- }
- } catch (TimeoutException e) {
- throw e;
- }
- }
-
- @Test
- public void testSubmitWithCanCommitException() throws Exception {
- doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
-
- IllegalStateException cause = new IllegalStateException("mock");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
-
- CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
- transaction, Arrays.asList(mockCohort1, mockCohort2));
-
- assertFailure(future, cause, mockCohort1, mockCohort2);
- }
-
- @Test
- public void testSubmitWithPreCommitException() throws Exception {
- doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
-
- doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
- IllegalStateException cause = new IllegalStateException("mock");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).preCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
-
- DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
- doReturn(Futures.immediateFuture(true)).when(mockCohort3).canCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2"))).
- when(mockCohort3).preCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
-
- CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
- transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
-
- assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
- }
-
- @Test
- public void testSubmitWithCommitException() throws Exception {
- doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).commit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
-
- doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).preCommit();
- IllegalStateException cause = new IllegalStateException("mock");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).commit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
-
- DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
- doReturn(Futures.immediateFuture(true)).when(mockCohort3).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort3).preCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2"))).
- when(mockCohort3).commit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
-
- CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
- transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
-
- assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
- }
-
- @Test
- public void testSubmitWithAbortException() throws Exception {
- doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock abort error"))).
- when(mockCohort1).abort();
-
- IllegalStateException cause = new IllegalStateException("mock canCommit error");
- doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
- doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
-
- CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
- transaction, Arrays.asList(mockCohort1, mockCohort2));
-
- assertFailure(future, cause, mockCohort1, mockCohort2);
- }
-}
*/
package org.opendaylight.controller.cluster.datastore;
-import static org.mockito.Mockito.any;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
-import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
Assert.assertEquals("getPath", path, registerMsg.getPath());
Assert.assertEquals("getScope", scope, registerMsg.getScope());
- reply(new RegisterChangeListenerReply(getRef().path()));
+ reply(new RegisterChangeListenerReply(getRef()));
for(int i = 0; (i < 20 * 5) && proxy.getListenerRegistrationActor() == null; i++) {
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
- reply(new ActorNotInitialized());
+ reply(new NotInitializedException("not initialized"));
new Within(duration("1 seconds")) {
@Override
@Override
public Future<Object> answer(InvocationOnMock invocation) {
proxy.close();
- return Futures.successful((Object)new RegisterChangeListenerReply(getRef().path()));
+ return Futures.successful((Object)new RegisterChangeListenerReply(getRef()));
}
};
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+import java.io.IOException;
+import java.util.Collection;
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+
+public class DataTreeCandidatePayloadTest {
+ private DataTreeCandidate candidate;
+
+ private static DataTreeCandidateNode findNode(final Collection<DataTreeCandidateNode> nodes, final PathArgument arg) {
+ for (DataTreeCandidateNode node : nodes) {
+ if (arg.equals(node.getIdentifier())) {
+ return node;
+ }
+ }
+ return null;
+ }
+
+ private static void assertChildrenEquals(final Collection<DataTreeCandidateNode> expected,
+ final Collection<DataTreeCandidateNode> actual) {
+ // Make sure all expected nodes are there
+ for (DataTreeCandidateNode exp : expected) {
+ final DataTreeCandidateNode act = findNode(actual, exp.getIdentifier());
+ assertNotNull("missing expected child", act);
+ assertCandidateNodeEquals(exp, act);
+ }
+ // Make sure no nodes are present which are not in the expected set
+ for (DataTreeCandidateNode act : actual) {
+ final DataTreeCandidateNode exp = findNode(expected, act.getIdentifier());
+ assertNull("unexpected child", exp);
+ }
+ }
+
+ private static void assertCandidateEquals(final DataTreeCandidate expected, final DataTreeCandidate actual) {
+ assertEquals("root path", expected.getRootPath(), actual.getRootPath());
+
+ final DataTreeCandidateNode expRoot = expected.getRootNode();
+ final DataTreeCandidateNode actRoot = expected.getRootNode();
+ assertEquals("root type", expRoot.getModificationType(), actRoot.getModificationType());
+
+ switch (actRoot.getModificationType()) {
+ case DELETE:
+ case WRITE:
+ assertEquals("root data", expRoot.getDataAfter(), actRoot.getDataAfter());
+ break;
+ case SUBTREE_MODIFIED:
+ assertChildrenEquals(expRoot.getChildNodes(), actRoot.getChildNodes());
+ break;
+ default:
+ fail("Unexpect root type " + actRoot.getModificationType());
+ break;
+ }
+
+ assertCandidateNodeEquals(expected.getRootNode(), actual.getRootNode());
+ }
+
+ private static void assertCandidateNodeEquals(final DataTreeCandidateNode expected, final DataTreeCandidateNode actual) {
+ assertEquals("child type", expected.getModificationType(), actual.getModificationType());
+ assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
+
+ switch (actual.getModificationType()) {
+ case DELETE:
+ case WRITE:
+ assertEquals("child data", expected.getDataAfter(), actual.getDataAfter());
+ break;
+ case SUBTREE_MODIFIED:
+ assertChildrenEquals(expected.getChildNodes(), actual.getChildNodes());
+ break;
+ default:
+ fail("Unexpect root type " + actual.getModificationType());
+ break;
+ }
+ }
+
+ @Before
+ public void setUp() {
+ final YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ final NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+ candidate = DataTreeCandidates.fromNormalizedNode(writePath, writeData);
+ }
+
+ @Test
+ public void testCandidateSerialization() throws IOException {
+ final DataTreeCandidatePayload payload = DataTreeCandidatePayload.create(candidate);
+ assertEquals("payload size", 141, payload.size());
+ }
+
+ @Test
+ public void testCandidateSerDes() throws IOException {
+ final DataTreeCandidatePayload payload = DataTreeCandidatePayload.create(candidate);
+ assertCandidateEquals(candidate, payload.getCandidate());
+ }
+
+ @Test
+ public void testPayloadSerDes() throws IOException {
+ final DataTreeCandidatePayload payload = DataTreeCandidatePayload.create(candidate);
+ assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.DeadLetter;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import com.google.common.collect.ImmutableList;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+public class DataTreeChangeListenerActorTest extends AbstractActorTest {
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testDataChangedWhenNotificationsAreEnabled(){
+ new JavaTestKit(getSystem()) {{
+ final DataTreeCandidate mockTreeCandidate = Mockito.mock(DataTreeCandidate.class);
+ final ImmutableList<DataTreeCandidate> mockCandidates = ImmutableList.of(mockTreeCandidate);
+ final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+ final Props props = DataTreeChangeListenerActor.props(mockListener);
+ final ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedNotificationsEnabled");
+
+ // Let the DataChangeListener know that notifications should be enabled
+ subject.tell(new EnableNotification(true), getRef());
+
+ subject.tell(new DataTreeChanged(mockCandidates),
+ getRef());
+
+ expectMsgClass(DataTreeChangedReply.class);
+
+ Mockito.verify(mockListener).onDataTreeChanged(mockCandidates);
+ }};
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testDataChangedWhenNotificationsAreDisabled(){
+ new JavaTestKit(getSystem()) {{
+ final DataTreeCandidate mockTreeCandidate = Mockito.mock(DataTreeCandidate.class);
+ final ImmutableList<DataTreeCandidate> mockCandidates = ImmutableList.of(mockTreeCandidate);
+ final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+ final Props props = DataTreeChangeListenerActor.props(mockListener);
+ final ActorRef subject =
+ getSystem().actorOf(props, "testDataTreeChangedNotificationsDisabled");
+
+ subject.tell(new DataTreeChanged(mockCandidates),
+ getRef());
+
+ new Within(duration("1 seconds")) {
+ @Override
+ protected void run() {
+ expectNoMsg();
+
+ Mockito.verify(mockListener, Mockito.never()).onDataTreeChanged(
+ Matchers.anyCollectionOf(DataTreeCandidate.class));
+ }
+ };
+ }};
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testDataChangedWithNoSender(){
+ new JavaTestKit(getSystem()) {{
+ final DataTreeCandidate mockTreeCandidate = Mockito.mock(DataTreeCandidate.class);
+ final ImmutableList<DataTreeCandidate> mockCandidates = ImmutableList.of(mockTreeCandidate);
+ final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+ final Props props = DataTreeChangeListenerActor.props(mockListener);
+ final ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedWithNoSender");
+
+ getSystem().eventStream().subscribe(getRef(), DeadLetter.class);
+
+ subject.tell(new DataTreeChanged(mockCandidates), ActorRef.noSender());
+
+ // Make sure no DataChangedReply is sent to DeadLetters.
+ while(true) {
+ DeadLetter deadLetter;
+ try {
+ deadLetter = expectMsgClass(duration("1 seconds"), DeadLetter.class);
+ } catch (AssertionError e) {
+ // Timed out - got no DeadLetter - this is good
+ break;
+ }
+
+ // We may get DeadLetters for other messages we don't care about.
+ Assert.assertFalse("Unexpected DataTreeChangedReply",
+ deadLetter.message() instanceof DataTreeChangedReply);
+ }
+ }};
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testDataChangedWithListenerRuntimeEx(){
+ new JavaTestKit(getSystem()) {{
+ final DataTreeCandidate mockTreeCandidate1 = Mockito.mock(DataTreeCandidate.class);
+ final ImmutableList<DataTreeCandidate> mockCandidates1 = ImmutableList.of(mockTreeCandidate1);
+ final DataTreeCandidate mockTreeCandidate2 = Mockito.mock(DataTreeCandidate.class);
+ final ImmutableList<DataTreeCandidate> mockCandidates2 = ImmutableList.of(mockTreeCandidate2);
+ final DataTreeCandidate mockTreeCandidate3 = Mockito.mock(DataTreeCandidate.class);
+ final ImmutableList<DataTreeCandidate> mockCandidates3 = ImmutableList.of(mockTreeCandidate3);
+
+ final DOMDataTreeChangeListener mockListener = Mockito.mock(DOMDataTreeChangeListener.class);
+ Mockito.doThrow(new RuntimeException("mock")).when(mockListener).onDataTreeChanged(mockCandidates2);
+
+ Props props = DataTreeChangeListenerActor.props(mockListener);
+ ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedWithListenerRuntimeEx");
+
+ // Let the DataChangeListener know that notifications should be enabled
+ subject.tell(new EnableNotification(true), getRef());
+
+ subject.tell(new DataTreeChanged(mockCandidates1),getRef());
+ expectMsgClass(DataTreeChangedReply.class);
+
+ subject.tell(new DataTreeChanged(mockCandidates2),getRef());
+ expectMsgClass(DataTreeChangedReply.class);
+
+ subject.tell(new DataTreeChanged(mockCandidates3),getRef());
+ expectMsgClass(DataTreeChangedReply.class);
+
+ Mockito.verify(mockListener).onDataTreeChanged(mockCandidates1);
+ Mockito.verify(mockListener).onDataTreeChanged(mockCandidates2);
+ Mockito.verify(mockListener).onDataTreeChanged(mockCandidates3);
+ }};
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.actor.Terminated;
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.Futures;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
+ @SuppressWarnings("unchecked")
+ private final DOMDataTreeChangeListener mockListener = mock(DOMDataTreeChangeListener.class);
+
+ @Test(timeout=10000)
+ public void testSuccessfulRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
+
+ final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+ new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init("shard-1", path);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new LocalShardFound(getRef()));
+
+ RegisterDataTreeChangeListener registerMsg = expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+ Assert.assertEquals("getPath", path, registerMsg.getPath());
+
+ reply(new RegisterDataTreeChangeListenerReply(getRef()));
+
+
+ for(int i = 0; (i < 20 * 5) && proxy.getListenerRegistrationActor() == null; i++) {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.assertEquals("getListenerRegistrationActor", getSystem().actorSelection(getRef().path()),
+ proxy.getListenerRegistrationActor());
+
+ watch(proxy.getDataChangeListenerActor());
+
+ proxy.close();
+
+ // The listener registration actor should get a Close message
+ expectMsgClass(timeout, CloseDataTreeChangeListenerRegistration.class);
+
+ // The DataChangeListener actor should be terminated
+ expectMsgClass(timeout, Terminated.class);
+
+ proxy.close();
+
+ expectNoMsg();
+ }};
+ }
+
+ @Test(timeout=10000)
+ public void testLocalShardNotFound() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
+
+ final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+ new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init("shard-1", path);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new LocalShardNotFound("shard-1"));
+
+ expectNoMsg(duration("1 seconds"));
+ }};
+ }
+
+ @Test(timeout=10000)
+ public void testLocalShardNotInitialized() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
+
+ final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+ new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init("shard-1", path);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new NotInitializedException("not initialized"));
+
+ new Within(duration("1 seconds")) {
+ @Override
+ protected void run() {
+ expectNoMsg();
+ }
+ };
+ }};
+ }
+
+ @Test
+ public void testFailedRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorSystem mockActorSystem = mock(ActorSystem.class);
+
+ ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class),
+ "testFailedRegistration");
+ doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
+ ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(
+ MoreExecutors.sameThreadExecutor());
+
+
+ ActorContext actorContext = mock(ActorContext.class);
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+
+ doReturn(executor).when(actorContext).getClientDispatcher();
+ doReturn(mockActorSystem).when(actorContext).getActorSystem();
+
+ String shardName = "shard-1";
+ final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+ new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+ doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+ doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+ doReturn(Futures.failed(new RuntimeException("mock"))).
+ when(actorContext).executeOperationAsync(any(ActorRef.class),
+ any(Object.class), any(Timeout.class));
+ doReturn(mock(DatastoreContext.class)).when(actorContext).getDatastoreContext();
+
+ proxy.init("shard-1", path);
+
+ Assert.assertEquals("getListenerRegistrationActor", null,
+ proxy.getListenerRegistrationActor());
+ }};
+ }
+
+ @Test
+ public void testCloseBeforeRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = mock(ActorContext.class);
+
+ String shardName = "shard-1";
+
+ doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
+ doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(Dispatchers.DEFAULT_DISPATCHER_PATH).when(actorContext).getNotificationDispatcherPath();
+ doReturn(getSystem().actorSelection(getRef().path())).
+ when(actorContext).actorSelection(getRef().path());
+ doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+ doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+
+ final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
+ new DataTreeChangeListenerProxy<>(actorContext, mockListener);
+
+
+ Answer<Future<Object>> answer = new Answer<Future<Object>>() {
+ @Override
+ public Future<Object> answer(InvocationOnMock invocation) {
+ proxy.close();
+ return Futures.successful((Object)new RegisterDataTreeChangeListenerReply(getRef()));
+ }
+ };
+
+ doAnswer(answer).when(actorContext).executeOperationAsync(any(ActorRef.class),
+ any(Object.class), any(Timeout.class));
+
+ proxy.init(shardName, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
+
+ expectMsgClass(duration("5 seconds"), CloseDataTreeChangeListenerRegistration.class);
+
+ Assert.assertEquals("getListenerRegistrationActor", null,
+ proxy.getListenerRegistrationActor());
+ }};
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+public class DataTreeChangeListenerRegistrationActorTest extends AbstractActorTest {
+ private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+
+ static {
+ store.onGlobalContextUpdated(TestModel.createTestContext());
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnReceiveCloseListenerRegistration() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ListenerRegistration mockListenerReg = Mockito.mock(ListenerRegistration.class);
+ final Props props = DataTreeChangeListenerRegistrationActor.props(mockListenerReg);
+ final ActorRef subject = getSystem().actorOf(props, "testCloseListenerRegistration");
+
+ subject.tell(CloseDataTreeChangeListenerRegistration.getInstance(), getRef());
+
+ expectMsgClass(duration("1 second"), CloseDataTreeChangeListenerRegistrationReply.class);
+
+ Mockito.verify(mockListenerReg).close();
+ }};
+ }
+}
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
}};
}
- @Test
- public void testTransactionWritesWithShardNotInitiallyReady() throws Exception{
+ private void testTransactionWritesWithShardNotInitiallyReady(final String testName,
+ final boolean writeOnly) throws Exception {
new IntegrationTestKit(getSystem()) {{
- String testName = "testTransactionWritesWithShardNotInitiallyReady";
String shardName = "test-1";
// Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
// Create the write Tx
- final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction() :
+ dataStore.newReadWriteTransaction();
assertNotNull("newReadWriteTransaction returned null", writeTx);
// Do some modification operations and ready the Tx on a separate thread.
}
@Test
- public void testTransactionReadsWithShardNotInitiallyReady() throws Exception{
+ public void testWriteOnlyTransactionWithShardNotInitiallyReady() throws Exception {
+ datastoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+ testTransactionWritesWithShardNotInitiallyReady("testWriteOnlyTransactionWithShardNotInitiallyReady", true);
+ }
+
+ @Test
+ public void testReadWriteTransactionWithShardNotInitiallyReady() throws Exception {
+ testTransactionWritesWithShardNotInitiallyReady("testReadWriteTransactionWithShardNotInitiallyReady", false);
+ }
+
+ @Test
+ public void testTransactionReadsWithShardNotInitiallyReady() throws Exception {
new IntegrationTestKit(getSystem()) {{
String testName = "testTransactionReadsWithShardNotInitiallyReady";
String shardName = "test-1";
}};
}
- @Test(expected=NoShardLeaderException.class)
- public void testTransactionCommitFailureWithNoShardLeader() throws Throwable{
+ private void testTransactionCommitFailureWithNoShardLeader(final boolean writeOnly) throws Throwable {
new IntegrationTestKit(getSystem()) {{
String testName = "testTransactionCommitFailureWithNoShardLeader";
- String shardName = "test-1";
+ String shardName = "default";
// We don't want the shard to become the leader so prevent shard election from completing
// by setting the election timeout, which is based on the heartbeat interval, really high.
datastoreContextBuilder.shardHeartbeatIntervalInMillis(30000);
+ datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
// Set the leader election timeout low for the test.
// Create the write Tx.
- final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction() :
+ dataStore.newReadWriteTransaction();
assertNotNull("newReadWriteTransaction returned null", writeTx);
// Do some modifications and ready the Tx on a separate thread.
@Override
public void run() {
try {
- writeTx.write(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ writeTx.write(TestModel.JUNK_PATH,
+ ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
txCohort.set(writeTx.ready());
} catch(Exception e) {
}};
}
+ @Test(expected=NoShardLeaderException.class)
+ public void testWriteOnlyTransactionCommitFailureWithNoShardLeader() throws Throwable {
+ datastoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+ testTransactionCommitFailureWithNoShardLeader(true);
+ }
+
+ @Test(expected=NoShardLeaderException.class)
+ public void testReadWriteTransactionCommitFailureWithNoShardLeader() throws Throwable {
+ testTransactionCommitFailureWithNoShardLeader(false);
+ }
+
@Test
public void testTransactionAbort() throws Exception{
System.setProperty("shard.persistent", "true");
}
void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
- Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
assertEquals("canCommit", true, canCommit);
cohort.preCommit().get(5, TimeUnit.SECONDS);
cohort.commit().get(5, TimeUnit.SECONDS);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import java.util.Arrays;
+import java.util.Collection;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
+
+ @Test
+ public void testOnDataChanged() throws Exception {
+ final Props props = Props.create(MessageCollectorActor.class);
+ final ActorRef actorRef = getSystem().actorOf(props);
+
+ ForwardingDataTreeChangeListener forwardingListener = new ForwardingDataTreeChangeListener(
+ getSystem().actorSelection(actorRef.path()));
+
+ Collection<DataTreeCandidate> expected = Arrays.asList(Mockito.mock(DataTreeCandidate.class));
+ forwardingListener.onDataTreeChanged(expected);
+
+ DataTreeChanged actual = MessageCollectorActor.expectFirstMatching(actorRef, DataTreeChanged.class);
+ Assert.assertSame(expected, actual.getChanges());
+ }
+}
OperationCompleter completer = new OperationCompleter(operationLimiter );
- completer.onComplete(null, new DataExistsReply(true));
+ completer.onComplete(null, DataExistsReply.create(true));
assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
- completer.onComplete(null, new DataExistsReply(true));
+ completer.onComplete(null, DataExistsReply.create(true));
assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
completer.onComplete(null, new IllegalArgumentException());
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
+import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListenerReply;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.RaftState;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
public class RoleChangeNotifierTest extends AbstractActorTest {
TestActorRef<RoleChangeNotifier> notifierTestActorRef = TestActorRef.create(
getSystem(), RoleChangeNotifier.getProps(memberId), memberId);
- RoleChangeNotifier roleChangeNotifier = notifierTestActorRef.underlyingActor();
-
notifierTestActorRef.tell(new RoleChanged(memberId, RaftState.Candidate.name(), RaftState.Leader.name()), shardActor);
// no notification should be sent as listener has not yet registered
}};
}
+
+ @Test
+ public void testHandleLeaderStateChanged() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ String actorId = "testHandleLeaderStateChanged";
+ TestActorRef<RoleChangeNotifier> notifierTestActorRef = TestActorRef.create(
+ getSystem(), RoleChangeNotifier.getProps(actorId), actorId);
+
+ notifierTestActorRef.tell(new LeaderStateChanged("member1", "leader1"), ActorRef.noSender());
+
+ // listener registers after the sate has been changed, ensure we sent the latest state change after a reply
+ notifierTestActorRef.tell(new RegisterRoleChangeListener(), getRef());
+
+ expectMsgClass(RegisterRoleChangeListenerReply.class);
+
+ LeaderStateChanged leaderStateChanged = expectMsgClass(LeaderStateChanged.class);
+ assertEquals("getMemberId", "member1", leaderStateChanged.getMemberId());
+ assertEquals("getLeaderId", "leader1", leaderStateChanged.getLeaderId());
+
+ notifierTestActorRef.tell(new LeaderStateChanged("member1", "leader2"), ActorRef.noSender());
+
+ leaderStateChanged = expectMsgClass(LeaderStateChanged.class);
+ assertEquals("getMemberId", "member1", leaderStateChanged.getMemberId());
+ assertEquals("getLeaderId", "leader2", leaderStateChanged.getLeaderId());
+ }};
+ }
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.AddressFromURIString;
import akka.actor.Props;
+import akka.cluster.Cluster;
+import akka.cluster.ClusterEvent;
+import akka.dispatch.Dispatchers;
import akka.japi.Creator;
import akka.pattern.Patterns;
import akka.persistence.RecoveryCompleted;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
+import com.typesafe.config.ConfigFactory;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
+import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
-import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
+import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
-import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
+import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
+import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.Await;
import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
public class ShardManagerTest extends AbstractActorTest {
private static int ID_COUNTER = 1;
@Mock
private static CountDownLatch ready;
- private static ActorRef mockShardActor;
+ private static TestActorRef<MessageCollectorActor> mockShardActor;
+
+ private final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder().
+ dataStoreType(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS);
+
+ private static ActorRef newMockShardActor(ActorSystem system, String shardName, String memberName) {
+ String name = new ShardIdentifier(shardName, memberName,"config").toString();
+ return TestActorRef.create(system, Props.create(MessageCollectorActor.class), name);
+ }
@Before
public void setUp() {
InMemoryJournal.clear();
if(mockShardActor == null) {
- String name = new ShardIdentifier(Shard.DEFAULT_NAME, "member-1","config").toString();
- mockShardActor = getSystem().actorOf(Props.create(DoNothingActor.class), name);
+ String name = new ShardIdentifier(Shard.DEFAULT_NAME, "member-1", "config").toString();
+ mockShardActor = TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), name);
}
+
+ mockShardActor.underlyingActor().clear();
}
@After
}
private Props newShardMgrProps() {
- DatastoreContext.Builder builder = DatastoreContext.newBuilder();
- builder.dataStoreType(shardMrgIDSuffix);
return ShardManager.props(new MockClusterWrapper(), new MockConfiguration(),
- builder.build(), ready);
+ datastoreContextBuilder.build(), ready);
+ }
+
+ private Props newPropsShardMgrWithMockShardActor() {
+ return newPropsShardMgrWithMockShardActor("shardManager", mockShardActor, new MockClusterWrapper(),
+ new MockConfiguration());
+ }
+
+ private Props newPropsShardMgrWithMockShardActor(final String name, final ActorRef shardActor,
+ final ClusterWrapper clusterWrapper, final Configuration config) {
+ Creator<ShardManager> creator = new Creator<ShardManager>() {
+ private static final long serialVersionUID = 1L;
+ @Override
+ public ShardManager create() throws Exception {
+ return new ForwardingShardManager(clusterWrapper, config, datastoreContextBuilder.build(),
+ ready, name, shardActor);
+ }
+ };
+
+ return Props.create(new DelegatingShardManagerCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId());
}
@Test
public void testOnReceiveFindPrimaryForNonExistentShard() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- shardManager.tell(new FindPrimary("non-existent", false).toSerializable(), getRef());
+ shardManager.tell(new FindPrimary("non-existent", false), getRef());
- expectMsgEquals(duration("5 seconds"),
- new PrimaryNotFound("non-existent").toSerializable());
+ expectMsgClass(duration("5 seconds"), PrimaryNotFoundException.class);
}};
}
@Test
- public void testOnReceiveFindPrimaryForExistentShard() throws Exception {
+ public void testOnReceiveFindPrimaryForLocalLeaderShard() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
+
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false).toSerializable(), getRef());
+ DataTree mockDataTree = mock(DataTree.class);
+ shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, Optional.of(mockDataTree)), getRef());
+
+ MessageCollectorActor.expectFirstMatching(mockShardActor, RegisterRoleChangeListener.class);
+ shardManager.tell((new RoleChangeNotification(memberId, RaftState.Candidate.name(),
+ RaftState.Leader.name())), mockShardActor);
- expectMsgClass(duration("5 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
+
+ LocalPrimaryShardFound primaryFound = expectMsgClass(duration("5 seconds"), LocalPrimaryShardFound.class);
+ assertTrue("Unexpected primary path " + primaryFound.getPrimaryPath(),
+ primaryFound.getPrimaryPath().contains("member-1-shard-default"));
+ assertSame("getLocalShardDataTree", mockDataTree, primaryFound.getLocalShardDataTree() );
}};
}
@Test
- public void testOnReceiveFindPrimaryForNotInitializedShard() throws Exception {
+ public void testOnReceiveFindPrimaryForNonLocalLeaderShardBeforeMemberUp() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
+ String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.tell(new RoleChangeNotification(memberId1,
+ RaftState.Candidate.name(), RaftState.Follower.name()), mockShardActor);
+ shardManager.tell(new LeaderStateChanged(memberId1, memberId2), mockShardActor);
- shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false).toSerializable(), getRef());
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
- expectMsgClass(duration("5 seconds"), ActorNotInitialized.class);
+ expectMsgClass(duration("5 seconds"), NoShardLeaderException.class);
}};
}
@Test
- public void testOnReceiveFindPrimaryWaitForShardInitialized() throws Exception {
+ public void testOnReceiveFindPrimaryForNonLocalLeaderShard() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
- // We're passing waitUntilInitialized = true to FindPrimary so the response should be
- // delayed until we send ActorInitialized.
- Future<Object> future = Patterns.ask(shardManager, new FindPrimary(Shard.DEFAULT_NAME, true),
- new Timeout(5, TimeUnit.SECONDS));
+ String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
+ MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
+
+ String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.tell(new RoleChangeNotification(memberId1,
+ RaftState.Candidate.name(), RaftState.Follower.name()), mockShardActor);
+ shardManager.tell(new ShardLeaderStateChanged(memberId1, memberId2, Optional.<DataTree>absent()), mockShardActor);
+
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
+ RemotePrimaryShardFound primaryFound = expectMsgClass(duration("5 seconds"), RemotePrimaryShardFound.class);
+ assertTrue("Unexpected primary path " + primaryFound.getPrimaryPath(),
+ primaryFound.getPrimaryPath().contains("member-2-shard-default"));
+ }};
+ }
+
+ @Test
+ public void testOnReceiveFindPrimaryForUninitializedShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
+
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
+
+ expectMsgClass(duration("5 seconds"), NotInitializedException.class);
+ }};
+ }
+
+ @Test
+ public void testOnReceiveFindPrimaryForInitializedShardWithNoRole() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
shardManager.tell(new ActorInitialized(), mockShardActor);
- Object resp = Await.result(future, duration("5 seconds"));
- assertTrue("Expected: PrimaryFound, Actual: " + resp, resp instanceof PrimaryFound);
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
+
+ expectMsgClass(duration("5 seconds"), NoShardLeaderException.class);
}};
}
@Test
- public void testOnReceiveFindLocalShardForNonExistentShard() throws Exception {
+ public void testOnReceiveFindPrimaryForFollowerShardWithNoInitialLeaderId() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindLocalShard("non-existent", false), getRef());
+ String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.tell(new RoleChangeNotification(memberId,
+ RaftState.Candidate.name(), RaftState.Follower.name()), mockShardActor);
- LocalShardNotFound notFound = expectMsgClass(duration("5 seconds"), LocalShardNotFound.class);
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
- assertEquals("getShardName", "non-existent", notFound.getShardName());
+ expectMsgClass(duration("5 seconds"), NoShardLeaderException.class);
+
+ DataTree mockDataTree = mock(DataTree.class);
+ shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, Optional.of(mockDataTree)), mockShardActor);
+
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), getRef());
+
+ LocalPrimaryShardFound primaryFound = expectMsgClass(duration("5 seconds"), LocalPrimaryShardFound.class);
+ assertTrue("Unexpected primary path " + primaryFound.getPrimaryPath(),
+ primaryFound.getPrimaryPath().contains("member-1-shard-default"));
+ assertSame("getLocalShardDataTree", mockDataTree, primaryFound.getLocalShardDataTree() );
}};
}
@Test
- public void testOnReceiveFindLocalShardForExistentShard() throws Exception {
+ public void testOnReceiveFindPrimaryWaitForShardLeader() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ // We're passing waitUntilInitialized = true to FindPrimary so the response should be
+ // delayed until we send ActorInitialized and RoleChangeNotification.
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), getRef());
+
+ expectNoMsg(FiniteDuration.create(150, TimeUnit.MILLISECONDS));
+
shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
+ expectNoMsg(FiniteDuration.create(150, TimeUnit.MILLISECONDS));
- LocalShardFound found = expectMsgClass(duration("5 seconds"), LocalShardFound.class);
+ String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.tell(new RoleChangeNotification(memberId,
+ RaftState.Candidate.name(), RaftState.Leader.name()), mockShardActor);
- assertTrue("Found path contains " + found.getPath().path().toString(),
- found.getPath().path().toString().contains("member-1-shard-default-config"));
+ expectNoMsg(FiniteDuration.create(150, TimeUnit.MILLISECONDS));
+
+ DataTree mockDataTree = mock(DataTree.class);
+ shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, Optional.of(mockDataTree)), mockShardActor);
+
+ LocalPrimaryShardFound primaryFound = expectMsgClass(duration("5 seconds"), LocalPrimaryShardFound.class);
+ assertTrue("Unexpected primary path " + primaryFound.getPrimaryPath(),
+ primaryFound.getPrimaryPath().contains("member-1-shard-default"));
+ assertSame("getLocalShardDataTree", mockDataTree, primaryFound.getLocalShardDataTree() );
+
+ expectNoMsg(FiniteDuration.create(200, TimeUnit.MILLISECONDS));
}};
}
@Test
- public void testOnReceiveFindLocalShardForNotInitializedShard() throws Exception {
+ public void testOnReceiveFindPrimaryWaitForReadyWithUninitializedShard() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
- shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), getRef());
+
+ expectMsgClass(duration("2 seconds"), NotInitializedException.class);
- expectMsgClass(duration("5 seconds"), ActorNotInitialized.class);
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ expectNoMsg(FiniteDuration.create(200, TimeUnit.MILLISECONDS));
}};
}
@Test
- public void testOnReceiveFindLocalShardWaitForShardInitialized() throws Exception {
+ public void testOnReceiveFindPrimaryWaitForReadyWithCandidateShard() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+ shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix,
+ null, RaftState.Candidate.name()), mockShardActor);
- // We're passing waitUntilInitialized = true to FindLocalShard so the response should be
- // delayed until we send ActorInitialized.
- Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
- new Timeout(5, TimeUnit.SECONDS));
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), getRef());
+
+ expectMsgClass(duration("2 seconds"), NoShardLeaderException.class);
+ }};
+ }
+
+ @Test
+ public void testOnReceiveFindPrimaryWaitForReadyWithNoRoleShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
shardManager.tell(new ActorInitialized(), mockShardActor);
- Object resp = Await.result(future, duration("5 seconds"));
- assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), getRef());
+
+ expectMsgClass(duration("2 seconds"), NoShardLeaderException.class);
}};
}
@Test
- public void testOnReceiveMemberUp() throws Exception {
- new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ public void testOnReceiveFindPrimaryForRemoteShard() throws Exception {
+ String shardManagerID = ShardManagerIdentifier.builder().type(shardMrgIDSuffix).build().toString();
- MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
+ // Create an ActorSystem ShardManager actor for member-1.
+
+ final ActorSystem system1 = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
+ Cluster.get(system1).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+
+ ActorRef mockShardActor1 = newMockShardActor(system1, Shard.DEFAULT_NAME, "member-1");
+
+ final TestActorRef<ForwardingShardManager> shardManager1 = TestActorRef.create(system1,
+ newPropsShardMgrWithMockShardActor("shardManager1", mockShardActor1, new ClusterWrapperImpl(system1),
+ new MockConfiguration()), shardManagerID);
+
+ // Create an ActorSystem ShardManager actor for member-2.
+
+ final ActorSystem system2 = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
+
+ Cluster.get(system2).join(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+
+ final ActorRef mockShardActor2 = newMockShardActor(system2, "astronauts", "member-2");
+
+ MockConfiguration mockConfig2 = new MockConfiguration(ImmutableMap.<String, List<String>>builder().
+ put("default", Arrays.asList("member-1", "member-2")).
+ put("astronauts", Arrays.asList("member-2")).build());
- shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
+ final TestActorRef<ForwardingShardManager> shardManager2 = TestActorRef.create(system2,
+ newPropsShardMgrWithMockShardActor("shardManager2", mockShardActor2, new ClusterWrapperImpl(system2),
+ mockConfig2), shardManagerID);
- PrimaryFound found = PrimaryFound.fromSerializable(expectMsgClass(duration("5 seconds"),
- PrimaryFound.SERIALIZABLE_CLASS));
+ new JavaTestKit(system1) {{
+
+ shardManager1.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager2.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ shardManager2.tell(new ActorInitialized(), mockShardActor2);
+
+ String memberId2 = "member-2-shard-astronauts-" + shardMrgIDSuffix;
+ shardManager2.tell(new ShardLeaderStateChanged(memberId2, memberId2,
+ Optional.of(mock(DataTree.class))), mockShardActor2);
+ shardManager2.tell(new RoleChangeNotification(memberId2,
+ RaftState.Candidate.name(), RaftState.Leader.name()), mockShardActor2);
+
+ shardManager1.underlyingActor().waitForMemberUp();
+
+ shardManager1.tell(new FindPrimary("astronauts", false), getRef());
+
+ RemotePrimaryShardFound found = expectMsgClass(duration("5 seconds"), RemotePrimaryShardFound.class);
String path = found.getPrimaryPath();
- assertTrue("Found path contains " + path, path.contains("member-2-shard-astronauts-config"));
+ assertTrue("Unexpected primary path " + path, path.contains("member-2-shard-astronauts-config"));
+
+ shardManager2.underlyingActor().verifyFindPrimary();
+
+ Cluster.get(system2).down(AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558"));
+
+ shardManager1.underlyingActor().waitForMemberRemoved();
+
+ shardManager1.tell(new FindPrimary("astronauts", false), getRef());
+
+ expectMsgClass(duration("5 seconds"), PrimaryNotFoundException.class);
}};
+
+ JavaTestKit.shutdownActorSystem(system1);
+ JavaTestKit.shutdownActorSystem(system2);
}
@Test
- public void testOnReceiveMemberDown() throws Exception {
+ public void testOnReceiveFindLocalShardForNonExistentShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ shardManager.tell(new FindLocalShard("non-existent", false), getRef());
+
+ LocalShardNotFound notFound = expectMsgClass(duration("5 seconds"), LocalShardNotFound.class);
+ assertEquals("getShardName", "non-existent", notFound.getShardName());
+ }};
+ }
+
+ @Test
+ public void testOnReceiveFindLocalShardForExistentShard() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
- MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
+ shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
- expectMsgClass(duration("5 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+ LocalShardFound found = expectMsgClass(duration("5 seconds"), LocalShardFound.class);
+
+ assertTrue("Found path contains " + found.getPath().path().toString(),
+ found.getPath().path().toString().contains("member-1-shard-default-config"));
+ }};
+ }
- MockClusterWrapper.sendMemberRemoved(shardManager, "member-2", getRef().path().toString());
+ @Test
+ public void testOnReceiveFindLocalShardForNotInitializedShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
- shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
+ shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
- expectMsgClass(duration("5 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+ expectMsgClass(duration("5 seconds"), NotInitializedException.class);
+ }};
+ }
+
+ @Test
+ public void testOnReceiveFindLocalShardWaitForShardInitialized() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ // We're passing waitUntilInitialized = true to FindLocalShard so the response should be
+ // delayed until we send ActorInitialized.
+ Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
+ new Timeout(5, TimeUnit.SECONDS));
+
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ Object resp = Await.result(future, duration("5 seconds"));
+ assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
}};
}
}
@Test
- public void testRoleChangeNotificationReleaseReady() throws Exception {
+ public void testRoleChangeNotificationAndShardLeaderStateChangedReleaseReady() throws Exception {
new JavaTestKit(getSystem()) {
{
- final Props persistentProps = ShardManager.props(
- new MockClusterWrapper(),
- new MockConfiguration(),
- DatastoreContext.newBuilder().persistent(true).build(), ready);
- final TestActorRef<ShardManager> shardManager =
- TestActorRef.create(getSystem(), persistentProps);
+ TestActorRef<ShardManager> shardManager = TestActorRef.create(getSystem(), newShardMgrProps());
+
+ String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.underlyingActor().onReceiveCommand(new RoleChangeNotification(
+ memberId, RaftState.Candidate.name(), RaftState.Leader.name()));
+
+ verify(ready, never()).countDown();
- shardManager.underlyingActor().onReceiveCommand(new RoleChangeNotification("member-1-shard-default-unknown", RaftState.Candidate.name(), RaftState.Leader.name()));
+ shardManager.underlyingActor().onReceiveCommand(new ShardLeaderStateChanged(memberId, memberId,
+ Optional.of(mock(DataTree.class))));
+
+ verify(ready, times(1)).countDown();
+
+ }};
+ }
+
+ @Test
+ public void testRoleChangeNotificationToFollowerWithShardLeaderStateChangedReleaseReady() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ TestActorRef<ShardManager> shardManager = TestActorRef.create(getSystem(), newShardMgrProps());
+
+ String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.underlyingActor().onReceiveCommand(new RoleChangeNotification(
+ memberId, null, RaftState.Follower.name()));
+
+ verify(ready, never()).countDown();
+
+ shardManager.underlyingActor().onReceiveCommand(MockClusterWrapper.createMemberUp("member-2", getRef().path().toString()));
+
+ shardManager.underlyingActor().onReceiveCommand(new ShardLeaderStateChanged(memberId,
+ "member-2-shard-default-" + shardMrgIDSuffix, Optional.of(mock(DataTree.class))));
+
+ verify(ready, times(1)).countDown();
+
+ }};
+ }
+
+ @Test
+ public void testReadyCountDownForMemberUpAfterLeaderStateChanged() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ TestActorRef<ShardManager> shardManager = TestActorRef.create(getSystem(), newShardMgrProps());
+
+ String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
+ shardManager.underlyingActor().onReceiveCommand(new RoleChangeNotification(
+ memberId, null, RaftState.Follower.name()));
+
+ verify(ready, never()).countDown();
+
+ shardManager.underlyingActor().onReceiveCommand(new ShardLeaderStateChanged(memberId,
+ "member-2-shard-default-" + shardMrgIDSuffix, Optional.of(mock(DataTree.class))));
+
+ shardManager.underlyingActor().onReceiveCommand(MockClusterWrapper.createMemberUp("member-2", getRef().path().toString()));
verify(ready, times(1)).countDown();
public void testRoleChangeNotificationDoNothingForUnknownShard() throws Exception {
new JavaTestKit(getSystem()) {
{
- final Props persistentProps = ShardManager.props(
- new MockClusterWrapper(),
- new MockConfiguration(),
- DatastoreContext.newBuilder().persistent(true).build(), ready);
- final TestActorRef<ShardManager> shardManager =
- TestActorRef.create(getSystem(), persistentProps);
+ TestActorRef<ShardManager> shardManager = TestActorRef.create(getSystem(), newShardMgrProps());
- shardManager.underlyingActor().onReceiveCommand(new RoleChangeNotification("unknown", RaftState.Candidate.name(), RaftState.Leader.name()));
+ shardManager.underlyingActor().onReceiveCommand(new RoleChangeNotification(
+ "unknown", RaftState.Candidate.name(), RaftState.Leader.name()));
verify(ready, never()).countDown();
private static class DelegatingShardManagerCreator implements Creator<ShardManager> {
private static final long serialVersionUID = 1L;
- private Creator<ShardManager> delegate;
+ private final Creator<ShardManager> delegate;
public DelegatingShardManagerCreator(Creator<ShardManager> delegate) {
this.delegate = delegate;
return delegate.create();
}
}
+
+ private static class ForwardingShardManager extends ShardManager {
+ private CountDownLatch findPrimaryMessageReceived = new CountDownLatch(1);
+ private CountDownLatch memberUpReceived = new CountDownLatch(1);
+ private CountDownLatch memberRemovedReceived = new CountDownLatch(1);
+ private final ActorRef shardActor;
+ private final String name;
+
+ protected ForwardingShardManager(ClusterWrapper cluster, Configuration configuration,
+ DatastoreContext datastoreContext, CountDownLatch waitTillReadyCountdownLatch, String name,
+ ActorRef shardActor) {
+ super(cluster, configuration, datastoreContext, waitTillReadyCountdownLatch);
+ this.shardActor = shardActor;
+ this.name = name;
+ }
+
+ @Override
+ public void handleCommand(Object message) throws Exception {
+ try{
+ super.handleCommand(message);
+ } finally {
+ if(message instanceof FindPrimary) {
+ findPrimaryMessageReceived.countDown();
+ } else if(message instanceof ClusterEvent.MemberUp) {
+ String role = ((ClusterEvent.MemberUp)message).member().roles().head();
+ if(!getCluster().getCurrentMemberName().equals(role)) {
+ memberUpReceived.countDown();
+ }
+ } else if(message instanceof ClusterEvent.MemberRemoved) {
+ String role = ((ClusterEvent.MemberRemoved)message).member().roles().head();
+ if(!getCluster().getCurrentMemberName().equals(role)) {
+ memberRemovedReceived.countDown();
+ }
+ }
+ }
+ }
+
+ @Override
+ public String persistenceId() {
+ return name;
+ }
+
+ @Override
+ protected ActorRef newShardActor(SchemaContext schemaContext, ShardInformation info) {
+ return shardActor;
+ }
+
+ void waitForMemberUp() {
+ assertEquals("MemberUp received", true,
+ Uninterruptibles.awaitUninterruptibly(memberUpReceived, 5, TimeUnit.SECONDS));
+ memberUpReceived = new CountDownLatch(1);
+ }
+
+ void waitForMemberRemoved() {
+ assertEquals("MemberRemoved received", true,
+ Uninterruptibles.awaitUninterruptibly(memberRemovedReceived, 5, TimeUnit.SECONDS));
+ memberRemovedReceived = new CountDownLatch(1);
+ }
+
+ void verifyFindPrimary() {
+ assertEquals("FindPrimary received", true,
+ Uninterruptibles.awaitUninterruptibly(findPrimaryMessageReceived, 5, TimeUnit.SECONDS));
+ findPrimaryMessageReceived = new CountDownLatch(1);
+ }
+ }
}
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.dispatch.Dispatchers;
import akka.dispatch.OnComplete;
import akka.japi.Creator;
-import akka.japi.Procedure;
import akka.pattern.Patterns;
-import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.SaveSnapshotSuccess;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
-import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;
import org.mockito.InOrder;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
+import org.opendaylight.controller.cluster.datastore.messages.ReadData;
+import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
+import org.opendaylight.controller.cluster.datastore.utils.MockDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListenerReply;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
+import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
public class ShardTest extends AbstractShardTest {
+ private static final QName CARS_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test:cars", "2014-03-13", "cars");
+
@Test
public void testRegisterChangeListener() throws Exception {
new ShardTestKit(getSystem()) {{
"testRegisterChangeListener-DataChangeListener");
shard.tell(new RegisterChangeListener(TestModel.TEST_PATH,
- dclActor.path(), AsyncDataBroker.DataChangeScope.BASE), getRef());
+ dclActor, AsyncDataBroker.DataChangeScope.BASE), getRef());
RegisterChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
RegisterChangeListenerReply.class);
@Override
public Shard create() throws Exception {
- return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
- newDatastoreContext(), SCHEMA_CONTEXT) {
+ // Use a non persistent provider because this test actually invokes persist on the journal
+ // this will cause all other messages to not be queued properly after that.
+ // The basic issue is that you cannot use TestActorRef with a persistent actor (at least when
+ // it does do a persist)
+ return new Shard(shardID, Collections.<String,String>emptyMap(),
+ dataStoreContextBuilder.persistent(false).build(), SCHEMA_CONTEXT) {
@Override
public void onReceiveCommand(final Object message) throws Exception {
if(message instanceof ElectionTimeout && firstElectionTimeout) {
onFirstElectionTimeout.await(5, TimeUnit.SECONDS));
// Now send the RegisterChangeListener and wait for the reply.
- shard.tell(new RegisterChangeListener(path, dclActor.path(),
+ shard.tell(new RegisterChangeListener(path, dclActor,
AsyncDataBroker.DataChangeScope.SUBTREE), getRef());
RegisterChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
}};
}
+ @Test
+ public void testRegisterDataTreeChangeListener() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps(), "testRegisterDataTreeChangeListener");
+
+ waitUntilLeader(shard);
+
+ shard.tell(new UpdateSchemaContext(SchemaContextHelper.full()), ActorRef.noSender());
+
+ MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataTreeChangeListenerActor.props(listener),
+ "testRegisterDataTreeChangeListener-DataTreeChangeListener");
+
+ shard.tell(new RegisterDataTreeChangeListener(TestModel.TEST_PATH, dclActor), getRef());
+
+ RegisterDataTreeChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
+ RegisterDataTreeChangeListenerReply.class);
+ String replyPath = reply.getListenerRegistrationPath().toString();
+ assertTrue("Incorrect reply path: " + replyPath, replyPath.matches(
+ "akka:\\/\\/test\\/user\\/testRegisterDataTreeChangeListener\\/\\$.*"));
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ listener.waitForChangeEvents();
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @SuppressWarnings("serial")
+ @Test
+ public void testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ final CountDownLatch onFirstElectionTimeout = new CountDownLatch(1);
+ final CountDownLatch onChangeListenerRegistered = new CountDownLatch(1);
+ Creator<Shard> creator = new Creator<Shard>() {
+ boolean firstElectionTimeout = true;
+
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<String,String>emptyMap(),
+ dataStoreContextBuilder.persistent(false).build(), SCHEMA_CONTEXT) {
+ @Override
+ public void onReceiveCommand(final Object message) throws Exception {
+ if(message instanceof ElectionTimeout && firstElectionTimeout) {
+ firstElectionTimeout = false;
+ final ActorRef self = getSelf();
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.awaitUninterruptibly(
+ onChangeListenerRegistered, 5, TimeUnit.SECONDS);
+ self.tell(message, self);
+ }
+ }.start();
+
+ onFirstElectionTimeout.countDown();
+ } else {
+ super.onReceiveCommand(message);
+ }
+ }
+ };
+ }
+ };
+
+ MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataTreeChangeListenerActor.props(listener),
+ "testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration-DataChangeListener");
+
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)),
+ "testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration");
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ assertEquals("Got first ElectionTimeout", true,
+ onFirstElectionTimeout.await(5, TimeUnit.SECONDS));
+
+ shard.tell(new RegisterDataTreeChangeListener(path, dclActor), getRef());
+ RegisterDataTreeChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+ RegisterDataTreeChangeListenerReply.class);
+ assertNotNull("getListenerRegistratioznPath", reply.getListenerRegistrationPath());
+
+ shard.tell(new FindLeader(), getRef());
+ FindLeaderReply findLeadeReply =
+ expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+ assertNull("Expected the shard not to be the leader", findLeadeReply.getLeaderActor());
+
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ onChangeListenerRegistered.countDown();
+
+ // TODO: investigate why we do not receive data chage events
+ listener.waitForChangeEvents();
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
@Test
public void testCreateTransaction(){
new ShardTestKit(getSystem()) {{
final CountDownLatch recoveryComplete = new CountDownLatch(1);
class TestShard extends Shard {
TestShard() {
- super(shardID, Collections.<ShardIdentifier, String>singletonMap(shardID, null),
+ super(shardID, Collections.<String, String>singletonMap(shardID.toString(), null),
newDatastoreContext(), SCHEMA_CONTEXT);
}
Uninterruptibles.awaitUninterruptibly(recoveryComplete, 5, TimeUnit.SECONDS));
String address = "akka://foobar";
- shard.underlyingActor().onReceiveCommand(new PeerAddressResolved(shardID, address));
+ shard.underlyingActor().onReceiveCommand(new PeerAddressResolved(shardID.toString(), address));
assertEquals("getPeerAddresses", address,
((TestShard)shard.underlyingActor()).getPeerAddresses().get(shardID.toString()));
TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(),
"testApplySnapshot");
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- store.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree store = InMemoryDataTreeFactory.getInstance().create();
+ store.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(store, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
}
@Test
- public void testRecovery() throws Exception {
+ public void testApplyStateWithCandidatePayload() throws Exception {
- // Set up the InMemorySnapshotStore.
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testApplyState");
+
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ DataTreeCandidate candidate = DataTreeCandidates.fromNormalizedNode(TestModel.TEST_PATH, node);
+
+ ApplyState applyState = new ApplyState(null, "test", new ReplicatedLogImplEntry(1, 2,
+ DataTreeCandidatePayload.create(candidate)));
+
+ shard.underlyingActor().onReceiveCommand(applyState);
+
+ NormalizedNode<?,?> actual = readStore(shard, TestModel.TEST_PATH);
+ assertEquals("Applied state", node, actual);
- InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
- testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
+
+ DataTree setupInMemorySnapshotStore() throws DataValidationFailedException {
+ DataTree testStore = InMemoryDataTreeFactory.getInstance().create();
+ testStore.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
SerializationUtils.serializeNormalizedNode(root),
Collections.<ReplicatedLogEntry>emptyList(), 0, 1, -1, -1));
+ return testStore;
+ }
+
+ private static DataTreeCandidatePayload payloadForModification(DataTree source, DataTreeModification mod) throws DataValidationFailedException {
+ source.validate(mod);
+ final DataTreeCandidate candidate = source.prepare(mod);
+ source.commit(candidate);
+ return DataTreeCandidatePayload.create(candidate);
+ }
+
+ @Test
+ public void testDataTreeCandidateRecovery() throws Exception {
+ // Set up the InMemorySnapshotStore.
+ final DataTree source = setupInMemorySnapshotStore();
+
+ final DataTreeModification writeMod = source.takeSnapshot().newModification();
+ writeMod.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
+
+ // Set up the InMemoryJournal.
+ InMemoryJournal.addEntry(shardID.toString(), 0, new ReplicatedLogImplEntry(0, 1, payloadForModification(source, writeMod)));
+
+ int nListEntries = 16;
+ Set<Integer> listEntryKeys = new HashSet<>();
+
+ // Add some ModificationPayload entries
+ for (int i = 1; i <= nListEntries; i++) {
+ listEntryKeys.add(Integer.valueOf(i));
+
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
+
+ final DataTreeModification mod = source.takeSnapshot().newModification();
+ mod.merge(path, ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i));
+
+ InMemoryJournal.addEntry(shardID.toString(), i, new ReplicatedLogImplEntry(i, 1,
+ payloadForModification(source, mod)));
+ }
+
+ InMemoryJournal.addEntry(shardID.toString(), nListEntries + 1,
+ new ApplyJournalEntries(nListEntries));
+
+ testRecovery(listEntryKeys);
+ }
+
+ @Test
+ public void testModicationRecovery() throws Exception {
+
+ // Set up the InMemorySnapshotStore.
+ setupInMemorySnapshotStore();
// Set up the InMemoryJournal.
testRecovery(listEntryKeys);
}
- private ModificationPayload newModificationPayload(final Modification... mods) throws IOException {
+ private static ModificationPayload newModificationPayload(final Modification... mods) throws IOException {
MutableCompositeModification compMod = new MutableCompositeModification();
for(Modification mod: mods) {
compMod.addModification(mod);
return new ModificationPayload(compMod);
}
- @SuppressWarnings({ "unchecked" })
@Test
public void testConcurrentThreePhaseCommits() throws Throwable {
new ShardTestKit(getSystem()) {{
waitUntilLeader(shard);
- // Setup 3 simulated transactions with mock cohorts backed by real cohorts.
+ // Setup 3 simulated transactions with mock cohorts backed by real cohorts.
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
modification2);
String transactionID3 = "tx3";
MutableCompositeModification modification3 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ cohort1, modification1, true, false), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
+ expectMsgClass(duration, ReadyTransactionReply.class));
assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the CanCommitTransaction message for the first Tx.
// Send the ForwardedReadyTransaction for the next 2 Tx's.
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
- cohort3, modification3, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort3, modification3, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
// processed after the first Tx completes.
CanCommitTransactionReply.fromSerializable(resp);
assertEquals("Can commit", true, canCommitReply.getCanCommit());
- Future<Object> commitFuture = Patterns.ask(shard,
- new CommitTransaction(transactionID).toSerializable(), timeout);
- commitFuture.onComplete(new OnCommitFutureComplete(), getSystem().dispatcher());
- }
- }
+ Future<Object> commitFuture = Patterns.ask(shard,
+ new CommitTransaction(transactionID).toSerializable(), timeout);
+ commitFuture.onComplete(new OnCommitFutureComplete(), getSystem().dispatcher());
+ }
+ }
+
+ canCommitFuture1.onComplete(new OnCanCommitFutureComplete(transactionID2),
+ getSystem().dispatcher());
+
+ canCommitFuture2.onComplete(new OnCanCommitFutureComplete(transactionID3),
+ getSystem().dispatcher());
+
+ boolean done = commitLatch.await(timeoutSec, TimeUnit.SECONDS);
+
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Commits complete", true, done);
+
+ InOrder inOrder = inOrder(cohort1, cohort2, cohort3);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort1).commit();
+ inOrder.verify(cohort2).canCommit();
+ inOrder.verify(cohort2).preCommit();
+ inOrder.verify(cohort2).commit();
+ inOrder.verify(cohort3).canCommit();
+ inOrder.verify(cohort3).preCommit();
+ inOrder.verify(cohort3).commit();
+
+ // Verify data in the data store.
+
+ verifyOuterListEntry(shard, 1);
+
+ verifyLastApplied(shard, 2);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ private static BatchedModifications newBatchedModifications(String transactionID, YangInstanceIdentifier path,
+ NormalizedNode<?, ?> data, boolean ready, boolean doCommitOnReady) {
+ return newBatchedModifications(transactionID, null, path, data, ready, doCommitOnReady);
+ }
+
+ private static BatchedModifications newBatchedModifications(String transactionID, String transactionChainID,
+ YangInstanceIdentifier path, NormalizedNode<?, ?> data, boolean ready, boolean doCommitOnReady) {
+ BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION, transactionChainID);
+ batched.addModification(new WriteModification(path, data));
+ batched.setReady(ready);
+ batched.setDoCommitOnReady(doCommitOnReady);
+ return batched;
+ }
+
+ @Test
+ public void testBatchedModificationsWithNoCommitOnReady() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testBatchedModificationsWithNoCommitOnReady");
+
+ waitUntilLeader(shard);
+
+ final String transactionID = "tx";
+ FiniteDuration duration = duration("5 seconds");
+
+ final AtomicReference<ShardDataTreeCohort> mockCohort = new AtomicReference<>();
+ ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
+ @Override
+ public ShardDataTreeCohort decorate(String txID, ShardDataTreeCohort actual) {
+ if(mockCohort.get() == null) {
+ mockCohort.set(createDelegatingMockCohort("cohort", actual));
+ }
+
+ return mockCohort.get();
+ }
+ };
+
+ shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
+
+ // Send a BatchedModifications to start a transaction.
+
+ shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false, false), getRef());
+ expectMsgClass(duration, BatchedModificationsReply.class);
+
+ // Send a couple more BatchedModifications.
+
+ shard.tell(newBatchedModifications(transactionID, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), false, false), getRef());
+ expectMsgClass(duration, BatchedModificationsReply.class);
+
+ shard.tell(newBatchedModifications(transactionID, YangInstanceIdentifier.builder(
+ TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ InOrder inOrder = inOrder(mockCohort.get());
+ inOrder.verify(mockCohort.get()).canCommit();
+ inOrder.verify(mockCohort.get()).preCommit();
+ inOrder.verify(mockCohort.get()).commit();
+
+ // Verify data in the data store.
+
+ verifyOuterListEntry(shard, 1);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testBatchedModificationsWithCommitOnReady() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testBatchedModificationsWithCommitOnReady");
+
+ waitUntilLeader(shard);
+
+ final String transactionID = "tx";
+ FiniteDuration duration = duration("5 seconds");
+
+ final AtomicReference<ShardDataTreeCohort> mockCohort = new AtomicReference<>();
+ ShardCommitCoordinator.CohortDecorator cohortDecorator = new ShardCommitCoordinator.CohortDecorator() {
+ @Override
+ public ShardDataTreeCohort decorate(String txID, ShardDataTreeCohort actual) {
+ if(mockCohort.get() == null) {
+ mockCohort.set(createDelegatingMockCohort("cohort", actual));
+ }
+
+ return mockCohort.get();
+ }
+ };
+
+ shard.underlyingActor().getCommitCoordinator().setCohortDecorator(cohortDecorator);
+
+ // Send a BatchedModifications to start a transaction.
+
+ shard.tell(newBatchedModifications(transactionID, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false, false), getRef());
+ expectMsgClass(duration, BatchedModificationsReply.class);
+
+ // Send a couple more BatchedModifications.
+
+ shard.tell(newBatchedModifications(transactionID, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), false, false), getRef());
+ expectMsgClass(duration, BatchedModificationsReply.class);
+
+ shard.tell(newBatchedModifications(transactionID, YangInstanceIdentifier.builder(
+ TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), true, true), getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ InOrder inOrder = inOrder(mockCohort.get());
+ inOrder.verify(mockCohort.get()).canCommit();
+ inOrder.verify(mockCohort.get()).preCommit();
+ inOrder.verify(mockCohort.get()).commit();
+
+ // Verify data in the data store.
+
+ verifyOuterListEntry(shard, 1);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void verifyOuterListEntry(final TestActorRef<Shard> shard, Object expIDValue) throws Exception {
+ NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+ assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
+ outerList.getValue() instanceof Iterable);
+ Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
+ entry instanceof MapEntryNode);
+ MapEntryNode mapEntry = (MapEntryNode)entry;
+ Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
+ mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
+ assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.get().getValue());
+ }
+
+ @Test
+ public void testBatchedModificationsOnTransactionChain() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testBatchedModificationsOnTransactionChain");
+
+ waitUntilLeader(shard);
+
+ String transactionChainID = "txChain";
+ String transactionID1 = "tx1";
+ String transactionID2 = "tx2";
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Send a BatchedModifications to start a chained write transaction and ready it.
+
+ ContainerNode containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ shard.tell(newBatchedModifications(transactionID1, transactionChainID, path,
+ containerNode, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Create a read Tx on the same chain.
+
+ shard.tell(new CreateTransaction(transactionID2, TransactionProxy.TransactionType.READ_ONLY.ordinal() ,
+ transactionChainID).toSerializable(), getRef());
+
+ CreateTransactionReply createReply = expectMsgClass(duration("3 seconds"), CreateTransactionReply.class);
+
+ getSystem().actorSelection(createReply.getTransactionActorPath()).tell(new ReadData(path), getRef());
+ ReadDataReply readReply = expectMsgClass(duration("3 seconds"), ReadDataReply.class);
+ assertEquals("Read node", containerNode, readReply.getNormalizedNode());
+
+ // Commit the write transaction.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Verify data in the data store.
+
+ NormalizedNode<?, ?> actualNode = readStore(shard, path);
+ assertEquals("Stored node", containerNode, actualNode);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testOnBatchedModificationsWhenNotLeader() {
+ final AtomicBoolean overrideLeaderCalls = new AtomicBoolean();
+ new ShardTestKit(getSystem()) {{
+ Creator<Shard> creator = new Creator<Shard>() {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<String,String>emptyMap(),
+ newDatastoreContext(), SCHEMA_CONTEXT) {
+ @Override
+ protected boolean isLeader() {
+ return overrideLeaderCalls.get() ? false : super.isLeader();
+ }
+
+ @Override
+ protected ActorSelection getLeader() {
+ return overrideLeaderCalls.get() ? getSystem().actorSelection(getRef().path()) :
+ super.getLeader();
+ }
+ };
+ }
+ };
+
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)), "testOnBatchedModificationsWhenNotLeader");
+
+ waitUntilLeader(shard);
+
+ overrideLeaderCalls.set(true);
+
+ BatchedModifications batched = new BatchedModifications("tx", DataStoreVersions.CURRENT_VERSION, "");
+
+ shard.tell(batched, ActorRef.noSender());
+
+ expectMsgEquals(batched);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testForwardedReadyTransactionWithImmediateCommit() throws Exception{
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testForwardedReadyTransactionWithImmediateCommit");
+
+ waitUntilLeader(shard);
+
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ ShardDataTreeCohort cohort = setupMockWriteTransaction("cohort", dataStore,
+ TestModel.TEST_PATH, containerNode, modification);
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, ThreePhaseCommitCohortMessages.CommitTransactionReply.class);
+
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
+
+ NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+ assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testCommitWithPersistenceDisabled() throws Throwable {
+ dataStoreContextBuilder.persistent(false);
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitWithPersistenceDisabled");
+
+ waitUntilLeader(shard);
+
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
+
+ // Setup a simulated transactions with a mock cohort.
+
+ String transactionID = "tx";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ ShardDataTreeCohort cohort = setupMockWriteTransaction("cohort", dataStore,
+ TestModel.TEST_PATH, containerNode, modification);
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
+
+ NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+ assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ private static DataTreeCandidateTip mockCandidate(final String name) {
+ DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
+ DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
+ doReturn(ModificationType.WRITE).when(mockCandidateNode).getModificationType();
+ doReturn(Optional.of(ImmutableNodes.containerNode(CARS_QNAME))).when(mockCandidateNode).getDataAfter();
+ doReturn(YangInstanceIdentifier.builder().build()).when(mockCandidate).getRootPath();
+ doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
+ return mockCandidate;
+ }
+
+ private static DataTreeCandidateTip mockUnmodifiedCandidate(final String name) {
+ DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
+ DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
+ doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).getModificationType();
+ doReturn(YangInstanceIdentifier.builder().build()).when(mockCandidate).getRootPath();
+ doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
+ return mockCandidate;
+ }
+
+ @Test
+ public void testCommitWhenTransactionHasNoModifications(){
+ // Note that persistence is enabled which would normally result in the entry getting written to the journal
+ // but here that need not happen
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitWhenTransactionHasNoModifications");
+
+ waitUntilLeader(shard);
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).preCommit();
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).commit();
+ doReturn(mockUnmodifiedCandidate("cohort1-candidate")).when(cohort).getCandidate();
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
+ expectMsgClass(duration, ThreePhaseCommitCohortMessages.CommitTransactionReply.class);
+
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
+
+ // Use MBean for verification
+ // Committed transaction count should increase as usual
+ assertEquals(1,shard.underlyingActor().getShardMBean().getCommittedTransactionsCount());
+
+ // Commit index should not advance because this does not go into the journal
+ assertEquals(-1, shard.underlyingActor().getShardMBean().getCommitIndex());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+
+ }
+ };
+ }
+
+ @Test
+ public void testCommitWhenTransactionHasModifications(){
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitWhenTransactionHasModifications");
+
+ waitUntilLeader(shard);
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ modification.addModification(new DeleteModification(YangInstanceIdentifier.builder().build()));
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).preCommit();
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).commit();
+ doReturn(mockCandidate("cohort1-candidate")).when(cohort).getCandidate();
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
- canCommitFuture1.onComplete(new OnCanCommitFutureComplete(transactionID2),
- getSystem().dispatcher());
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- canCommitFuture2.onComplete(new OnCanCommitFutureComplete(transactionID3),
- getSystem().dispatcher());
+ // Send the CanCommitTransaction message.
- boolean done = commitLatch.await(timeoutSec, TimeUnit.SECONDS);
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
- if(caughtEx.get() != null) {
- throw caughtEx.get();
- }
+ shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
+ expectMsgClass(duration, ThreePhaseCommitCohortMessages.CommitTransactionReply.class);
- assertEquals("Commits complete", true, done);
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+ inOrder.verify(cohort).commit();
- InOrder inOrder = inOrder(cohort1, cohort2, cohort3);
- inOrder.verify(cohort1).canCommit();
- inOrder.verify(cohort1).preCommit();
- inOrder.verify(cohort1).commit();
- inOrder.verify(cohort2).canCommit();
- inOrder.verify(cohort2).preCommit();
- inOrder.verify(cohort2).commit();
- inOrder.verify(cohort3).canCommit();
- inOrder.verify(cohort3).preCommit();
- inOrder.verify(cohort3).commit();
+ // Use MBean for verification
+ // Committed transaction count should increase as usual
+ assertEquals(1, shard.underlyingActor().getShardMBean().getCommittedTransactionsCount());
- // Verify data in the data store.
+ // Commit index should advance as we do not have an empty modification
+ assertEquals(0, shard.underlyingActor().getShardMBean().getCommitIndex());
- NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
- assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
- assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
- outerList.getValue() instanceof Iterable);
- Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
- assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
- entry instanceof MapEntryNode);
- MapEntryNode mapEntry = (MapEntryNode)entry;
- Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
- mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
- assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
- assertEquals(TestModel.ID_QNAME.getLocalName() + " value", 1, idLeaf.get().getValue());
-
- verifyLastLogIndex(shard, 2);
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
- shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
- }};
+ }
+ };
}
@Test
- public void testCommitWithPersistenceDisabled() throws Throwable {
- dataStoreContextBuilder.persistent(false);
+ public void testCommitPhaseFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
waitUntilLeader(shard);
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ // Setup 2 simulated transactions with mock cohorts. The first one fails in the
+ // commit phase.
- // Setup a simulated transactions with a mock cohort.
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ ShardDataTreeCohort cohort1 = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort1).preCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).commit();
+ doReturn(mockCandidate("cohort1-candidate")).when(cohort1).getCandidate();
- String transactionID = "tx";
- MutableCompositeModification modification = new MutableCompositeModification();
- NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort", dataStore,
- TestModel.TEST_PATH, containerNode, modification);
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ ShardDataTreeCohort cohort2 = mock(ShardDataTreeCohort.class, "cohort2");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- // Send the CanCommitTransaction message.
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
- shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Can commit", true, canCommitReply.getCanCommit());
- // Send the CanCommitTransaction message.
+ // Send the CanCommitTransaction message for the 2nd Tx. This should get queued and
+ // processed after the first Tx completes.
- shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
- expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+ Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2).toSerializable(), timeout);
- InOrder inOrder = inOrder(cohort);
- inOrder.verify(cohort).canCommit();
- inOrder.verify(cohort).preCommit();
- inOrder.verify(cohort).commit();
+ // Send the CommitTransaction message for the first Tx. This should send back an error
+ // and trigger the 2nd Tx to proceed.
- NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
- assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ canCommitFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable t, final Object resp) {
+ latch.countDown();
+ }
+ }, getSystem().dispatcher());
+
+ assertEquals("2nd CanCommit complete", true, latch.await(5, TimeUnit.SECONDS));
+
+ InOrder inOrder = inOrder(cohort1, cohort2);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort1).commit();
+ inOrder.verify(cohort2).canCommit();
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
- public void testCommitPhaseFailure() throws Throwable {
+ public void testPreCommitPhaseFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
- "testCommitPhaseFailure");
+ "testPreCommitPhaseFailure");
waitUntilLeader(shard);
- // Setup 2 simulated transactions with mock cohorts. The first one fails in the
- // commit phase.
-
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort1 = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
- doReturn(Futures.immediateFuture(null)).when(cohort1).preCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).commit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).preCommit();
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ ShardDataTreeCohort cohort2 = mock(ShardDataTreeCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort1, modification1, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
InOrder inOrder = inOrder(cohort1, cohort2);
inOrder.verify(cohort1).canCommit();
inOrder.verify(cohort1).preCommit();
- inOrder.verify(cohort1).commit();
inOrder.verify(cohort2).canCommit();
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
@Test
- public void testPreCommitPhaseFailure() throws Throwable {
+ public void testCanCommitPhaseFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
- "testPreCommitPhaseFailure");
+ "testCanCommitPhaseFailure");
waitUntilLeader(shard);
- String transactionID = "tx1";
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID1 = "tx1";
MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Send another can commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).preCommit();
- FiniteDuration duration = duration("5 seconds");
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
+ CanCommitTransactionReply reply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("getCanCommit", true, reply.getCanCommit());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testCanCommitPhaseFalseResponse() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitPhaseFalseResponse");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.FALSE)).when(cohort).canCommit();
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message.
- shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
- CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
- expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
- assertEquals("Can commit", true, canCommitReply.getCanCommit());
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ CanCommitTransactionReply reply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("getCanCommit", false, reply.getCanCommit());
- // Send the CommitTransaction message. This should send back an error
- // for preCommit failure.
+ // Send another can commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
+ reply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("getCanCommit", true, reply.getCanCommit());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testImmediateCommitWithCanCommitPhaseFailure() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testImmediateCommitWithCanCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
- shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
expectMsgClass(duration, akka.actor.Status.Failure.class);
- InOrder inOrder = inOrder(cohort);
- inOrder.verify(cohort).canCommit();
- inOrder.verify(cohort).preCommit();
+ // Send another can commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).commit();
+ DataTreeCandidateTip candidate = mock(DataTreeCandidateTip.class);
+ DataTreeCandidateNode candidateRoot = mock(DataTreeCandidateNode.class);
+ doReturn(ModificationType.UNMODIFIED).when(candidateRoot).getModificationType();
+ doReturn(candidateRoot).when(candidate).getRootNode();
+ doReturn(candidate).when(cohort).getCandidate();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
- public void testCanCommitPhaseFailure() throws Throwable {
+ public void testImmediateCommitWithCanCommitPhaseFalseResponse() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
- "testCanCommitPhaseFailure");
+ "testImmediateCommitWithCanCommitPhaseFalseResponse");
waitUntilLeader(shard);
String transactionID = "tx1";
MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
- doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
+ ShardDataTreeCohort cohort = mock(ShardDataTreeCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.FALSE)).when(cohort).canCommit();
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort, modification, true, true), getRef());
- // Send the CanCommitTransaction message.
-
- shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
expectMsgClass(duration, akka.actor.Status.Failure.class);
+ // Send another can commit to ensure the failed one got cleaned up.
+
+ reset(cohort);
+
+ String transactionID2 = "tx2";
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort).commit();
+ DataTreeCandidateTip candidate = mock(DataTreeCandidateTip.class);
+ DataTreeCandidateNode candidateRoot = mock(DataTreeCandidateNode.class);
+ doReturn(ModificationType.UNMODIFIED).when(candidateRoot).getModificationType();
+ doReturn(candidateRoot).when(candidate).getRootNode();
+ doReturn(candidate).when(cohort).getCandidate();
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort, modification, true, true), getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
waitUntilLeader(shard);
final FiniteDuration duration = duration("5 seconds");
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
final String transactionID = "tx1";
- Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit =
- new Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>>() {
+ Function<ShardDataTreeCohort, ListenableFuture<Void>> preCommit =
+ new Function<ShardDataTreeCohort, ListenableFuture<Void>>() {
@Override
- public ListenableFuture<Void> apply(final DOMStoreThreePhaseCommitCohort cohort) {
+ public ListenableFuture<Void> apply(final ShardDataTreeCohort cohort) {
ListenableFuture<Void> preCommitFuture = cohort.preCommit();
// Simulate an AbortTransaction message occurring during replication, after
};
MutableCompositeModification modification = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME),
modification, preCommit);
shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
- cohort, modification, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort, modification, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
final FiniteDuration duration = duration("5 seconds");
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
writeToStore(shard, TestModel.OUTER_LIST_PATH,
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
MutableCompositeModification modification2 = new MutableCompositeModification();
YangInstanceIdentifier listNodePath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
listNodePath,
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2),
modification2);
// Ready the Tx's
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort1, modification1, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx. We don't send the commit so it should timeout.
shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+ // Try to commit the 1st Tx - should fail as it's not the current Tx.
+
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
// Commit the 2nd Tx.
shard.tell(new CommitTransaction(transactionID2).toSerializable(), getRef());
final FiniteDuration duration = duration("5 seconds");
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
modification2);
String transactionID3 = "tx3";
MutableCompositeModification modification3 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification3);
// Ready the Tx's
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort1, modification1, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
- cohort3, modification3, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort3, modification3, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// canCommit 1st Tx.
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ ShardDataTreeCohort cohort1 = mock(ShardDataTreeCohort.class, "cohort1");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
doReturn(Futures.immediateFuture(null)).when(cohort1).abort();
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ ShardDataTreeCohort cohort2 = mock(ShardDataTreeCohort.class, "cohort2");
doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
FiniteDuration duration = duration("5 seconds");
// by the ShardTransaction.
shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort1, modification1, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
- expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+ cohort2, modification2, true, false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
// Send the CanCommitTransaction message for the first Tx.
@SuppressWarnings("serial")
public void testCreateSnapshot(final boolean persistent, final String shardActorName) throws Exception{
- final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
- class DelegatingPersistentDataProvider implements DataPersistenceProvider {
- DataPersistenceProvider delegate;
-
- DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public boolean isRecoveryApplicable() {
- return delegate.isRecoveryApplicable();
- }
+ final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
- @Override
- public <T> void persist(T o, Procedure<T> procedure) {
- delegate.persist(o, procedure);
+ final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
+ class TestPersistentDataProvider extends DelegatingPersistentDataProvider {
+ TestPersistentDataProvider(DataPersistenceProvider delegate) {
+ super(delegate);
}
@Override
public void saveSnapshot(Object o) {
savedSnapshot.set(o);
- delegate.saveSnapshot(o);
- }
-
- @Override
- public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
- delegate.deleteSnapshots(criteria);
- }
-
- @Override
- public void deleteMessages(long sequenceNumber) {
- delegate.deleteMessages(sequenceNumber);
+ super.saveSnapshot(o);
}
}
dataStoreContextBuilder.persistent(persistent);
new ShardTestKit(getSystem()) {{
- final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
- Creator<Shard> creator = new Creator<Shard>() {
- @Override
- public Shard create() throws Exception {
- return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
- newDatastoreContext(), SCHEMA_CONTEXT) {
+ class TestShard extends Shard {
- DelegatingPersistentDataProvider delegating;
+ protected TestShard(ShardIdentifier name, Map<String, String> peerAddresses,
+ DatastoreContext datastoreContext, SchemaContext schemaContext) {
+ super(name, peerAddresses, datastoreContext, schemaContext);
+ setPersistence(new TestPersistentDataProvider(super.persistence()));
+ }
- @Override
- protected DataPersistenceProvider persistence() {
- if(delegating == null) {
- delegating = new DelegatingPersistentDataProvider(super.persistence());
- }
+ @Override
+ public void handleCommand(Object message) {
+ super.handleCommand(message);
- return delegating;
- }
+ if (message instanceof SaveSnapshotSuccess || message.equals("commit_snapshot")) {
+ latch.get().countDown();
+ }
+ }
- @Override
- protected void commitSnapshot(final long sequenceNumber) {
- super.commitSnapshot(sequenceNumber);
- latch.get().countDown();
- }
- };
+ @Override
+ public RaftActorContext getRaftActorContext() {
+ return super.getRaftActorContext();
+ }
+ }
+
+ Creator<Shard> creator = new Creator<Shard>() {
+ @Override
+ public Shard create() throws Exception {
+ return new TestShard(shardID, Collections.<String,String>emptyMap(),
+ newDatastoreContext(), SCHEMA_CONTEXT);
}
};
NormalizedNode<?,?> expectedRoot = readStore(shard, YangInstanceIdentifier.builder().build());
- CaptureSnapshot capture = new CaptureSnapshot(-1, -1, -1, -1, -1, -1);
- shard.tell(capture, getRef());
+ // Trigger creation of a snapshot by ensuring
+ RaftActorContext raftActorContext = ((TestShard) shard.underlyingActor()).getRaftActorContext();
+ raftActorContext.getSnapshotManager().capture(mock(ReplicatedLogEntry.class), -1);
assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
latch.set(new CountDownLatch(1));
savedSnapshot.set(null);
- shard.tell(capture, getRef());
+ raftActorContext.getSnapshotManager().capture(mock(ReplicatedLogEntry.class), -1);
assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
/**
* This test simply verifies that the applySnapShot logic will work
* @throws ReadFailedException
+ * @throws DataValidationFailedException
*/
@Test
- public void testInMemoryDataStoreRestore() throws ReadFailedException {
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", MoreExecutors.sameThreadExecutor());
-
- store.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ public void testInMemoryDataTreeRestore() throws ReadFailedException, DataValidationFailedException {
+ DataTree store = InMemoryDataTreeFactory.getInstance().create();
+ store.setSchemaContext(SCHEMA_CONTEXT);
- DOMStoreWriteTransaction putTransaction = store.newWriteOnlyTransaction();
+ DataTreeModification putTransaction = store.takeSnapshot().newModification();
putTransaction.write(TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- commitTransaction(putTransaction);
+ commitTransaction(store, putTransaction);
- NormalizedNode<?, ?> expected = readStore(store);
+ NormalizedNode<?, ?> expected = readStore(store, YangInstanceIdentifier.builder().build());
- DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
+ DataTreeModification writeTransaction = store.takeSnapshot().newModification();
writeTransaction.delete(YangInstanceIdentifier.builder().build());
writeTransaction.write(YangInstanceIdentifier.builder().build(), expected);
- commitTransaction(writeTransaction);
+ commitTransaction(store, writeTransaction);
- NormalizedNode<?, ?> actual = readStore(store);
+ NormalizedNode<?, ?> actual = readStore(store, YangInstanceIdentifier.builder().build());
assertEquals(expected, actual);
}
final DatastoreContext persistentContext = DatastoreContext.newBuilder().
shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(true).build();
- final Props persistentProps = Shard.props(shardID, Collections.<ShardIdentifier, String>emptyMap(),
+ final Props persistentProps = Shard.props(shardID, Collections.<String, String>emptyMap(),
persistentContext, SCHEMA_CONTEXT);
final DatastoreContext nonPersistentContext = DatastoreContext.newBuilder().
shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(false).build();
- final Props nonPersistentProps = Shard.props(shardID, Collections.<ShardIdentifier, String>emptyMap(),
+ final Props nonPersistentProps = Shard.props(shardID, Collections.<String, String>emptyMap(),
nonPersistentContext, SCHEMA_CONTEXT);
new ShardTestKit(getSystem()) {{
TestActorRef<Shard> shard1 = TestActorRef.create(getSystem(),
persistentProps, "testPersistence1");
- assertTrue("Recovery Applicable", shard1.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ assertTrue("Recovery Applicable", shard1.underlyingActor().persistence().isRecoveryApplicable());
shard1.tell(PoisonPill.getInstance(), ActorRef.noSender());
TestActorRef<Shard> shard2 = TestActorRef.create(getSystem(),
nonPersistentProps, "testPersistence2");
- assertFalse("Recovery Not Applicable", shard2.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ assertFalse("Recovery Not Applicable", shard2.underlyingActor().persistence().isRecoveryApplicable());
shard2.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
+
}
@Test
TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testOnDatastoreContext");
assertEquals("isRecoveryApplicable", true,
- shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ shard.underlyingActor().persistence().isRecoveryApplicable());
waitUntilLeader(shard);
shard.tell(dataStoreContextBuilder.persistent(false).build(), ActorRef.noSender());
assertEquals("isRecoveryApplicable", false,
- shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ shard.underlyingActor().persistence().isRecoveryApplicable());
shard.tell(dataStoreContextBuilder.persistent(true).build(), ActorRef.noSender());
assertEquals("isRecoveryApplicable", true,
- shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+ shard.underlyingActor().persistence().isRecoveryApplicable());
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
shard.tell(new RegisterRoleChangeListener(), listener);
- // TODO: MessageCollectorActor exists as a test util in both the akka-raft and distributed-datastore
- // projects. Need to move it to commons as a regular utility and then we can get rid of this arbitrary
- // sleep.
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ MessageCollectorActor.expectFirstMatching(listener, RegisterRoleChangeListenerReply.class);
+
+ ShardLeaderStateChanged leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener,
+ ShardLeaderStateChanged.class);
+ assertEquals("getLocalShardDataTree present", true,
+ leaderStateChanged.getLocalShardDataTree().isPresent());
+ assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(),
+ leaderStateChanged.getLocalShardDataTree().get());
- List<Object> allMatching = MessageCollectorActor.getAllMatching(listener, RegisterRoleChangeListenerReply.class);
+ MessageCollectorActor.clearMessages(listener);
- assertEquals(1, allMatching.size());
+ // Force a leader change
+
+ shard.tell(new RequestVote(10000, "member2", 50, 50), getRef());
+
+ leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener,
+ ShardLeaderStateChanged.class);
+ assertEquals("getLocalShardDataTree present", false,
+ leaderStateChanged.getLocalShardDataTree().isPresent());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
};
}
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
- private void commitTransaction(final DOMStoreWriteTransaction transaction) {
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- ListenableFuture<Void> future =
- commitCohort.preCommit();
- try {
- future.get();
- future = commitCohort.commit();
- future.get();
- } catch (InterruptedException | ExecutionException e) {
- }
+ private static void commitTransaction(DataTree store, final DataTreeModification modification) throws DataValidationFailedException {
+ modification.ready();
+ store.validate(modification);
+ store.commit(store.prepare(modification));
}
}
import akka.actor.Props;
import akka.pattern.AskTimeoutException;
import akka.testkit.TestActorRef;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* @author Basheeruddin Ahmed <syedbahm@cisco.com>
*/
public class ShardTransactionFailureTest extends AbstractActorTest {
- private static final InMemoryDOMDataStore store =
- new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
-
private static final SchemaContext testSchemaContext =
- TestModel.createTestContext();
+ TestModel.createTestContext();
+ private static final TransactionType RO = TransactionType.READ_ONLY;
+ private static final TransactionType RW = TransactionType.READ_WRITE;
+ private static final TransactionType WO = TransactionType.WRITE_ONLY;
+
+ private static final ShardDataTree store = new ShardDataTree(testSchemaContext);
private static final ShardIdentifier SHARD_IDENTIFIER =
ShardIdentifier.builder().memberName("member-1")
private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
- @BeforeClass
- public static void staticSetup() {
- store.onGlobalContextUpdated(testSchemaContext);
- }
-
private ActorRef createShard(){
- return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.<ShardIdentifier, String>emptyMap(), datastoreContext,
+ return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.<String, String>emptyMap(), datastoreContext,
TestModel.createTestContext()));
}
throws Throwable {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(RO, store.newReadOnlyTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- subject.underlyingActor().getDOMStoreTransaction().close();
+ subject.underlyingActor().getDOMStoreTransaction().abort();
future = akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
throws Throwable {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- subject.underlyingActor().getDOMStoreTransaction().close();
+ subject.underlyingActor().getDOMStoreTransaction().abort();
future = akka.pattern.Patterns.ask(subject, readData, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
throws Throwable {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
akka.pattern.Patterns.ask(subject, dataExists, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- subject.underlyingActor().getDOMStoreTransaction().close();
+ subject.underlyingActor().getDOMStoreTransaction().abort();
future = akka.pattern.Patterns.ask(subject, dataExists, 3000);
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(WO, store.newReadWriteTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props, "testNegativeMergeTransactionReady");
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(RW, store.newReadWriteTransaction("test-txn", null), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doThrow;
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.actor.Status.Failure;
import akka.actor.Terminated;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
-import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.ShardWriteTransaction.GetCompositeModificationReply;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public class ShardTransactionTest extends AbstractActorTest {
private static final SchemaContext testSchemaContext = TestModel.createTestContext();
+ private static final TransactionType RO = TransactionType.READ_ONLY;
+ private static final TransactionType RW = TransactionType.READ_WRITE;
+ private static final TransactionType WO = TransactionType.WRITE_ONLY;
private static final ShardIdentifier SHARD_IDENTIFIER =
ShardIdentifier.builder().memberName("member-1")
private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
- private final InMemoryDOMDataStore store =
- new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+ private final ShardDataTree store = new ShardDataTree(testSchemaContext);
- @Before
- public void setup() {
- store.onGlobalContextUpdated(testSchemaContext);
- }
+ private int txCounter = 0;
- private ActorRef createShard(){
+ private ActorRef createShard() {
return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.<ShardIdentifier, String>emptyMap(), datastoreContext, TestModel.createTestContext()));
+ Collections.<String, String>emptyMap(), datastoreContext, TestModel.createTestContext()));
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, String name) {
- return newTransactionActor(transaction, name, DataStoreVersions.CURRENT_VERSION);
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, String name) {
+ return newTransactionActor(type, transaction, name, DataStoreVersions.CURRENT_VERSION);
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, String name, short version) {
- return newTransactionActor(transaction, null, name, version);
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, String name, short version) {
+ return newTransactionActor(type, transaction, null, name, version);
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, ActorRef shard, String name) {
- return newTransactionActor(transaction, null, name, DataStoreVersions.CURRENT_VERSION);
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shard, String name) {
+ return newTransactionActor(type, transaction, null, name, DataStoreVersions.CURRENT_VERSION);
}
- private ActorRef newTransactionActor(DOMStoreTransaction transaction, ActorRef shard, String name,
+ private ActorRef newTransactionActor(TransactionType type, AbstractShardDataTreeTransaction<?> transaction, ActorRef shard, String name,
short version) {
- Props props = ShardTransaction.props(transaction, shard != null ? shard : createShard(),
- testSchemaContext, datastoreContext, shardStats, "txn", version);
+ Props props = ShardTransaction.props(type, transaction, shard != null ? shard : createShard(),
+ datastoreContext, shardStats, "txn", version);
return getSystem().actorOf(props, name);
}
+ private ReadOnlyShardDataTreeTransaction readOnlyTransaction() {
+ return store.newReadOnlyTransaction("test-ro-" + String.valueOf(txCounter++), null);
+ }
+
+ private ReadWriteShardDataTreeTransaction readWriteTransaction() {
+ return store.newReadWriteTransaction("test-rw-" + String.valueOf(txCounter++), null);
+ }
+
@Test
public void testOnReceiveReadData() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- testOnReceiveReadData(newTransactionActor(store.newReadOnlyTransaction(), shard, "testReadDataRO"));
+ testOnReceiveReadData(newTransactionActor(RO, readOnlyTransaction(), shard, "testReadDataRO"));
- testOnReceiveReadData(newTransactionActor(store.newReadWriteTransaction(), shard, "testReadDataRW"));
+ testOnReceiveReadData(newTransactionActor(RW, readWriteTransaction(), shard, "testReadDataRW"));
}
private void testOnReceiveReadData(final ActorRef transaction) {
final ActorRef shard = createShard();
testOnReceiveReadDataWhenDataNotFound(newTransactionActor(
- store.newReadOnlyTransaction(), shard, "testReadDataWhenDataNotFoundRO"));
+ RO, readOnlyTransaction(), shard, "testReadDataWhenDataNotFoundRO"));
testOnReceiveReadDataWhenDataNotFound(newTransactionActor(
- store.newReadWriteTransaction(), shard, "testReadDataWhenDataNotFoundRW"));
+ RW, readWriteTransaction(), shard, "testReadDataWhenDataNotFoundRW"));
}
private void testOnReceiveReadDataWhenDataNotFound(final ActorRef transaction) {
@Test
public void testOnReceiveReadDataHeliumR1() throws Exception {
new JavaTestKit(getSystem()) {{
- ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ ActorRef transaction = newTransactionActor(RO, readOnlyTransaction(),
"testOnReceiveReadDataHeliumR1", DataStoreVersions.HELIUM_1_VERSION);
transaction.tell(new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(),
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- testOnReceiveDataExistsPositive(newTransactionActor(store.newReadOnlyTransaction(), shard,
+ testOnReceiveDataExistsPositive(newTransactionActor(RO, readOnlyTransaction(), shard,
"testDataExistsPositiveRO"));
- testOnReceiveDataExistsPositive(newTransactionActor(store.newReadWriteTransaction(), shard,
+ testOnReceiveDataExistsPositive(newTransactionActor(RW, readWriteTransaction(), shard,
"testDataExistsPositiveRW"));
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- testOnReceiveDataExistsNegative(newTransactionActor(store.newReadOnlyTransaction(), shard,
+ testOnReceiveDataExistsNegative(newTransactionActor(RO, readOnlyTransaction(), shard,
"testDataExistsNegativeRO"));
- testOnReceiveDataExistsNegative(newTransactionActor(store.newReadWriteTransaction(), shard,
+ testOnReceiveDataExistsNegative(newTransactionActor(RW, readWriteTransaction(), shard,
"testDataExistsNegativeRW"));
}
}
@Test
- public void testOnReceiveWriteData() throws Exception {
+ public void testOnReceiveWriteData() {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveWriteData");
transaction.tell(new WriteData(TestModel.TEST_PATH,
}
@Test
- public void testOnReceiveHeliumR1WriteData() throws Exception {
+ public void testOnReceiveHeliumR1WriteData() {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveHeliumR1WriteData", DataStoreVersions.HELIUM_1_VERSION);
Encoded encoded = new NormalizedNodeToNodeCodec(null).encode(TestModel.TEST_PATH,
}
@Test
- public void testOnReceiveMergeData() throws Exception {
+ public void testOnReceiveMergeData() {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testMergeData");
transaction.tell(new MergeData(TestModel.TEST_PATH,
@Test
public void testOnReceiveHeliumR1MergeData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testOnReceiveHeliumR1MergeData", DataStoreVersions.HELIUM_1_VERSION);
Encoded encoded = new NormalizedNodeToNodeCodec(null).encode(TestModel.TEST_PATH,
@Test
public void testOnReceiveDeleteData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testDeleteData");
transaction.tell(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.HELIUM_2_VERSION).
public void testOnReceiveBatchedModifications() throws Exception {
new JavaTestKit(getSystem()) {{
- DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
- final ActorRef transaction = newTransactionActor(mockWriteTx, "testOnReceiveBatchedModifications");
+ ShardDataTreeTransactionParent parent = Mockito.mock(ShardDataTreeTransactionParent.class);
+ DataTreeModification mockModification = Mockito.mock(DataTreeModification.class);
+ ReadWriteShardDataTreeTransaction mockWriteTx = new ReadWriteShardDataTreeTransaction(parent, "id", mockModification);
+ final ActorRef transaction = newTransactionActor(RW, mockWriteTx, "testOnReceiveBatchedModifications");
YangInstanceIdentifier writePath = TestModel.TEST_PATH;
NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
- BatchedModifications batched = new BatchedModifications(DataStoreVersions.CURRENT_VERSION);
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
batched.addModification(new WriteModification(writePath, writeData));
batched.addModification(new MergeModification(mergePath, mergeData));
batched.addModification(new DeleteModification(deletePath));
DeleteModification delete = (DeleteModification)compositeModification.getModifications().get(2);
assertEquals("getPath", deletePath, delete.getPath());
- InOrder inOrder = Mockito.inOrder(mockWriteTx);
- inOrder.verify(mockWriteTx).write(writePath, writeData);
- inOrder.verify(mockWriteTx).merge(mergePath, mergeData);
- inOrder.verify(mockWriteTx).delete(deletePath);
+ InOrder inOrder = Mockito.inOrder(mockModification);
+ inOrder.verify(mockModification).write(writePath, writeData);
+ inOrder.verify(mockModification).merge(mergePath, mergeData);
+ inOrder.verify(mockModification).delete(deletePath);
}};
}
@Test
- public void testOnReceiveReadyTransaction() throws Exception {
+ public void testOnReceiveBatchedModificationsReadyWithoutImmediateCommit() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testReadyTransaction");
- watch(transaction);
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithoutImmediateCommit");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(writePath, writeData));
+
+ transaction.tell(batched, getRef());
+ BatchedModificationsReply reply = expectMsgClass(duration("5 seconds"), BatchedModificationsReply.class);
+ assertEquals("getNumBatched", 1, reply.getNumBatched());
+
+ batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+ }};
+ }
+
+ @Test
+ public void testOnReceiveBatchedModificationsReadyWithImmediateCommit() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithImmediateCommit");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(writePath, writeData));
+ batched.setReady(true);
+ batched.setDoCommitOnReady(true);
+ batched.setTotalMessagesSent(1);
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), CommitTransactionReply.SERIALIZABLE_CLASS);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+ }};
+ }
+
+ @Test(expected=TestException.class)
+ public void testOnReceiveBatchedModificationsFailure() throws Throwable {
+ new JavaTestKit(getSystem()) {{
+
+ ShardDataTreeTransactionParent parent = Mockito.mock(ShardDataTreeTransactionParent.class);
+ DataTreeModification mockModification = Mockito.mock(DataTreeModification.class);
+ ReadWriteShardDataTreeTransaction mockWriteTx = new ReadWriteShardDataTreeTransaction(parent, "id", mockModification);
+ final ActorRef transaction = newTransactionActor(RW, mockWriteTx,
+ "testOnReceiveBatchedModificationsFailure");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doThrow(new TestException()).when(mockModification).write(path, node);
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.addModification(new WriteModification(path, node));
+
+ transaction.tell(batched, getRef());
+ expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+
+ batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+ Failure failure = expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ if(failure != null) {
+ throw failure.cause();
+ }
+ }};
+ }
+
+ @Test(expected=IllegalStateException.class)
+ public void testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Throwable {
+ new JavaTestKit(getSystem()) {{
+
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
+ "testOnReceiveBatchedModificationsReadyWithIncorrectTotalMessageCount");
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
+
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, null);
+ batched.setReady(true);
+ batched.setTotalMessagesSent(2);
+
+ transaction.tell(batched, getRef());
+
+ Failure failure = expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ if(failure != null) {
+ throw failure.cause();
+ }
+ }};
+ }
+
+ @Test
+ public void testOnReceivePreLithiumReadyTransaction() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
+ "testReadyTransaction", DataStoreVersions.HELIUM_2_VERSION);
+
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
transaction.tell(new ReadyTransaction().toSerializable(), getRef());
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
- Terminated.class);
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
- Terminated.class);
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
}};
// test
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testReadyTransaction2");
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
+ "testReadyTransaction2", DataStoreVersions.HELIUM_2_VERSION);
- watch(transaction);
+ JavaTestKit watcher = new JavaTestKit(getSystem());
+ watcher.watch(transaction);
transaction.tell(new ReadyTransaction(), getRef());
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
- Terminated.class);
- expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
- Terminated.class);
+ expectMsgClass(duration("5 seconds"), ReadyTransactionReply.class);
+ watcher.expectMsgClass(duration("5 seconds"), Terminated.class);
}};
}
@Test
public void testOnReceiveCreateSnapshot() throws Exception {
new JavaTestKit(getSystem()) {{
- ShardTest.writeToStore(store, TestModel.TEST_PATH,
+ ShardTest.writeToStore(store.getDataTree(), TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- NormalizedNode<?,?> expectedRoot = ShardTest.readStore(store,
+ NormalizedNode<?,?> expectedRoot = ShardTest.readStore(store.getDataTree(),
YangInstanceIdentifier.builder().build());
- final ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(TransactionType.READ_ONLY, readOnlyTransaction(),
"testOnReceiveCreateSnapshot");
watch(transaction);
@Test
public void testReadWriteTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testReadWriteTxOnReceiveCloseTransaction");
watch(transaction);
@Test
public void testWriteOnlyTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(WO, readWriteTransaction(),
"testWriteTxOnReceiveCloseTransaction");
watch(transaction);
@Test
public void testReadOnlyTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ final ActorRef transaction = newTransactionActor(TransactionType.READ_ONLY, readOnlyTransaction(),
"testReadOnlyTxOnReceiveCloseTransaction");
watch(transaction);
@Test(expected=UnknownMessageException.class)
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn",
- DataStoreVersions.CURRENT_VERSION);
+ final Props props = ShardTransaction.props(TransactionType.READ_ONLY, readOnlyTransaction(), shard,
+ datastoreContext, shardStats, "txn", DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
transaction.receive(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION).
500, TimeUnit.MILLISECONDS).build();
new JavaTestKit(getSystem()) {{
- final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
+ final ActorRef transaction = newTransactionActor(RW, readWriteTransaction(),
"testShardTransactionInactivity");
watch(transaction);
expectMsgClass(duration("3 seconds"), Terminated.class);
}};
}
+
+ public static class TestException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+ }
}
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
stubber.when(actorContext).executeOperationAsync(any(ActorSelection.class),
isA(requestType), any(Timeout.class));
+
+ doReturn(new Timeout(Duration.apply(1000, TimeUnit.MILLISECONDS)))
+ .when(actorContext).getTransactionCommitOperationTimeout();
}
private void verifyCohortInvocations(int nCohorts, Class<?> requestType) {
ListenableFuture<Boolean> future = proxy.canCommit();
- assertEquals("canCommit", false, future.get(5, TimeUnit.SECONDS));
+ Boolean actual = future.get(5, TimeUnit.SECONDS);
- verifyCohortInvocations(3, CanCommitTransaction.SERIALIZABLE_CLASS);
+ assertEquals("canCommit", false, actual);
+
+ verifyCohortInvocations(2, CanCommitTransaction.SERIALIZABLE_CLASS);
}
@Test(expected = TestException.class)
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Assert;
import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
verify(mockActorContext, times(0)).acquireTxCreationPermit();
}
+ /**
+ * Tests 2 successive chained write-only transactions and verifies the second transaction isn't
+ * initiated until the first one completes its read future.
+ */
+ @Test
+ public void testChainedWriteOnlyTransactions() throws Exception {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ ActorRef txActorRef1 = setupActorContextWithoutInitialCreateTransaction(getSystem());
+
+ Promise<Object> batchedReplyPromise1 = akka.dispatch.Futures.promise();
+ doReturn(batchedReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(txActorRef1)), isA(BatchedModifications.class));
+
+ DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
+
+ NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ writeTx1.write(TestModel.TEST_PATH, writeNode1);
+
+ writeTx1.ready();
+
+ verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
+
+ ActorRef txActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
+
+ expectBatchedModifications(txActorRef2, 1);
+
+ final NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ final DOMStoreWriteTransaction writeTx2 = txChainProxy.newWriteOnlyTransaction();
+
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch write2Complete = new CountDownLatch(1);
+ new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
+ } catch (Exception e) {
+ caughtEx.set(e);
+ } finally {
+ write2Complete.countDown();
+ }
+ }
+ }.start();
+
+ assertEquals("Tx 2 write should've completed", true, write2Complete.await(5, TimeUnit.SECONDS));
+
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ try {
+ verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+ } catch (AssertionError e) {
+ fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
+ }
+
+ batchedReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
+
+ // Tx 2 should've proceeded to find the primary shard.
+ verify(mockActorContext, timeout(5000).times(2)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+ }
+
/**
* Tests 2 successive chained read-write transactions and verifies the second transaction isn't
* initiated until the first one completes its read future.
Promise<Object> readyReplyPromise1 = akka.dispatch.Futures.promise();
doReturn(readyReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(txActorRef1)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ eq(actorSelection(txActorRef1)), isA(BatchedModifications.class));
DOMStoreWriteTransaction writeTx1 = txChainProxy.newReadWriteTransaction();
writeTx1.ready();
- verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1));
+ verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
String tx2MemberName = "tx2MemberName";
doReturn(tx2MemberName).when(mockActorContext).getCurrentMemberName();
fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
}
- readyReplyPromise1.success(readySerializedTxReply(txActorRef1.path().toString()).value().get().get());
+ readyReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
verify(mockActorContext, timeout(5000)).executeOperationAsync(eq(getSystem().actorSelection(shardActorRef2.path())),
eqCreateTransaction(tx2MemberName, READ_WRITE));
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_WRITE;
import akka.actor.Props;
import akka.dispatch.Futures;
import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregatorTest;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
+import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.Promise;
-import scala.concurrent.duration.Duration;
@SuppressWarnings("resource")
public class TransactionProxyTest extends AbstractTransactionProxyTest {
if (exToThrow instanceof PrimaryNotFoundException) {
doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
} else {
- doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ doReturn(primaryShardInfoReply(getSystem(), actorRef)).
when(mockActorContext).findPrimaryShardAsync(anyString());
}
testReadWithExceptionOnInitialCreateTransaction(new TestException());
}
- @Test(expected = TestException.class)
- public void testReadWithPriorRecordingOperationFailure() throws Throwable {
- doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
- when(mockActorContext).getDatastoreContext();
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectFailedBatchedModifications(actorRef);
-
- doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedReadData());
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- try {
- propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
- } finally {
- verify(mockActorContext, times(0)).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedReadData());
- }
- }
-
@Test
public void testReadWithPriorRecordingOperationSuccessful() throws Throwable {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext).
actorSelection(actorRef.path().toString());
- doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ doReturn(primaryShardInfoReply(getSystem(), actorRef)).
when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
}
- @Test(expected = TestException.class)
- public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
- doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
- when(mockActorContext).getDatastoreContext();
-
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- expectFailedBatchedModifications(actorRef);
-
- doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDataExists());
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
-
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- transactionProxy.delete(TestModel.TEST_PATH);
-
- try {
- propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
- } finally {
- verify(mockActorContext, times(0)).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDataExists());
- }
- }
-
@Test
public void testExistsWithPriorRecordingOperationSuccessful() throws Throwable {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
transactionProxy.exists(TestModel.TEST_PATH);
}
- private void verifyRecordingOperationFutures(List<Future<Object>> futures,
- Class<?>... expResultTypes) throws Exception {
- assertEquals("getRecordingOperationFutures size", expResultTypes.length, futures.size());
-
- int i = 0;
- for( Future<Object> future: futures) {
- assertNotNull("Recording operation Future is null", future);
-
- Class<?> expResultType = expResultTypes[i++];
- if(Throwable.class.isAssignableFrom(expResultType)) {
- try {
- Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- fail("Expected exception from recording operation Future");
- } catch(Exception e) {
- // Expected
- }
- } else {
- assertEquals(String.format("Recording operation %d Future result type", i +1 ), expResultType,
- Await.result(future, Duration.create(5, TimeUnit.SECONDS)).getClass());
- }
- }
- }
-
@Test
public void testWrite() throws Exception {
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- // This sends the batched modification.
- transactionProxy.ready();
-
- verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), false);
}
@Test
public void testWriteAfterAsyncRead() throws Throwable {
- ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem());
+ ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem(), DefaultShardStrategy.DEFAULT_SHARD);
Promise<Object> createTxPromise = akka.dispatch.Futures.promise();
doReturn(createTxPromise).when(mockActorContext).executeOperationAsync(
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
+ expectBatchedModificationsReady(actorRef);
final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
// This sends the batched modification.
transactionProxy.ready();
- verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), true);
}
@Test(expected=IllegalStateException.class)
@Test
public void testMerge() throws Exception {
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
- // This sends the batched modification.
- transactionProxy.ready();
-
- verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite));
-
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite), false);
}
@Test
public void testDelete() throws Exception {
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.delete(TestModel.TEST_PATH);
- // This sends the batched modification.
- transactionProxy.ready();
+ verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH), false);
+ }
+
+ @Test
+ public void testReadWrite() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
- verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH));
+ expectBatchedModifications(actorRef, 1);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+
+ transactionProxy.read(TestModel.TEST_PATH);
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), false,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
}
@Test
- public void testReady() throws Exception {
+ public void testReadyWithReadWrite() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- expectBatchedModifications(actorRef, 1);
- expectReadyTransaction(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ verifyBatchedModifications(batchedModifications.get(0), true, true,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
- isA(BatchedModifications.class));
+ assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
}
@Test
- public void testReadyWithRecordingOperationFailure() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+ public void testReadyWithNoModifications() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
- expectFailedBatchedModifications(actorRef);
+ expectBatchedModificationsReady(actorRef, true);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- expectReadyTransaction(actorRef);
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
- doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), true, true);
+ }
+
+ @Test
+ public void testReadyWithMultipleShardWrites() throws Exception {
+ ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+
+ ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, "junk");
+
+ expectBatchedModificationsReady(actorRef1);
+ expectBatchedModificationsReady(actorRef2);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+ transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
-
- verifyCohortFutures(proxy, TestException.class);
-
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(), TestException.class);
+ verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef1),
+ actorSelection(actorRef2));
}
@Test
- public void testReadyWithReplyFailure() throws Exception {
+ public void testReadyWithWriteOnlyAndLastBatchPending() throws Exception {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectBatchedModifications(actorRef, 1);
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)),
- isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ assertTrue(ready instanceof SingleCommitCohortProxy);
+
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
+ verifyBatchedModifications(batchedModifications.get(0), true, true,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- verifyCohortFutures(proxy, TestException.class);
+ verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
+ isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
@Test
- public void testReadyWithInitialCreateTransactionFailure() throws Exception {
+ public void testReadyWithWriteOnlyAndLastBatchEmpty() throws Exception {
+ dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(Futures.failed(new PrimaryNotFoundException("mock"))).when(
- mockActorContext).findPrimaryShardAsync(anyString());
+ expectBatchedModificationsReady(actorRef, true);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- transactionProxy.delete(TestModel.TEST_PATH);
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ verifyBatchedModifications(batchedModifications.get(0), false,
+ new WriteModification(TestModel.TEST_PATH, nodeToWrite));
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ verifyBatchedModifications(batchedModifications.get(1), true, true);
- verifyCohortFutures(proxy, PrimaryNotFoundException.class);
+ verify(mockActorContext, never()).executeOperationAsync(eq(actorSelection(actorRef)),
+ isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
@Test
- public void testReadyWithInvalidReplyMessageType() throws Exception {
+ public void testReadyWithReplyFailure() throws Exception {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- expectBatchedModifications(actorRef, 1);
+ expectFailedBatchedModifications(actorRef);
- doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)),
- isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+
+ transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof SingleCommitCohortProxy);
+
+ verifyCohortFutures((SingleCommitCohortProxy)ready, TestException.class);
+ }
+
+ private void testWriteOnlyTxWithFindPrimaryShardFailure(Exception toThrow) throws Exception {
+ doReturn(Futures.failed(toThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+ transactionProxy.delete(TestModel.TEST_PATH);
+
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ assertTrue(ready instanceof SingleCommitCohortProxy);
+
+ verifyCohortFutures((SingleCommitCohortProxy)ready, toThrow.getClass());
+ }
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ @Test
+ public void testWriteOnlyTxWithPrimaryNotFoundException() throws Exception {
+ testWriteOnlyTxWithFindPrimaryShardFailure(new PrimaryNotFoundException("mock"));
+ }
- verifyCohortFutures(proxy, IllegalArgumentException.class);
+ @Test
+ public void testWriteOnlyTxWithNotInitializedException() throws Exception {
+ testWriteOnlyTxWithFindPrimaryShardFailure(new NotInitializedException("mock"));
}
@Test
- public void testUnusedTransaction() throws Exception {
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+ public void testWriteOnlyTxWithNoShardLeaderException() throws Exception {
+ testWriteOnlyTxWithFindPrimaryShardFailure(new NoShardLeaderException("mock"));
+ }
+
+ @Test
+ public void testReadyWithInvalidReplyMessageType() throws Exception {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+ ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
+
+ ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, "junk");
+
+ doReturn(Futures.successful(new Object())).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef1)), isA(BatchedModifications.class));
+
+ expectBatchedModificationsReady(actorRef2);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+
+ transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertEquals("canCommit", true, ready.canCommit().get());
- ready.preCommit().get();
- ready.commit().get();
+ assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+
+ verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef2),
+ IllegalArgumentException.class);
}
@Test
*/
@Test
public void testLocalTxActorRead() throws Exception {
- ActorSystem actorSystem = getSystem();
- ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
- doReturn(actorSystem.actorSelection(shardActorRef.path())).
- when(mockActorContext).actorSelection(shardActorRef.path().toString());
-
- doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
- when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1").setTransactionActorPath(actorPath).build();
-
- doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
- executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
- eqCreateTransaction(memberName, READ_ONLY));
-
- doReturn(true).when(mockActorContext).isPathLocal(actorPath);
+ setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
+ doReturn(true).when(mockActorContext).isPathLocal(anyString());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,READ_ONLY);
@Test
public void testLocalTxActorReady() throws Exception {
- ActorSystem actorSystem = getSystem();
- ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
- doReturn(actorSystem.actorSelection(shardActorRef.path())).
- when(mockActorContext).actorSelection(shardActorRef.path().toString());
-
- doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
- when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
- String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
- setTransactionId("txn-1").setTransactionActorPath(actorPath).
- setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
-
- doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
- executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
- eqCreateTransaction(memberName, WRITE_ONLY));
-
- doReturn(true).when(mockActorContext).isPathLocal(actorPath);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+ doReturn(true).when(mockActorContext).isPathLocal(anyString());
- doReturn(batchedModificationsReply(1)).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), isA(BatchedModifications.class));
+ expectBatchedModificationsReady(actorRef, true);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class);
-
- // testing ready
- doReturn(readyTxReply(shardActorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), isA(ReadyTransaction.class));
-
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ assertTrue(ready instanceof SingleCommitCohortProxy);
- verifyCohortFutures(proxy, getSystem().actorSelection(shardActorRef.path()));
+ verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
}
private static interface TransactionProxyOperation {
when(mockActorContext).actorSelection(shardActorRef.path().toString());
if(shardFound) {
- doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
+ doReturn(primaryShardInfoReply(actorSystem, shardActorRef)).
when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
} else {
- doReturn(Futures.failed(new Exception("not found")))
+ doReturn(Futures.failed(new PrimaryNotFoundException("test")))
.when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
}
- String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
+ ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+ String actorPath = txActorRef.path().toString();
CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
setTransactionId("txn-1").setTransactionActorPath(actorPath).
setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
+ doReturn(actorSystem.actorSelection(actorPath)).when(mockActorContext).actorSelection(actorPath);
+
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
eqCreateTransaction(memberName, READ_WRITE));
- doReturn(true).when(mockActorContext).isPathLocal(actorPath);
+ doReturn(true).when(mockActorContext).isPathLocal(anyString());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
@Test
public void testWriteThrottlingWhenShardFound(){
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
throttleOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
@Test
public void testWriteThrottlingWhenShardNotFound(){
// Confirm that there is no throttling when the Shard is not found
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
@Test
public void testWriteCompletion(){
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
@Test
public void testMergeThrottlingWhenShardFound(){
-
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
throttleOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
@Test
public void testMergeThrottlingWhenShardNotFound(){
-
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
@Test
public void testMergeCompletion(){
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
@Test
public void testDeleteCompletion(){
+ dataStoreContextBuilder.shardBatchedModificationCount(1);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
}, 2, true);
}
- @Test
- public void testModificationOperationBatching() throws Throwable {
+ private void testModificationOperationBatching(TransactionType type) throws Exception {
int shardBatchedModificationCount = 3;
- doReturn(dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount).build()).
- when(mockActorContext).getDatastoreContext();
+ dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), type);
expectBatchedModifications(actorRef, shardBatchedModificationCount);
- expectReadyTransaction(actorRef);
-
YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
YangInstanceIdentifier deletePath1 = TestModel.TEST_PATH;
YangInstanceIdentifier deletePath2 = TestModel.OUTER_LIST_PATH;
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, type);
transactionProxy.write(writePath1, writeNode1);
transactionProxy.write(writePath2, writeNode2);
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), new WriteModification(writePath1, writeNode1),
+ verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
new WriteModification(writePath2, writeNode2), new DeleteModification(deletePath1));
- verifyBatchedModifications(batchedModifications.get(1), new MergeModification(mergePath1, mergeNode1),
+ verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
- verifyBatchedModifications(batchedModifications.get(2), new MergeModification(mergePath3, mergeNode3),
- new DeleteModification(deletePath2));
+ verifyBatchedModifications(batchedModifications.get(2), true, true,
+ new MergeModification(mergePath3, mergeNode3), new DeleteModification(deletePath2));
+
+ assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
+ }
+
+ @Test
+ public void testReadWriteModificationOperationBatching() throws Throwable {
+ testModificationOperationBatching(READ_WRITE);
+ }
+
+ @Test
+ public void testWriteOnlyModificationOperationBatching() throws Throwable {
+ testModificationOperationBatching(WRITE_ONLY);
+ }
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ @Test
+ public void testOptimizedWriteOnlyModificationOperationBatching() throws Throwable {
+ dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
+ testModificationOperationBatching(WRITE_ONLY);
}
@Test
public void testModificationOperationBatchingWithInterleavedReads() throws Throwable {
+
int shardBatchedModificationCount = 10;
- doReturn(dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount).build()).
- when(mockActorContext).getDatastoreContext();
+ dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
- verifyBatchedModifications(batchedModifications.get(0), new WriteModification(writePath1, writeNode1),
+ verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
new WriteModification(writePath2, writeNode2));
- verifyBatchedModifications(batchedModifications.get(1), new MergeModification(mergePath1, mergeNode1),
+ verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
new MergeModification(mergePath2, mergeNode2));
- verifyBatchedModifications(batchedModifications.get(2), new DeleteModification(deletePath));
+ verifyBatchedModifications(batchedModifications.get(2), false, new DeleteModification(deletePath));
InOrder inOrder = Mockito.inOrder(mockActorContext);
inOrder.verify(mockActorContext).executeOperationAsync(
inOrder.verify(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
+ }
+
+ @Test
+ public void testReadRoot() throws ReadFailedException, InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
+
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ Configuration configuration = mock(Configuration.class);
+ doReturn(configuration).when(mockActorContext).getConfiguration();
+ doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ doReturn(Sets.newHashSet("test", "cars")).when(configuration).getAllShardNames();
+
+ NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ setUpReadData("test", NormalizedNodeAggregatorTest.getRootNode(expectedNode1, schemaContext));
+ setUpReadData("cars", NormalizedNodeAggregatorTest.getRootNode(expectedNode2, schemaContext));
+
+ doReturn(memberName).when(mockActorContext).getCurrentMemberName();
+
+ doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
+
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
+ YangInstanceIdentifier.builder().build()).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+
+ NormalizedNode<?, ?> normalizedNode = readOptional.get();
+
+ assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+
+ Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+
+ for(NormalizedNode<?,?> node : collection){
+ assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
+ }
+
+ assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
+ NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME) != null);
+
+ assertEquals(expectedNode1, NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME));
+
+ assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
+ NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
+
+ assertEquals(expectedNode2, NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME));
+ }
+
+
+ private void setUpReadData(String shardName, NormalizedNode<?, ?> expectedNode) {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(getSystem().actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ doReturn(primaryShardInfoReply(getSystem(), shardActorRef)).
+ when(mockActorContext).findPrimaryShardAsync(eq(shardName));
+
+ doReturn(true).when(mockActorContext).isPathLocal(shardActorRef.path().toString());
+
+ ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(txActorRef.path())).
+ when(mockActorContext).actorSelection(txActorRef.path().toString());
+
+ doReturn(Futures.successful(createTransactionReply(txActorRef, DataStoreVersions.CURRENT_VERSION))).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, TransactionType.READ_ONLY));
+
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(txActorRef)), eqSerializedReadData(YangInstanceIdentifier.builder().build()));
}
}
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
+import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
}
+ @Test
+ public void testAdjustRateLimitForUnusedTransaction() {
+ doReturn(commitTimer).when(actorContext).getOperationTimer("one", "commit");
+ doReturn("one").when(actorContext).getDataStoreType();
+
+ Timer commitTimer2 = Mockito.mock(Timer.class);
+ Snapshot commitSnapshot2 = Mockito.mock(Snapshot.class);
+
+ doReturn(commitSnapshot2).when(commitTimer2).getSnapshot();
+
+ doReturn(commitTimer2).when(actorContext).getOperationTimer("two", "commit");
+
+ DatastoreContext.newBuilder().dataStoreType("one").build();
+ DatastoreContext.newBuilder().dataStoreType("two").build();
+
+ doReturn(TimeUnit.MICROSECONDS.toNanos(500) * 1D).when(commitSnapshot).getValue(1 * 0.1);
+
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+ verify(actorContext, never()).setTxCreationLimit(anyDouble());
+
+ Mockito.reset(commitSnapshot);
+
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+ verify(actorContext, never()).setTxCreationLimit(anyDouble());
+
+ System.out.println(""+TimeUnit.SECONDS.toNanos(30)/TimeUnit.MICROSECONDS.toNanos(100));
+
+ doReturn(TimeUnit.MICROSECONDS.toNanos(100) * 1D).when(commitSnapshot2).getValue(1 * 0.1);
+
+ TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+ verify(actorContext).setTxCreationLimit(Matchers.doubleThat(approximately(1000)));
+ }
+
public Matcher<Double> approximately(final double val){
return new BaseMatcher<Double>() {
@Override
public boolean matches(Object o) {
Double aDouble = (Double) o;
- return aDouble > val && aDouble < val+1;
+ return aDouble >= val && aDouble <= val+1;
}
@Override
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.inOrder;
-import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
+import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.HELIUM_2_VERSION;
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.dispatch.Dispatchers;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.MoreExecutors;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import org.mockito.InOrder;
import org.opendaylight.controller.cluster.datastore.AbstractShardTest;
import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.ShardDataTree;
+import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort;
import org.opendaylight.controller.cluster.datastore.ShardTestKit;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
NormalizedNodeToNodeCodec codec = new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT);
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- store.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree store = InMemoryDataTreeFactory.getInstance().create();
+ store.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(store, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
@Test
public void testHelium2VersionRecovery() throws Exception {
- // Set up the InMemorySnapshotStore.
-
- InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
- testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+ DataTree testStore = InMemoryDataTreeFactory.getInstance().create();
+ testStore.setSchemaContext(SCHEMA_CONTEXT);
writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
- "testConcurrentThreePhaseCommits");
+ "testPreLithiumConcurrentThreePhaseCommits");
waitUntilLeader(shard);
// Setup 3 simulated transactions with mock cohorts backed by real cohorts.
- InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+ ShardDataTree dataStore = shard.underlyingActor().getDataStore();
String transactionID1 = "tx1";
MutableCompositeModification modification1 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
String transactionID2 = "tx2";
MutableCompositeModification modification2 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
modification2);
String transactionID3 = "tx3";
MutableCompositeModification modification3 = new MutableCompositeModification();
- DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
.nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
// Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
- cohort1, modification1, true), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, HELIUM_2_VERSION,
+ cohort1, modification1, true, false), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the ForwardedReadyTransaction for the next 2 Tx's.
- shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
- cohort2, modification2, true), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID2, HELIUM_2_VERSION,
+ cohort2, modification2, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
- cohort3, modification3, true), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID3, HELIUM_2_VERSION,
+ cohort3, modification3, true, false), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
assertEquals(TestModel.ID_QNAME.getLocalName() + " value", 1, idLeaf.get().getValue());
- verifyLastLogIndex(shard, 2);
+ verifyLastApplied(shard, 2);
shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_WRITE;
+import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.WRITE_ONLY;
import akka.actor.ActorRef;
import akka.dispatch.Futures;
+import akka.util.Timeout;
import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
+import org.junit.Ignore;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.datastore.AbstractThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.AbstractTransactionProxyTest;
import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.ThreePhaseCommitCohortProxy;
import org.opendaylight.controller.cluster.datastore.TransactionProxy;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import scala.concurrent.Future;
/**
* Unit tests for backwards compatibility with pre-Lithium versions.
return argThat(matcher);
}
+ private CanCommitTransaction eqCanCommitTransaction(final String transactionID) {
+ ArgumentMatcher<CanCommitTransaction> matcher = new ArgumentMatcher<CanCommitTransaction>() {
+ @Override
+ public boolean matches(Object argument) {
+ return ThreePhaseCommitCohortMessages.CanCommitTransaction.class.equals(argument.getClass()) &&
+ CanCommitTransaction.fromSerializable(argument).getTransactionID().equals(transactionID);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private CommitTransaction eqCommitTransaction(final String transactionID) {
+ ArgumentMatcher<CommitTransaction> matcher = new ArgumentMatcher<CommitTransaction>() {
+ @Override
+ public boolean matches(Object argument) {
+ return ThreePhaseCommitCohortMessages.CommitTransaction.class.equals(argument.getClass()) &&
+ CommitTransaction.fromSerializable(argument).getTransactionID().equals(transactionID);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private Future<Object> readySerializedTxReply(String path, short version) {
+ return Futures.successful(new ReadyTransactionReply(path, version).toSerializable());
+ }
+
private ActorRef testCompatibilityWithHeliumVersion(short version) throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, version);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, version,
+ DefaultShardStrategy.DEFAULT_SHARD);
NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
doReturn(Futures.successful(new DeleteDataReply().toSerializable(version))).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyDeleteData(TestModel.TEST_PATH));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString(), version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
transactionProxy.delete(TestModel.TEST_PATH);
- DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+ AbstractThreePhaseCommitCohort<?> proxy = transactionProxy.ready();
- assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
- ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ doReturn(Futures.successful(CanCommitTransactionReply.YES.toSerializable())).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)),
+ eqCanCommitTransaction(transactionProxy.getIdentifier().toString()), any(Timeout.class));
- verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ doReturn(Futures.successful(new CommitTransactionReply().toSerializable())).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)),
+ eqCommitTransaction(transactionProxy.getIdentifier().toString()), any(Timeout.class));
+
+ Boolean canCommit = proxy.canCommit().get(3, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit.booleanValue());
+
+ proxy.preCommit().get(3, TimeUnit.SECONDS);
+
+ proxy.commit().get(3, TimeUnit.SECONDS);
return actorRef;
}
verify(mockActorContext, Mockito.never()).resolvePath(eq(actorRef.path().toString()),
eq(actorRef.path().toString()));
}
+
+ @Test
+ @Ignore
+ // FIXME: disabled until we can get the primary shard version from the ShardManager as we now skip
+ // creating transaction actors for write-only Tx's.
+ public void testWriteOnlyCompatibilityWithHeliumR2Version() throws Exception {
+ short version = DataStoreVersions.HELIUM_2_VERSION;
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY, version,
+ DefaultShardStrategy.DEFAULT_SHARD);
+
+ NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(Futures.successful(new WriteDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyWriteData(testNode));
+
+ doReturn(readySerializedTxReply(actorRef.path().toString(), version)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+
+ doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
+ eq(actorRef.path().toString()));
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+
+ transactionProxy.write(TestModel.TEST_PATH, testNode);
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+
+ ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+ }
}
SchemaContext schemaContext = TestModel.createTestContext();
Props shardProps = Shard.props(ShardIdentifier.builder().memberName("member-1").
shardName("inventory").type("config").build(),
- Collections.<ShardIdentifier,String>emptyMap(),
+ Collections.<String,String>emptyMap(),
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).build(),
schemaContext).withDispatcher(Dispatchers.DefaultDispatcherId());
SchemaContext schemaContext = TestModel.createTestContext();
Props shardProps = Shard.props(ShardIdentifier.builder().memberName("member-1").
shardName("inventory").type("config").build(),
- Collections.<ShardIdentifier,String>emptyMap(),
+ Collections.<String,String>emptyMap(),
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).build(),
schemaContext).withDispatcher(Dispatchers.DefaultDispatcherId());
YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
- BatchedModifications batched = new BatchedModifications(DataStoreVersions.CURRENT_VERSION);
+ BatchedModifications batched = new BatchedModifications("tx1", DataStoreVersions.CURRENT_VERSION, "txChain");
batched.addModification(new WriteModification(writePath, writeData));
batched.addModification(new MergeModification(mergePath, mergeData));
batched.addModification(new DeleteModification(deletePath));
+ batched.setReady(true);
+ batched.setTotalMessagesSent(5);
BatchedModifications clone = (BatchedModifications) SerializationUtils.clone(
(Serializable) batched.toSerializable());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
+ assertEquals("getTransactionID", "tx1", clone.getTransactionID());
+ assertEquals("getTransactionChainID", "txChain", clone.getTransactionChainID());
+ assertEquals("isReady", true, clone.isReady());
+ assertEquals("getTotalMessagesSent", 5, clone.getTotalMessagesSent());
assertEquals("getModifications size", 3, clone.getModifications().size());
DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, delete.getVersion());
assertEquals("getPath", deletePath, delete.getPath());
+
+ // Test with different params.
+
+ batched = new BatchedModifications("tx2", (short)10, null);
+
+ clone = (BatchedModifications) SerializationUtils.clone((Serializable) batched.toSerializable());
+
+ assertEquals("getVersion", 10, clone.getVersion());
+ assertEquals("getTransactionID", "tx2", clone.getTransactionID());
+ assertEquals("getTransactionChainID", "", clone.getTransactionChainID());
+ assertEquals("isReady", false, clone.isReady());
+
+ assertEquals("getModifications size", 0, clone.getModifications().size());
+
}
@Test
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static org.junit.Assert.assertEquals;
+import java.io.Serializable;
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+
+/**
+ * Unit tests for ReadyTransactionReply.
+ *
+ * @author Thomas Pantelis
+ */
+public class ReadyTransactionReplyTest {
+
+ @Test
+ public void testSerialization() {
+ String cohortPath = "cohort path";
+ ReadyTransactionReply expected = new ReadyTransactionReply(cohortPath);
+
+ Object serialized = expected.toSerializable();
+ assertEquals("Serialized type", ReadyTransactionReply.class, serialized.getClass());
+
+ ReadyTransactionReply actual = ReadyTransactionReply.fromSerializable(SerializationUtils.clone(
+ (Serializable) serialized));
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
+ assertEquals("getCohortPath", cohortPath, actual.getCohortPath());
+ }
+
+ @Test
+ public void testSerializationWithPreLithiumVersion() throws Exception {
+ String cohortPath = "cohort path";
+ ReadyTransactionReply expected = new ReadyTransactionReply(cohortPath, DataStoreVersions.HELIUM_2_VERSION);
+
+ Object serialized = expected.toSerializable();
+ assertEquals("Serialized type", ShardTransactionMessages.ReadyTransactionReply.class, serialized.getClass());
+
+ ReadyTransactionReply actual = ReadyTransactionReply.fromSerializable(SerializationUtils.clone(
+ (Serializable) serialized));
+ assertEquals("getVersion", DataStoreVersions.HELIUM_2_VERSION, actual.getVersion());
+ assertEquals("getCohortPath", cohortPath, actual.getCohortPath());
+ }
+}
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static junit.framework.TestCase.assertEquals;
+import akka.actor.Actor;
+import akka.serialization.Serialization;
+import akka.testkit.TestActorRef;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
+import org.opendaylight.controller.cluster.raft.TestActorFactory;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
+
+public class RegisterChangeListenerReplyTest extends AbstractActorTest {
+
+ private TestActorFactory factory;
+
+
+ @Before
+ public void setUp(){
+ factory = new TestActorFactory(getSystem());
+ }
+
+ @After
+ public void shutDown(){
+ factory.close();
+ }
+
+ @Test
+ public void testToSerializable(){
+ TestActorRef<Actor> testActor = factory.createTestActor(MessageCollectorActor.props());
+
+ RegisterChangeListenerReply registerChangeListenerReply = new RegisterChangeListenerReply(testActor);
+
+ ListenerRegistrationMessages.RegisterChangeListenerReply serialized
+ = registerChangeListenerReply.toSerializable();
+
+ assertEquals(Serialization.serializedActorPath(testActor), serialized.getListenerRegistrationPath());
+ }
+
+ @Test
+ public void testFromSerializable(){
+ TestActorRef<Actor> testActor = factory.createTestActor(MessageCollectorActor.props());
+
+ RegisterChangeListenerReply registerChangeListenerReply = new RegisterChangeListenerReply(testActor);
+
+ ListenerRegistrationMessages.RegisterChangeListenerReply serialized
+ = registerChangeListenerReply.toSerializable();
+
+
+ RegisterChangeListenerReply fromSerialized
+ = RegisterChangeListenerReply.fromSerializable(getSystem(), serialized);
+
+ assertEquals(testActor.path().toString(), fromSerialized.getListenerRegistrationPath().toString());
+ }
+
+}
\ No newline at end of file
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static junit.framework.TestCase.assertEquals;
+import akka.actor.Actor;
+import akka.serialization.Serialization;
+import akka.testkit.TestActorRef;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
+import org.opendaylight.controller.cluster.raft.TestActorFactory;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
+
+public class RegisterChangeListenerTest extends AbstractActorTest {
+
+ private TestActorFactory factory;
+
+ @Before
+ public void setUp(){
+ factory = new TestActorFactory(getSystem());
+ }
+
+ @After
+ public void shutDown(){
+ factory.close();
+ }
+
+ @Test
+ public void testToSerializable(){
+ TestActorRef<Actor> testActor = factory.createTestActor(MessageCollectorActor.props());
+ RegisterChangeListener registerChangeListener = new RegisterChangeListener(TestModel.TEST_PATH, testActor
+ , AsyncDataBroker.DataChangeScope.BASE);
+
+ ListenerRegistrationMessages.RegisterChangeListener serialized
+ = registerChangeListener.toSerializable();
+
+ NormalizedNodeMessages.InstanceIdentifier path = serialized.getInstanceIdentifierPath();
+
+ assertEquals("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", path.getCode(0));
+ assertEquals(Serialization.serializedActorPath(testActor), serialized.getDataChangeListenerActorPath());
+ assertEquals(AsyncDataBroker.DataChangeScope.BASE.ordinal(), serialized.getDataChangeScope());
+
+ }
+
+ @Test
+ public void testFromSerializable(){
+ TestActorRef<Actor> testActor = factory.createTestActor(MessageCollectorActor.props());
+ RegisterChangeListener registerChangeListener = new RegisterChangeListener(TestModel.TEST_PATH, testActor
+ , AsyncDataBroker.DataChangeScope.SUBTREE);
+
+ ListenerRegistrationMessages.RegisterChangeListener serialized
+ = registerChangeListener.toSerializable();
+
+
+ RegisterChangeListener fromSerialized = RegisterChangeListener.fromSerializable(getSystem(), serialized);
+
+ assertEquals(TestModel.TEST_PATH, registerChangeListener.getPath());
+ assertEquals(testActor.path().toString(), fromSerialized.getDataChangeListenerPath().toString());
+ assertEquals(AsyncDataBroker.DataChangeScope.SUBTREE, fromSerialized.getScope());
+
+
+ }
+}
\ No newline at end of file
package org.opendaylight.controller.cluster.datastore.modification;
import static org.junit.Assert.assertEquals;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
assertEquals("getPath", writePath, write.getPath());
assertEquals("getData", writeData, write.getData());
- ModificationPayload cloned =
- (ModificationPayload) SerializationUtils.clone(payload);
+ ModificationPayload cloned = SerializationUtils.clone(payload);
deserialized = (MutableCompositeModification) payload.getModification();
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Optional;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
import com.typesafe.config.ConfigFactory;
+import java.util.Arrays;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.time.StopWatch;
import org.junit.Assert;
import org.junit.Test;
+import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.Configuration;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
-import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
+import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
+import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
+import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
public class ActorContextTest extends AbstractActorTest{
+ static final Logger log = LoggerFactory.getLogger(ActorContextTest.class);
+
+ private static class TestMessage {
+ }
+
private static class MockShardManager extends UntypedActor {
private final boolean found;
private final ActorRef actorRef;
+ private final Map<String,Object> findPrimaryResponses = Maps.newHashMap();
private MockShardManager(boolean found, ActorRef actorRef){
}
@Override public void onReceive(Object message) throws Exception {
+ if(message instanceof FindPrimary) {
+ FindPrimary fp = (FindPrimary)message;
+ Object resp = findPrimaryResponses.get(fp.getShardName());
+ if(resp == null) {
+ log.error("No expected FindPrimary response found for shard name {}", fp.getShardName());
+ } else {
+ getSender().tell(resp, getSelf());
+ }
+
+ return;
+ }
+
if(found){
getSender().tell(new LocalShardFound(actorRef), getSelf());
} else {
}
}
+ void addFindPrimaryResp(String shardName, Object resp) {
+ findPrimaryResponses.put(shardName, resp);
+ }
+
private static Props props(final boolean found, final ActorRef actorRef){
return Props.create(new MockShardManagerCreator(found, actorRef) );
}
+ private static Props props(){
+ return Props.create(new MockShardManagerCreator() );
+ }
+
@SuppressWarnings("serial")
private static class MockShardManagerCreator implements Creator<MockShardManager> {
final boolean found;
final ActorRef actorRef;
+ MockShardManagerCreator() {
+ this.found = false;
+ this.actorRef = null;
+ }
+
MockShardManagerCreator(boolean found, ActorRef actorRef) {
this.found = found;
this.actorRef = actorRef;
@Test
public void testRateLimiting(){
- DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
-
- doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
- doReturn("config").when(mockDataStoreContext).getDataStoreType();
- doReturn(Timeout.apply(100, TimeUnit.MILLISECONDS)).when(mockDataStoreContext).getShardLeaderElectionTimeout();
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ transactionCreationInitialRateLimit(155L).build();
ActorContext actorContext =
new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
- mock(Configuration.class), mockDataStoreContext);
+ mock(Configuration.class), dataStoreContext);
// Check that the initial value is being picked up from DataStoreContext
- assertEquals(mockDataStoreContext.getTransactionCreationInitialRateLimit(), actorContext.getTxCreationLimit(), 1e-15);
+ assertEquals(dataStoreContext.getTransactionCreationInitialRateLimit(), actorContext.getTxCreationLimit(), 1e-15);
actorContext.setTxCreationLimit(1.0);
@Test
public void testClientDispatcherIsGlobalDispatcher(){
-
- DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
-
- doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
- doReturn("config").when(mockDataStoreContext).getDataStoreType();
- doReturn(Timeout.apply(100, TimeUnit.MILLISECONDS)).when(mockDataStoreContext).getShardLeaderElectionTimeout();
-
ActorContext actorContext =
new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
- mock(Configuration.class), mockDataStoreContext);
+ mock(Configuration.class), DatastoreContext.newBuilder().build());
assertEquals(getSystem().dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
@Test
public void testClientDispatcherIsNotGlobalDispatcher(){
-
- DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
-
- doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
- doReturn("config").when(mockDataStoreContext).getDataStoreType();
- doReturn(Timeout.apply(100, TimeUnit.MILLISECONDS)).when(mockDataStoreContext).getShardLeaderElectionTimeout();
-
ActorSystem actorSystem = ActorSystem.create("with-custom-dispatchers", ConfigFactory.load("application-with-custom-dispatchers.conf"));
ActorContext actorContext =
new ActorContext(actorSystem, mock(ActorRef.class), mock(ClusterWrapper.class),
- mock(Configuration.class), mockDataStoreContext);
+ mock(Configuration.class), DatastoreContext.newBuilder().build());
assertNotEquals(actorSystem.dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
}
@Test
- public void testFindPrimaryShardAsyncPrimaryFound() throws Exception {
+ public void testFindPrimaryShardAsyncRemotePrimaryFound() throws Exception {
TestActorRef<MessageCollectorActor> shardManager =
TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
- DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
-
- doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
- doReturn("config").when(mockDataStoreContext).getDataStoreType();
- doReturn(Timeout.apply(100, TimeUnit.MILLISECONDS)).when(mockDataStoreContext).getShardLeaderElectionTimeout();
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
+ final String expPrimaryPath = "akka://test-system/find-primary-shard";
ActorContext actorContext =
new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
- mock(Configuration.class), mockDataStoreContext) {
+ mock(Configuration.class), dataStoreContext) {
@Override
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
- return Futures.successful((Object) new PrimaryFound("akka://test-system/test"));
+ return Futures.successful((Object) new RemotePrimaryShardFound(expPrimaryPath));
}
};
-
- Future<ActorSelection> foobar = actorContext.findPrimaryShardAsync("foobar");
- ActorSelection actual = Await.result(foobar, Duration.apply(5000, TimeUnit.MILLISECONDS));
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
+ PrimaryShardInfo actual = Await.result(foobar, Duration.apply(5000, TimeUnit.MILLISECONDS));
assertNotNull(actual);
+ assertEquals("LocalShardDataTree present", false, actual.getLocalShardDataTree().isPresent());
+ assertTrue("Unexpected PrimaryShardActor path " + actual.getPrimaryShardActor().path(),
+ expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
- Future<ActorSelection> cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
- ActorSelection cachedSelection = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
+ PrimaryShardInfo cachedInfo = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
- assertEquals(cachedSelection, actual);
+ assertEquals(cachedInfo, actual);
// Wait for 200 Milliseconds. The cached entry should have been removed.
Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
- cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
assertNull(cached);
+ }
+
+ @Test
+ public void testFindPrimaryShardAsyncLocalPrimaryFound() throws Exception {
+
+ TestActorRef<MessageCollectorActor> shardManager =
+ TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
+
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
+
+ final DataTree mockDataTree = Mockito.mock(DataTree.class);
+ final String expPrimaryPath = "akka://test-system/find-primary-shard";
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
+ mock(Configuration.class), dataStoreContext) {
+ @Override
+ protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
+ return Futures.successful((Object) new LocalPrimaryShardFound(expPrimaryPath, mockDataTree));
+ }
+ };
+
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
+ PrimaryShardInfo actual = Await.result(foobar, Duration.apply(5000, TimeUnit.MILLISECONDS));
+
+ assertNotNull(actual);
+ assertEquals("LocalShardDataTree present", true, actual.getLocalShardDataTree().isPresent());
+ assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().get());
+ assertTrue("Unexpected PrimaryShardActor path " + actual.getPrimaryShardActor().path(),
+ expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
+
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
+
+ PrimaryShardInfo cachedInfo = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
+
+ assertEquals(cachedInfo, actual);
+ // Wait for 200 Milliseconds. The cached entry should have been removed.
+
+ Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+
+ cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
+
+ assertNull(cached);
}
@Test
TestActorRef<MessageCollectorActor> shardManager =
TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
- DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
-
- doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
- doReturn("config").when(mockDataStoreContext).getDataStoreType();
- doReturn(Timeout.apply(100, TimeUnit.MILLISECONDS)).when(mockDataStoreContext).getShardLeaderElectionTimeout();
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
ActorContext actorContext =
new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
- mock(Configuration.class), mockDataStoreContext) {
+ mock(Configuration.class), dataStoreContext) {
@Override
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
- return Futures.successful((Object) new PrimaryNotFound("foobar"));
+ return Futures.successful((Object) new PrimaryNotFoundException("not found"));
}
};
- Future<ActorSelection> foobar = actorContext.findPrimaryShardAsync("foobar");
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
try {
Await.result(foobar, Duration.apply(100, TimeUnit.MILLISECONDS));
}
- Future<ActorSelection> cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
assertNull(cached);
-
}
@Test
TestActorRef<MessageCollectorActor> shardManager =
TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
- DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
-
- doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
- doReturn("config").when(mockDataStoreContext).getDataStoreType();
- doReturn(Timeout.apply(100, TimeUnit.MILLISECONDS)).when(mockDataStoreContext).getShardLeaderElectionTimeout();
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().dataStoreType("config").
+ shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
ActorContext actorContext =
new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
- mock(Configuration.class), mockDataStoreContext) {
+ mock(Configuration.class), dataStoreContext) {
@Override
protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
- return Futures.successful((Object) new ActorNotInitialized());
+ return Futures.successful((Object) new NotInitializedException("not iniislized"));
}
};
- Future<ActorSelection> foobar = actorContext.findPrimaryShardAsync("foobar");
+ Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
try {
Await.result(foobar, Duration.apply(100, TimeUnit.MILLISECONDS));
}
- Future<ActorSelection> cached = actorContext.getPrimaryShardActorSelectionCache().getIfPresent("foobar");
+ Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
assertNull(cached);
+ }
+
+ @Test
+ public void testBroadcast() {
+ new JavaTestKit(getSystem()) {{
+ ActorRef shardActorRef1 = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ ActorRef shardActorRef2 = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ TestActorRef<MockShardManager> shardManagerActorRef = TestActorRef.create(getSystem(), MockShardManager.props());
+ MockShardManager shardManagerActor = shardManagerActorRef.underlyingActor();
+ shardManagerActor.addFindPrimaryResp("shard1", new RemotePrimaryShardFound(shardActorRef1.path().toString()));
+ shardManagerActor.addFindPrimaryResp("shard2", new RemotePrimaryShardFound(shardActorRef2.path().toString()));
+ shardManagerActor.addFindPrimaryResp("shard3", new NoShardLeaderException("not found"));
+
+ Configuration mockConfig = mock(Configuration.class);
+ doReturn(Sets.newLinkedHashSet(Arrays.asList("shard1", "shard2", "shard3"))).
+ when(mockConfig).getAllShardNames();
+
+ ActorContext actorContext = new ActorContext(getSystem(), shardManagerActorRef,
+ mock(ClusterWrapper.class), mockConfig,
+ DatastoreContext.newBuilder().shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build());
+ actorContext.broadcast(new TestMessage());
+
+ expectFirstMatching(shardActorRef1, TestMessage.class);
+ expectFirstMatching(shardActorRef2, TestMessage.class);
+ }};
}
+ private <T> T expectFirstMatching(ActorRef actor, Class<T> clazz) {
+ int count = 5000 / 50;
+ for(int i = 0; i < count; i++) {
+ try {
+ T message = (T) MessageCollectorActor.getFirstMatching(actor, clazz);
+ if(message != null) {
+ return message;
+ }
+ } catch (Exception e) {}
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Did not receive message of type " + clazz);
+ return null;
+ }
}
import akka.actor.ActorRef;
import akka.actor.UntypedActor;
-
import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
* </p>
*/
public class MessageCollectorActor extends UntypedActor {
- private List<Object> messages = new ArrayList<>();
+ private final List<Object> messages = new ArrayList<>();
@Override public void onReceive(Object message) throws Exception {
if(message instanceof String){
if("messages".equals(message)){
- getSender().tell(new ArrayList(messages), getSelf());
+ getSender().tell(new ArrayList<>(messages), getSelf());
}
} else {
messages.add(message);
}
}
+ public void clear() {
+ messages.clear();
+ }
+
public static List<Object> getAllMessages(ActorRef actor) throws Exception {
FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
Timeout operationTimeout = new Timeout(operationDuration);
return output;
}
+ public static <T> T expectFirstMatching(ActorRef actor, Class<T> clazz) {
+ int count = 5000 / 50;
+ for(int i = 0; i < count; i++) {
+ try {
+ T message = (T) getFirstMatching(actor, clazz);
+ if(message != null) {
+ return message;
+ }
+ } catch (Exception e) {}
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Did not receive message of type " + clazz);
+ return null;
+ }
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
-import com.google.common.base.Optional;
public class MockActorContext extends ActorContext {
return executeRemoteOperationResponse;
}
- @Override public Optional<ActorSelection> findPrimaryShard(String shardName) {
- return Optional.absent();
- }
-
public void setExecuteShardOperationResponse(Object response){
executeShardOperationResponse = response;
}
import akka.cluster.ClusterEvent;
import akka.cluster.MemberStatus;
import akka.cluster.UniqueAddress;
-import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
-import scala.collection.JavaConversions;
import java.util.HashSet;
import java.util.Set;
+import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
+import scala.collection.JavaConversions;
public class MockClusterWrapper implements ClusterWrapper{
private Address selfAddress = new Address("akka.tcp", "test", "127.0.0.1", 2550);
+ private String currentMemberName = "member-1";
+
+ public MockClusterWrapper() {
+ }
+
+ public MockClusterWrapper(String currentMemberName) {
+ this.currentMemberName = currentMemberName;
+ }
@Override
public void subscribeToMemberEvents(ActorRef actorRef) {
@Override
public String getCurrentMemberName() {
- return "member-1";
+ return currentMemberName;
}
@Override
}
- private static ClusterEvent.MemberUp createMemberUp(String memberName, String address) {
+ public static ClusterEvent.MemberUp createMemberUp(String memberName, String address) {
akka.cluster.UniqueAddress uniqueAddress = new UniqueAddress(
AddressFromURIString.parse(address), 55);
package org.opendaylight.controller.cluster.datastore.utils;
import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
public class MockConfiguration implements Configuration{
- @Override public List<String> getMemberShardNames(final String memberName) {
- return Arrays.asList("default");
+ private Map<String, List<String>> shardMembers = ImmutableMap.<String, List<String>>builder().
+ put("default", Arrays.asList("member-1", "member-2")).
+ /*put("astronauts", Arrays.asList("member-2", "member-3")).*/build();
+
+ public MockConfiguration() {
+ }
+
+ public MockConfiguration(Map<String, List<String>> shardMembers) {
+ this.shardMembers = shardMembers;
}
- @Override public Optional<String> getModuleNameFromNameSpace(
+ @Override
+ public List<String> getMemberShardNames(final String memberName) {
+ return new ArrayList<>(shardMembers.keySet());
+ }
+ @Override
+ public Optional<String> getModuleNameFromNameSpace(
final String nameSpace) {
return Optional.absent();
}
return Arrays.asList("member-2", "member-3");
}
- return Collections.emptyList();
+ List<String> members = shardMembers.get(shardName);
+ return members != null ? members : Collections.<String>emptyList();
}
@Override public Set<String> getAllShardNames() {
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
+
+ private final List<Collection<DataTreeCandidate>> changeList =
+ Collections.synchronizedList(Lists.<Collection<DataTreeCandidate>>newArrayList());
+
+ private volatile CountDownLatch changeLatch;
+ private int expChangeEventCount;
+
+ public MockDataTreeChangeListener(int expChangeEventCount) {
+ reset(expChangeEventCount);
+ }
+
+ public void reset(int expChangeEventCount) {
+ changeLatch = new CountDownLatch(expChangeEventCount);
+ this.expChangeEventCount = expChangeEventCount;
+ changeList.clear();
+ }
+
+ @Override
+ public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
+ changeList.add(changes);
+ changeLatch.countDown();
+ }
+
+ public void waitForChangeEvents() {
+ boolean done = Uninterruptibles.awaitUninterruptibly(changeLatch, 5, TimeUnit.SECONDS);
+ if(!done) {
+ fail(String.format("Missing change notifications. Expected: %d. Actual: %d",
+ expChangeEventCount, (expChangeEventCount - changeLatch.getCount())));
+ }
+ }
+
+ public void expectNoMoreChanges(String assertMsg) {
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+ assertEquals(assertMsg, expChangeEventCount, changeList.size());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import com.google.common.base.Optional;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import org.junit.Test;
+import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
+import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class NormalizedNodeAggregatorTest {
+
+ @Test
+ public void testAggregate() throws InterruptedException, ExecutionException, ReadFailedException, DataValidationFailedException {
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
+
+ Optional<NormalizedNode<?, ?>> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.builder().build(),
+ Lists.newArrayList(
+ Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode1, schemaContext)),
+ Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode2, schemaContext))),
+ schemaContext);
+
+
+ NormalizedNode<?,?> normalizedNode = optional.get();
+
+ assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+
+ Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+
+ for(NormalizedNode<?,?> node : collection){
+ assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
+ }
+
+ assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
+ findChildWithQName(collection, TestModel.TEST_QNAME) != null);
+
+ assertEquals(expectedNode1, findChildWithQName(collection, TestModel.TEST_QNAME));
+
+ assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
+ findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
+
+ assertEquals(expectedNode2, findChildWithQName(collection, CarsModel.BASE_QNAME));
+
+ }
+
+ public static NormalizedNode<?,?> getRootNode(NormalizedNode<?, ?> moduleNode, SchemaContext schemaContext) throws ReadFailedException, ExecutionException, InterruptedException {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", Executors.newSingleThreadExecutor());
+ store.onGlobalContextUpdated(schemaContext);
+
+ DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
+
+ writeTransaction.merge(YangInstanceIdentifier.builder().node(moduleNode.getNodeType()).build(), moduleNode);
+
+ DOMStoreThreePhaseCommitCohort ready = writeTransaction.ready();
+
+ ready.canCommit().get();
+ ready.preCommit().get();
+ ready.commit().get();
+
+ DOMStoreReadTransaction readTransaction = store.newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read = readTransaction.read(YangInstanceIdentifier.builder().build());
+
+ Optional<NormalizedNode<?, ?>> nodeOptional = read.checkedGet();
+
+ return nodeOptional.get();
+ }
+
+ public static NormalizedNode<?,?> findChildWithQName(Collection<NormalizedNode<?, ?>> collection, QName qName) {
+ for(NormalizedNode<?,?> node : collection){
+ if(node.getNodeType().equals(qName)){
+ return node;
+ }
+ }
+
+ return null;
+ }
+
+}
\ No newline at end of file
*/
package org.opendaylight.controller.md.cluster.datastore.model;
+import com.google.common.io.Resources;
+import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
-import java.util.Set;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.parser.api.YangSyntaxErrorException;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
public class TestModel {
- public static final QName TEST_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13",
- "test");
-
- public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
- public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
- public static final QName OUTER_CHOICE_QNAME = QName.create(TEST_QNAME, "outer-choice");
- public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
- public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
- public static final QName DESC_QNAME = QName.create(TEST_QNAME, "desc");
- public static final QName VALUE_QNAME = QName.create(TEST_QNAME, "value");
- private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
-
- public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
- public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
- node(OUTER_LIST_QNAME).build();
- public static final YangInstanceIdentifier INNER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
- node(OUTER_LIST_QNAME).node(INNER_LIST_QNAME).build();
- public static final QName TWO_QNAME = QName.create(TEST_QNAME,"two");
- public static final QName THREE_QNAME = QName.create(TEST_QNAME,"three");
-
-
- public static final InputStream getDatastoreTestInputStream() {
- return getInputStream(DATASTORE_TEST_YANG);
- }
-
- private static InputStream getInputStream(final String resourceName) {
- return TestModel.class.getResourceAsStream(DATASTORE_TEST_YANG);
- }
-
- public static SchemaContext createTestContext() {
- YangParserImpl parser = new YangParserImpl();
- Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(getDatastoreTestInputStream()));
- return parser.resolveSchemaContext(modules);
- }
+ public static final QName TEST_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13",
+ "test");
+
+ public static final QName JUNK_QNAME = QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:junk", "2014-03-13",
+ "junk");
+
+
+ public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+ public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+ public static final QName OUTER_CHOICE_QNAME = QName.create(TEST_QNAME, "outer-choice");
+ public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+ public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+ public static final QName DESC_QNAME = QName.create(TEST_QNAME, "desc");
+ public static final QName VALUE_QNAME = QName.create(TEST_QNAME, "value");
+ private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+ public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+ public static final YangInstanceIdentifier JUNK_PATH = YangInstanceIdentifier.of(JUNK_QNAME);
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).build();
+ public static final YangInstanceIdentifier INNER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).node(INNER_LIST_QNAME).build();
+ public static final QName TWO_QNAME = QName.create(TEST_QNAME,"two");
+ public static final QName THREE_QNAME = QName.create(TEST_QNAME,"three");
+
+
+ public static final InputStream getDatastoreTestInputStream() {
+ return TestModel.class.getResourceAsStream(DATASTORE_TEST_YANG);
+ }
+
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ try {
+ return parser.parseSources(Collections.singleton(Resources.asByteSource(TestModel.class.getResource(DATASTORE_TEST_YANG))));
+ } catch (IOException | YangSyntaxErrorException e) {
+ throw new ExceptionInInitializerError(e);
+ }
+ }
}
}
in-memory-journal {
- class = "org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal"
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
}
in-memory-snapshot-store {
# Class name of the plugin.
- class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore"
# Dispatcher for the plugin actor.
plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
}
mailbox-capacity = 1000
mailbox-push-timeout-time = 100ms
}
+
+Member1 {
+ bounded-mailbox {
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+ mailbox-capacity = 1000
+ mailbox-push-timeout-time = 100ms
+ }
+
+ in-memory-journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
+
+ in-memory-snapshot-store {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore"
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+ }
+
+ akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+ persistence.journal.plugin = "in-memory-journal"
+
+ loglevel = "DEBUG"
+
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+
+ serializers {
+ java = "akka.serialization.JavaSerializer"
+ proto = "akka.remote.serialization.ProtobufSerializer"
+ }
+
+ serialization-bindings {
+ "com.google.protobuf.Message" = proto
+ }
+ }
+ remote {
+ log-remote-lifecycle-events = off
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2558
+ }
+ }
+
+ cluster {
+ auto-down-unreachable-after = 100s
+
+ roles = [
+ "member-1"
+ ]
+ }
+ }
+}
+
+Member2 {
+ bounded-mailbox {
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+ mailbox-capacity = 1000
+ mailbox-push-timeout-time = 100ms
+ }
+
+ in-memory-journal {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+ }
+
+ in-memory-snapshot-store {
+ class = "org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore"
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+ }
+
+ akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+ persistence.journal.plugin = "in-memory-journal"
+
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+
+ serializers {
+ java = "akka.serialization.JavaSerializer"
+ proto = "akka.remote.serialization.ProtobufSerializer"
+ }
+
+ serialization-bindings {
+ "com.google.protobuf.Message" = proto
+ }
+ }
+ remote {
+ log-remote-lifecycle-events = off
+ netty.tcp {
+ hostname = "127.0.0.1"
+ port = 2559
+ }
+ }
+
+ cluster {
+ auto-down-unreachable-after = 100s
+
+ roles = [
+ "member-2"
+ ]
+ }
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-core-api</artifactId>
<packaging>bundle</packaging>
*/
package org.opendaylight.controller.md.sal.dom.api;
+import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import java.io.Serializable;
import java.util.Iterator;
return oi.hasNext() ? -1 : 0;
}
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("datastore", datastoreType).add("root", rootIdentifier).toString();
+ }
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-broker-impl</artifactId>
<packaging>bundle</packaging>
import com.google.common.util.concurrent.ListenableFuture;
import com.lmax.disruptor.EventHandler;
import com.lmax.disruptor.InsufficientCapacityException;
-import com.lmax.disruptor.SleepingWaitStrategy;
+import com.lmax.disruptor.PhasedBackoffWaitStrategy;
import com.lmax.disruptor.WaitStrategy;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.dsl.ProducerType;
*/
public final class DOMNotificationRouter implements AutoCloseable, DOMNotificationPublishService, DOMNotificationService {
private static final ListenableFuture<Void> NO_LISTENERS = Futures.immediateFuture(null);
- private static final WaitStrategy DEFAULT_STRATEGY = new SleepingWaitStrategy();
+ private static final WaitStrategy DEFAULT_STRATEGY = PhasedBackoffWaitStrategy.withLock(1L, 30L, TimeUnit.MILLISECONDS);
private static final EventHandler<DOMNotificationRouterEvent> DISPATCH_NOTIFICATIONS = new EventHandler<DOMNotificationRouterEvent>() {
@Override
public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) throws Exception {
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AbstractCheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
* A {@link Future} used to report the status of an future {@link java.util.concurrent.Future}.
*/
final class PingPongFuture extends AbstractCheckedFuture<Void, TransactionCommitFailedException> {
- protected PingPongFuture(final ListenableFuture<Void> delegate) {
- super(delegate);
- }
+ protected PingPongFuture(final ListenableFuture<Void> delegate) {
+ super(delegate);
+ }
- @Override
- protected TransactionCommitFailedException mapException(final Exception e) {
- Preconditions.checkArgument(e instanceof TransactionCommitFailedException);
- return (TransactionCommitFailedException) e;
+ @Override
+ protected TransactionCommitFailedException mapException(final Exception e) {
+ if (e.getCause() instanceof TransactionCommitFailedException){
+ return (TransactionCommitFailedException) e.getCause();
+ } else {
+ return new TransactionCommitFailedException(e.getMessage(), e.getCause(), null);
}
+ }
}
+
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertTrue;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class DOMDataTreeListenerTest {
+
+ private SchemaContext schemaContext;
+ private AbstractDOMDataBroker domBroker;
+ private ListeningExecutorService executor;
+ private ExecutorService futureExecutor;
+ private CommitExecutorService commitExecutor;
+
+ private static final DataContainerChild<?, ?> OUTER_LIST = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1))
+ .build();
+
+ private static final DataContainerChild<?, ?> OUTER_LIST_2 = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2))
+ .build();
+
+ private static final NormalizedNode<?, ?> TEST_CONTAINER = Builders.containerBuilder()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(OUTER_LIST)
+ .build();
+
+ private static final NormalizedNode<?, ?> TEST_CONTAINER_2 = Builders.containerBuilder()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+ .withChild(OUTER_LIST_2)
+ .build();
+
+ private static DOMDataTreeIdentifier ROOT_DATA_TREE_ID = new DOMDataTreeIdentifier(
+ LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
+
+ private static DOMDataTreeIdentifier OUTER_LIST_DATA_TREE_ID = new DOMDataTreeIdentifier(
+ LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH);
+
+ @Before
+ public void setupStore() {
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.newDirectExecutorService());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.newDirectExecutorService());
+ schemaContext = TestModel.createTestContext();
+
+ operStore.onGlobalContextUpdated(schemaContext);
+ configStore.onGlobalContextUpdated(schemaContext);
+
+ ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder() //
+ .put(CONFIGURATION, configStore) //
+ .put(OPERATIONAL, operStore) //
+ .build();
+
+ commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
+ futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
+ executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
+ domBroker = new SerializedDOMDataBroker(stores, executor);
+ }
+
+ @After
+ public void tearDown() {
+ if (executor != null) {
+ executor.shutdownNow();
+ }
+
+ if (futureExecutor != null) {
+ futureExecutor.shutdownNow();
+ }
+ }
+
+ @Test
+ public void writeContainerEmptyTreeTest() throws InterruptedException {
+ CountDownLatch latch = new CountDownLatch(1);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+
+ final DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit();
+
+ latch.await(5, TimeUnit.SECONDS);
+
+ assertEquals(1, listener.getReceivedChanges().size());
+ final Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+ listenerReg.close();
+ }
+
+ @Test
+ public void replaceContainerContainerInTreeTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER_2);
+ writeTx.submit();
+
+ latch.await(5, TimeUnit.SECONDS);
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(TEST_CONTAINER, TEST_CONTAINER_2, ModificationType.WRITE, candidateRoot);
+ listenerReg.close();
+ }
+
+ @Test
+ public void deleteContainerContainerInTreeTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
+ writeTx.submit();
+
+ latch.await(5, TimeUnit.SECONDS);
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(TEST_CONTAINER, null, ModificationType.DELETE, candidateRoot);
+ listenerReg.close();
+ }
+
+ @Test
+ public void replaceChildListContainerInTreeTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH, OUTER_LIST_2);
+ writeTx.submit();
+
+ latch.await(5, TimeUnit.SECONDS);
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(TEST_CONTAINER, TEST_CONTAINER_2, ModificationType.SUBTREE_MODIFIED, candidateRoot);
+ final DataTreeCandidateNode modifiedChild = candidateRoot.getModifiedChild(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME));
+ assertNotNull(modifiedChild);
+ checkChange(OUTER_LIST, OUTER_LIST_2, ModificationType.WRITE, modifiedChild);
+ listenerReg.close();
+ }
+
+ @Test
+ public void rootModificationChildListenerTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(OUTER_LIST_DATA_TREE_ID, listener);
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER_2);
+ writeTx.submit().checkedGet();
+
+ latch.await(1, TimeUnit.SECONDS);
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, OUTER_LIST, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(OUTER_LIST, OUTER_LIST_2, ModificationType.WRITE, candidateRoot);
+ listenerReg.close();
+ }
+
+ @Test
+ public void listEntryChangeNonRootRegistrationTest() throws InterruptedException, TransactionCommitFailedException {
+ CountDownLatch latch = new CountDownLatch(2);
+
+ DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
+ assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!",
+ dataTreeChangeService);
+
+ DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
+ writeTx.submit().checkedGet();
+
+ final TestDataTreeListener listener = new TestDataTreeListener(latch);
+ final ListenerRegistration<TestDataTreeListener> listenerReg =
+ dataTreeChangeService.registerDataTreeChangeListener(OUTER_LIST_DATA_TREE_ID, listener);
+
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId1 =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId2 =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2);
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId3 =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 3);
+
+ final MapEntryNode outerListEntry1 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
+ final MapEntryNode outerListEntry2 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2);
+ final MapEntryNode outerListEntry3 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 3);
+
+ final MapNode listAfter = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+ .withChild(outerListEntry2)
+ .withChild(outerListEntry3)
+ .build();
+
+ writeTx = domBroker.newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId1));
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId2),
+ outerListEntry2);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId3),
+ outerListEntry3);
+ writeTx.submit();
+
+ latch.await(5, TimeUnit.SECONDS);
+
+ assertEquals(2, listener.getReceivedChanges().size());
+ Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
+ assertEquals(1, changes.size());
+
+ DataTreeCandidate candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ DataTreeCandidateNode candidateRoot = candidate.getRootNode();
+ checkChange(null, OUTER_LIST, ModificationType.WRITE, candidateRoot);
+
+ changes = listener.getReceivedChanges().get(1);
+ assertEquals(1, changes.size());
+
+ candidate = changes.iterator().next();
+ assertNotNull(candidate);
+ candidateRoot = candidate.getRootNode();
+ checkChange(OUTER_LIST, listAfter, ModificationType.SUBTREE_MODIFIED, candidateRoot);
+ final DataTreeCandidateNode entry1Canditate = candidateRoot.getModifiedChild(outerListEntryId1);
+ checkChange(outerListEntry1, null, ModificationType.DELETE, entry1Canditate);
+ final DataTreeCandidateNode entry2Canditate = candidateRoot.getModifiedChild(outerListEntryId2);
+ checkChange(null, outerListEntry2, ModificationType.WRITE, entry2Canditate);
+ final DataTreeCandidateNode entry3Canditate = candidateRoot.getModifiedChild(outerListEntryId3);
+ checkChange(null, outerListEntry3, ModificationType.WRITE, entry3Canditate);
+ listenerReg.close();
+ }
+
+ private static void checkChange(NormalizedNode<?, ?> expectedBefore,
+ NormalizedNode<?, ?> expectedAfter,
+ ModificationType expectedMod,
+ DataTreeCandidateNode candidateNode) {
+ if (expectedBefore != null) {
+ assertTrue(candidateNode.getDataBefore().isPresent());
+ assertEquals(expectedBefore, candidateNode.getDataBefore().get());
+ } else {
+ assertFalse(candidateNode.getDataBefore().isPresent());
+ }
+
+ if (expectedAfter != null) {
+ assertTrue(candidateNode.getDataAfter().isPresent());
+ assertEquals(expectedAfter, candidateNode.getDataAfter().get());
+ } else {
+ assertFalse(candidateNode.getDataAfter().isPresent());
+ }
+
+ assertEquals(expectedMod, candidateNode.getModificationType());
+ }
+
+ private DOMDataTreeChangeService getDOMDataTreeChangeService() {
+ final DOMDataBrokerExtension extension = domBroker.getSupportedExtensions()
+ .get(DOMDataTreeChangeService.class);
+ if (extension == null) {
+ return null;
+ }
+ DOMDataTreeChangeService dataTreeChangeService = null;
+ if (extension instanceof DOMDataTreeChangeService) {
+ dataTreeChangeService = (DOMDataTreeChangeService) extension;
+ }
+ return dataTreeChangeService;
+ }
+
+
+ static class CommitExecutorService extends ForwardingExecutorService {
+
+ ExecutorService delegate;
+
+ public CommitExecutorService(final ExecutorService delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ protected ExecutorService delegate() {
+ return delegate;
+ }
+ }
+
+ static class TestDataTreeListener implements DOMDataTreeChangeListener {
+
+ private List<Collection<DataTreeCandidate>> receivedChanges = new ArrayList<>();
+ private CountDownLatch latch;
+
+ public TestDataTreeListener(final CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
+ receivedChanges.add(changes);
+ latch.countDown();
+ }
+
+ public List<Collection<DataTreeCandidate>> getReceivedChanges() {
+ return receivedChanges;
+ }
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-core-spi</artifactId>
<packaging>bundle</packaging>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
+import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.slf4j.Logger;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
/**
- * Abstract DOM Store Transaction
+ * Abstract DOM Store Transaction.
*
* Convenience super implementation of DOM Store transaction which provides
* common implementation of {@link #toString()} and {@link #getIdentifier()}.
+ *
+ * It can optionally capture the context where it was allocated.
+ *
+ * <T> identifier type
*/
-abstract class AbstractDOMStoreTransaction implements DOMStoreTransaction {
+@Beta
+public abstract class AbstractDOMStoreTransaction<T> implements DOMStoreTransaction {
private final Throwable debugContext;
- private final Object identifier;
+ private final T identifier;
+
+ protected AbstractDOMStoreTransaction(@Nonnull final T identifier) {
+ this(identifier, false);
+ }
- protected AbstractDOMStoreTransaction(final Object identifier, final boolean debug) {
+ protected AbstractDOMStoreTransaction(@Nonnull final T identifier, final boolean debug) {
this.identifier = Preconditions.checkNotNull(identifier, "Identifier must not be null.");
this.debugContext = debug ? new Throwable().fillInStackTrace() : null;
}
@Override
- public final Object getIdentifier() {
+ public final T getIdentifier() {
return identifier;
}
- protected final void warnDebugContext(final Logger logger) {
- if (debugContext != null) {
- logger.warn("Transaction {} has been allocated in the following context", identifier, debugContext);
- }
+ /**
+ * Return the context in which this transaction was allocated.
+ *
+ * @return The context in which this transaction was allocated, or null
+ * if the context was not recorded.
+ */
+ @Nullable
+ public final Throwable getDebugContext() {
+ return debugContext;
}
@Override
* ToStringHelper instance
* @return ToStringHelper instance which was passed in
*/
- protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
+ protected ToStringHelper addToStringAttributes(@Nonnull final ToStringHelper toStringHelper) {
return toStringHelper.add("id", identifier);
}
-}
\ No newline at end of file
+}
import org.opendaylight.controller.md.sal.dom.spi.AbstractRegistrationTree;
import org.opendaylight.controller.md.sal.dom.spi.RegistrationTreeNode;
import org.opendaylight.controller.md.sal.dom.spi.RegistrationTreeSnapshot;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
/**
* Callback notifying the subclass that the specified registration is being closed and it's user no longer
- * wishes to receive notifications. This notification is invoked while the {@link ListenerRegistration#close()}
+ * wishes to receive notifications. This notification is invoked while the {@link org.opendaylight.yangtools.concepts.ListenerRegistration#close()}
* method is executing. Subclasses can use this callback to properly remove any delayed notifications pending
* towards the registration.
*
}
@Override
- public final <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
+ public final <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
// Take the write lock
takeLock();
try {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import com.google.common.base.Preconditions;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract implementation of the {@link DOMStoreTransactionChain} interface relying on {@link DataTreeSnapshot} supplier
+ * and backend commit coordinator.
+ *
+ * @param <T> transaction identifier type
+ */
+@Beta
+public abstract class AbstractSnapshotBackedTransactionChain<T> extends TransactionReadyPrototype<T> implements DOMStoreTransactionChain {
+ private static abstract class State {
+ /**
+ * Allocate a new snapshot.
+ *
+ * @return A new snapshot
+ */
+ protected abstract DataTreeSnapshot getSnapshot();
+ }
+
+ private static final class Idle extends State {
+ private final AbstractSnapshotBackedTransactionChain<?> chain;
+
+ Idle(final AbstractSnapshotBackedTransactionChain<?> chain) {
+ this.chain = Preconditions.checkNotNull(chain);
+ }
+
+ @Override
+ protected DataTreeSnapshot getSnapshot() {
+ return chain.takeSnapshot();
+ }
+ }
+
+ /**
+ * We have a transaction out there.
+ */
+ private static final class Allocated extends State {
+ private static final AtomicReferenceFieldUpdater<Allocated, DataTreeSnapshot> SNAPSHOT_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(Allocated.class, DataTreeSnapshot.class, "snapshot");
+ private final DOMStoreWriteTransaction transaction;
+ private volatile DataTreeSnapshot snapshot;
+
+ Allocated(final DOMStoreWriteTransaction transaction) {
+ this.transaction = Preconditions.checkNotNull(transaction);
+ }
+
+ public DOMStoreWriteTransaction getTransaction() {
+ return transaction;
+ }
+
+ @Override
+ protected DataTreeSnapshot getSnapshot() {
+ final DataTreeSnapshot ret = snapshot;
+ Preconditions.checkState(ret != null, "Previous transaction %s is not ready yet", transaction.getIdentifier());
+ return ret;
+ }
+
+ void setSnapshot(final DataTreeSnapshot snapshot) {
+ final boolean success = SNAPSHOT_UPDATER.compareAndSet(this, null, snapshot);
+ Preconditions.checkState(success, "Transaction %s has already been marked as ready", transaction.getIdentifier());
+ }
+ }
+
+ /**
+ * Chain is logically shut down, no further allocation allowed.
+ */
+ private static final class Shutdown extends State {
+ private final String message;
+
+ Shutdown(final String message) {
+ this.message = Preconditions.checkNotNull(message);
+ }
+
+ @Override
+ protected DataTreeSnapshot getSnapshot() {
+ throw new IllegalStateException(message);
+ }
+ }
+
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<AbstractSnapshotBackedTransactionChain, State> STATE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(AbstractSnapshotBackedTransactionChain.class, State.class, "state");
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractSnapshotBackedTransactionChain.class);
+ private static final Shutdown CLOSED = new Shutdown("Transaction chain is closed");
+ private static final Shutdown FAILED = new Shutdown("Transaction chain has failed");
+ private final Idle idleState;
+ private volatile State state;
+
+ protected AbstractSnapshotBackedTransactionChain() {
+ idleState = new Idle(this);
+ state = idleState;
+ }
+
+ private Entry<State, DataTreeSnapshot> getSnapshot() {
+ final State localState = state;
+ return new SimpleEntry<>(localState, localState.getSnapshot());
+ }
+
+ private boolean recordTransaction(final State expected, final DOMStoreWriteTransaction transaction) {
+ final State state = new Allocated(transaction);
+ return STATE_UPDATER.compareAndSet(this, expected, state);
+ }
+
+ @Override
+ public final DOMStoreReadTransaction newReadOnlyTransaction() {
+ final Entry<State, DataTreeSnapshot> entry = getSnapshot();
+ return SnapshotBackedTransactions.newReadTransaction(nextTransactionIdentifier(), getDebugTransactions(), entry.getValue());
+ }
+
+ @Override
+ public final DOMStoreReadWriteTransaction newReadWriteTransaction() {
+ Entry<State, DataTreeSnapshot> entry;
+ DOMStoreReadWriteTransaction ret;
+
+ do {
+ entry = getSnapshot();
+ ret = new SnapshotBackedReadWriteTransaction<T>(nextTransactionIdentifier(),
+ getDebugTransactions(), entry.getValue(), this);
+ } while (!recordTransaction(entry.getKey(), ret));
+
+ return ret;
+ }
+
+ @Override
+ public final DOMStoreWriteTransaction newWriteOnlyTransaction() {
+ Entry<State, DataTreeSnapshot> entry;
+ DOMStoreWriteTransaction ret;
+
+ do {
+ entry = getSnapshot();
+ ret = new SnapshotBackedWriteTransaction<T>(nextTransactionIdentifier(),
+ getDebugTransactions(), entry.getValue(), this);
+ } while (!recordTransaction(entry.getKey(), ret));
+
+ return ret;
+ }
+
+ @Override
+ protected final void transactionAborted(final SnapshotBackedWriteTransaction<T> tx) {
+ final State localState = state;
+ if (localState instanceof Allocated) {
+ final Allocated allocated = (Allocated)localState;
+ if (allocated.getTransaction().equals(tx)) {
+ final boolean success = STATE_UPDATER.compareAndSet(this, localState, idleState);
+ if (!success) {
+ LOG.warn("Transaction {} aborted, but chain {} state already transitioned from {} to {}, very strange",
+ tx, this, localState, state);
+ }
+ }
+ }
+ }
+
+ @Override
+ protected final DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction<T> tx, final DataTreeModification tree) {
+ final State localState = state;
+
+ if (localState instanceof Allocated) {
+ final Allocated allocated = (Allocated)localState;
+ final DOMStoreWriteTransaction transaction = allocated.getTransaction();
+ Preconditions.checkState(tx.equals(transaction), "Mis-ordered ready transaction %s last allocated was %s", tx, transaction);
+ allocated.setSnapshot(tree);
+ } else {
+ LOG.debug("Ignoring transaction {} readiness due to state {}", tx, localState);
+ }
+
+ return createCohort(tx, tree);
+ }
+
+ @Override
+ public final void close() {
+ final State localState = state;
+
+ do {
+ Preconditions.checkState(!CLOSED.equals(localState), "Transaction chain {} has been closed", this);
+
+ if (FAILED.equals(localState)) {
+ LOG.debug("Ignoring user close in failed state");
+ return;
+ }
+ } while (!STATE_UPDATER.compareAndSet(this, localState, CLOSED));
+ }
+
+ /**
+ * Notify the base logic that a previously-submitted transaction has been committed successfully.
+ *
+ * @param transaction Transaction which completed successfully.
+ */
+ protected final void onTransactionCommited(final SnapshotBackedWriteTransaction<T> transaction) {
+ // If the committed transaction was the one we allocated last,
+ // we clear it and the ready snapshot, so the next transaction
+ // allocated refers to the data tree directly.
+ final State localState = state;
+
+ if (!(localState instanceof Allocated)) {
+ // This can legally happen if the chain is shut down before the transaction was committed
+ // by the backend.
+ LOG.debug("Ignoring successful transaction {} in state {}", transaction, localState);
+ return;
+ }
+
+ final Allocated allocated = (Allocated)localState;
+ final DOMStoreWriteTransaction tx = allocated.getTransaction();
+ if (!tx.equals(transaction)) {
+ LOG.debug("Ignoring non-latest successful transaction {} in state {}", transaction, allocated);
+ return;
+ }
+
+ if (!STATE_UPDATER.compareAndSet(this, localState, idleState)) {
+ LOG.debug("Transaction chain {} has already transitioned from {} to {}, not making it idle", this, localState, state);
+ }
+ }
+
+ /**
+ * Notify the base logic that a previously-submitted transaction has failed.
+ *
+ * @param transaction Transaction which failed.
+ * @param cause Failure cause
+ */
+ protected final void onTransactionFailed(final SnapshotBackedWriteTransaction<T> transaction, final Throwable cause) {
+ LOG.debug("Transaction chain {} failed on transaction {}", this, transaction, cause);
+ state = FAILED;
+ }
+
+ /**
+ * Return the next transaction identifier.
+ *
+ * @return transaction identifier.
+ */
+ protected abstract T nextTransactionIdentifier();
+
+ /**
+ * Inquire as to whether transactions should record their allocation context.
+ *
+ * @return True if allocation context should be recorded.
+ */
+ protected abstract boolean getDebugTransactions();
+
+ /**
+ * Take a fresh {@link DataTreeSnapshot} from the backend.
+ *
+ * @return A new snapshot.
+ */
+ protected abstract DataTreeSnapshot takeSnapshot();
+
+ /**
+ * Create a cohort for driving the transaction through the commit process.
+ *
+ * @param transaction Transaction handle
+ * @param modification {@link DataTreeModification} which needs to be applied to the backend
+ * @return A {@link DOMStoreThreePhaseCommitCohort} cohort.
+ */
+ protected abstract DOMStoreThreePhaseCommitCohort createCohort(final SnapshotBackedWriteTransaction<T> transaction, final DataTreeModification modification);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} implementations,
+ * which forward most of their functionality to a backend {@link #delegate()}.
+ */
+@Beta
+public abstract class ForwardingDOMStoreThreePhaseCommitCohort extends ForwardingObject implements DOMStoreThreePhaseCommitCohort {
+ @Override
+ protected abstract DOMStoreThreePhaseCommitCohort delegate();
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return delegate().canCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegate().preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegate().abort();
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ return delegate().commit();
+ }
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
import static com.google.common.base.Preconditions.checkNotNull;
-
+import com.google.common.annotations.Beta;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
* Implementation of read-only transaction backed by {@link DataTreeSnapshot}
* which delegates most of its calls to similar methods provided by underlying snapshot.
*
+ * <T> identifier type
*/
-final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction
- implements DOMStoreReadTransaction {
-
+@Beta
+public final class SnapshotBackedReadTransaction<T> extends AbstractDOMStoreTransaction<T> implements DOMStoreReadTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadTransaction.class);
private volatile DataTreeSnapshot stableSnapshot;
- public SnapshotBackedReadTransaction(final Object identifier, final boolean debug, final DataTreeSnapshot snapshot) {
+ /**
+ * Creates a new read-only transaction.
+ *
+ * @param identifier Transaction Identifier
+ * @param debug Enable transaction debugging
+ * @param snapshot Snapshot which will be modified.
+ */
+ SnapshotBackedReadTransaction(final T identifier, final boolean debug, final DataTreeSnapshot snapshot) {
super(identifier, debug);
this.stableSnapshot = Preconditions.checkNotNull(snapshot);
LOG.debug("ReadOnly Tx: {} allocated with snapshot {}", identifier, snapshot);
checkNotNull(path, "Path must not be null.");
try {
- return Futures.immediateCheckedFuture(
- read(path).checkedGet().isPresent());
+ return Futures.immediateCheckedFuture(read(path).checkedGet().isPresent());
} catch (ReadFailedException e) {
return Futures.immediateFailedCheckedFuture(e);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.annotations.Beta;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
* Implementation of Read-Write transaction which is backed by {@link DataTreeSnapshot}
* and executed according to {@link TransactionReadyPrototype}.
*
+ * @param <T> identifier type
*/
-final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
+@Beta
+public final class SnapshotBackedReadWriteTransaction<T> extends SnapshotBackedWriteTransaction<T> implements DOMStoreReadWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
- /**
- * Creates new read-write transaction.
- *
- * @param identifier transaction Identifier
- * @param snapshot Snapshot which will be modified.
- * @param readyImpl Implementation of ready method.
- */
- protected SnapshotBackedReadWriteTransaction(final Object identifier, final boolean debug,
- final DataTreeSnapshot snapshot, final TransactionReadyPrototype store) {
- super(identifier, debug, snapshot, store);
+ SnapshotBackedReadWriteTransaction(final T identifier, final boolean debug,
+ final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
+ super(identifier, debug, snapshot, readyImpl);
}
@Override
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+
+/**
+ * Public utility class for instantiating snapshot-backed transactions.
+ */
+@Beta
+public final class SnapshotBackedTransactions {
+ private SnapshotBackedTransactions() {
+ throw new UnsupportedOperationException("Utility class");
+ }
+
+ /**
+ * Creates a new read-only transaction.
+ *
+ * @param identifier Transaction Identifier
+ * @param debug Enable transaction debugging
+ * @param snapshot Snapshot which will be modified.
+ */
+ public static <T> SnapshotBackedReadTransaction<T> newReadTransaction(final T identifier, final boolean debug, final DataTreeSnapshot snapshot) {
+ return new SnapshotBackedReadTransaction<T>(identifier, debug, snapshot);
+ }
+
+ /**
+ * Creates a new read-write transaction.
+ *
+ * @param identifier transaction Identifier
+ * @param debug Enable transaction debugging
+ * @param snapshot Snapshot which will be modified.
+ * @param readyImpl Implementation of ready method.
+ */
+ public static <T> SnapshotBackedReadWriteTransaction<T> newReadWriteTransaction(final T identifier, final boolean debug,
+ final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
+ return new SnapshotBackedReadWriteTransaction<T>(identifier, debug, snapshot, readyImpl);
+ }
+
+ /**
+ * Creates a new write-only transaction.
+ *
+ * @param identifier transaction Identifier
+ * @param debug Enable transaction debugging
+ * @param snapshot Snapshot which will be modified.
+ * @param readyImpl Implementation of ready method.
+ */
+ public static <T> SnapshotBackedWriteTransaction<T> newWriteTransaction(final T identifier, final boolean debug,
+ final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
+ return new SnapshotBackedWriteTransaction<T>(identifier, debug, snapshot, readyImpl);
+ }
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
import static com.google.common.base.Preconditions.checkState;
+import com.google.common.annotations.Beta;
import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
/**
* Implementation of Write transaction which is backed by
* {@link DataTreeSnapshot} and executed according to
- * {@link org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
+ * {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
*
+ * @param <T> Identifier type
*/
-class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
+@Beta
+public class SnapshotBackedWriteTransaction<T> extends AbstractDOMStoreTransaction<T> implements DOMStoreWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
+ @SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+ @SuppressWarnings("rawtypes")
private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
// non-null when not ready
- private volatile TransactionReadyPrototype readyImpl;
+ private volatile TransactionReadyPrototype<T> readyImpl;
// non-null when not committed/closed
private volatile DataTreeModification mutableTree;
- /**
- * Creates new write-only transaction.
- *
- * @param identifier
- * transaction Identifier
- * @param snapshot
- * Snapshot which will be modified.
- * @param readyImpl
- * Implementation of ready method.
- */
- public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
- final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
+ SnapshotBackedWriteTransaction(final T identifier, final boolean debug,
+ final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
super(identifier, debug);
this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
mutableTree = snapshot.newModification();
* @param path Path to read
* @return null if the the transaction has been closed;
*/
- protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+ final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
return readyImpl == null ? null : mutableTree.readNode(path);
}
@Override
public DOMStoreThreePhaseCommitCohort ready() {
- final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ @SuppressWarnings("unchecked")
+ final TransactionReadyPrototype<T> wasReady = READY_UPDATER.getAndSet(this, null);
checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
LOG.debug("Store transaction: {} : Ready", getIdentifier());
@Override
public void close() {
- final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ @SuppressWarnings("unchecked")
+ final TransactionReadyPrototype<T> wasReady = READY_UPDATER.getAndSet(this, null);
if (wasReady != null) {
LOG.debug("Store transaction: {} : Closed", getIdentifier());
TREE_UPDATER.lazySet(this, null);
/**
* Prototype implementation of
- * {@link #ready(org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction)}
+ * {@link #ready(org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction)}
*
* This class is intended to be implemented by Transaction factories
- * responsible for allocation of {@link org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction} and
+ * responsible for allocation of {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction} and
* providing underlying logic for applying implementation.
*
+ * @param <T> identifier type
*/
- abstract static class TransactionReadyPrototype {
+ public abstract static class TransactionReadyPrototype<T> {
/**
* Called when a transaction is closed without being readied. This is not invoked for
* transactions which are ready.
*
* @param tx Transaction which got aborted.
*/
- protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
+ protected abstract void transactionAborted(final SnapshotBackedWriteTransaction<T> tx);
/**
* Returns a commit coordinator associated with supplied transactions.
* Modified data tree which has been constructed.
* @return DOMStoreThreePhaseCommitCohort associated with transaction
*/
- protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
+ protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction<T> tx, DataTreeModification tree);
}
}
\ No newline at end of file
<parent>\r
<artifactId>sal-parent</artifactId>\r
<groupId>org.opendaylight.controller</groupId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.3.0-SNAPSHOT</version>\r
</parent>\r
<artifactId>sal-dom-xsql-config</artifactId>\r
<description>Configuration files for md-sal</description>\r
</type>
<name>XSQL</name>
<data-broker>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
- <name>binding-data-broker</name>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
</data-broker>
<async-data-broker>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-dom-xsql</artifactId>
<packaging>bundle</packaging>
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.odl.xsql;
import java.sql.Connection;
import java.util.logging.Logger;
import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCConnection;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class JDBCDriver implements Driver {
public static JDBCDriver drv = new JDBCDriver();
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.InputStream;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.Map;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class TablesResultSet implements ResultSet {
private String tables[] = null;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.File;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLAdapter extends Thread implements SchemaContextListener {
private static final int SLEEP = 10000;
private String pinningFile;
private ServerSocket serverSocket = null;
private DOMDataBroker domDataBroker = null;
+ private static final String REFERENCE_FIELD_NAME = "reference";
private XSQLAdapter() {
XSQLAdapter.log("Starting Adapter");
List<Object> result = new LinkedList<Object>();
YangInstanceIdentifier instanceIdentifier = YangInstanceIdentifier
.builder()
- .node(XSQLODLUtils.getPath(table.getODLNode()).get(0))
+ .node(XSQLODLUtils.getPath(table.getFirstFromSchemaNodes()).get(0))
.toInstance();
DOMDataReadTransaction t = this.domDataBroker
.newReadOnlyTransaction();
Object node = t.read(type,
instanceIdentifier).get();
- node = XSQLODLUtils.get(node, "reference");
+ node = XSQLODLUtils.get(node, REFERENCE_FIELD_NAME);
if (node == null) {
return result;
}
-
- Map<?, ?> children = XSQLODLUtils.getChildren(node);
- for (Object c : children.values()) {
- result.add(c);
- /* I don't remember why i did this... possibly to prevent different siblings queried together
- Map<?, ?> sons = XSQLODLUtils.getChildren(c);
- for (Object child : sons.values()) {
- result.add(child);
- }*/
- }
-
+ result.add(node);
return result;
} catch (Exception err) {
XSQLAdapter.log(err);
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.DataInputStream;
import java.util.List;
import java.util.Map;
import java.util.Set;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLBluePrint implements DatabaseMetaData, Serializable {
private static final long serialVersionUID = 1L;
return result;
}
- public void addToBluePrintCache(XSQLBluePrintNode blNode) {
- this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
- Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode
- .getODLTableName());
- if (map == null) {
- map = new HashMap<String, XSQLBluePrintNode>();
- this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
+ public XSQLBluePrintNode addToBluePrintCache(XSQLBluePrintNode blNode,XSQLBluePrintNode parent) {
+ XSQLBluePrintNode existingNode = this.tableNameToBluePrint.get(blNode.getBluePrintNodeName());
+ if(existingNode!=null){
+ existingNode.mergeAugmentation(blNode);
+ return existingNode;
+ }else{
+ this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
+ Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode.getODLTableName());
+ if (map == null) {
+ map = new HashMap<String, XSQLBluePrintNode>();
+ this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
+ }
+ map.put(blNode.getBluePrintNodeName(), blNode);
+ if(parent!=null)
+ parent.addChild(blNode);
+ return blNode;
}
- map.put(blNode.getBluePrintNodeName(), blNode);
}
public Class<?> getGenericType(ParameterizedType type) {
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.io.Serializable;
import java.util.Map;
import java.util.Set;
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLBluePrintNode implements Serializable {
private static final long serialVersionUID = 1L;
private Set<XSQLColumn> columns = new HashSet<XSQLColumn>();
private Map<String, XSQLColumn> origNameToColumn = new HashMap<String, XSQLColumn>();
- private transient Object odlNode = null;
+ private transient Object[] odlSchemaNodes = null;
private boolean module = false;
private String bluePrintTableName = null;
private String odlTableName = null;
private String origName = null;
+ public void mergeAugmentation(XSQLBluePrintNode aug) {
+ this.relations.addAll(aug.relations);
+ this.inheritingNodes.addAll(aug.inheritingNodes);
+ this.children.addAll(aug.children);
+ this.columns.addAll(aug.columns);
+ this.origNameToColumn.putAll(aug.origNameToColumn);
+ if (aug.odlSchemaNodes != null) {
+ for (Object sn : aug.odlSchemaNodes) {
+ addToSchemaNodes(sn);
+ }
+ }
+ }
+
public XSQLBluePrintNode(String name, String _origName, int _level) {
this.level = _level;
this.odlTableName = name;
public XSQLBluePrintNode(Object _odlNode, int _level,
XSQLBluePrintNode _parent) {
- this.odlNode = _odlNode;
+ addToSchemaNodes(_odlNode);
this.level = _level;
this.module = XSQLODLUtils.isModule(_odlNode);
this.parent = _parent;
this.bluePrintTableName = XSQLODLUtils.getBluePrintName(_odlNode);
- this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+ this.odlTableName = XSQLODLUtils
+ .getODLNodeName(getFirstFromSchemaNodes());
+ }
+
+ private void addToSchemaNodes(Object schemaObject) {
+ if (this.odlSchemaNodes == null)
+ this.odlSchemaNodes = new Object[1];
+ else {
+ Object[] temp = new Object[this.odlSchemaNodes.length + 1];
+ System.arraycopy(this.odlSchemaNodes, 0, temp, 0,
+ this.odlSchemaNodes.length);
+ this.odlSchemaNodes = temp;
+ }
+ this.odlSchemaNodes[this.odlSchemaNodes.length - 1] = schemaObject;
+ }
+
+ public Object getFirstFromSchemaNodes() {
+ if (this.odlSchemaNodes == null) {
+ return null;
+ }
+ return this.odlSchemaNodes[0];
}
public String getOrigName() {
public String getODLTableName() {
if (this.odlTableName == null) {
- this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+ this.odlTableName = XSQLODLUtils
+ .getODLNodeName(getFirstFromSchemaNodes());
}
return this.odlTableName;
}
- public Object getODLNode() {
- return this.odlNode;
- }
-
- public void AddChild(XSQLBluePrintNode ch) {
+ public void addChild(XSQLBluePrintNode ch) {
this.children.add(ch);
}
if (myInterfaceName != null) {
return myInterfaceName;
}
- if (odlNode != null) {
+ if (this.odlSchemaNodes != null) {
return getBluePrintNodeName();
}
if (odlTableName != null) {
@Override
public boolean equals(Object obj) {
XSQLBluePrintNode other = (XSQLBluePrintNode) obj;
- if (odlNode != null) {
+ if (this.odlSchemaNodes != null) {
return getBluePrintNodeName().equals(other.getBluePrintNodeName());
} else if (this.odlTableName == null && other.odlTableName != null) {
return false;
}
if (this.odlTableName != null && other.odlTableName == null) {
return false;
- }
- else {
+ } else {
return this.odlTableName.equals(other.odlTableName);
}
}
public int hashCode() {
if (myInterfaceString != null) {
return myInterfaceString.hashCode();
- } else if (odlNode != null) {
+ } else if (this.odlSchemaNodes != null) {
return bluePrintTableName.hashCode();
}
return 0;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql;
import java.lang.reflect.Field;
+import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
import org.opendaylight.yangtools.yang.model.util.Uint32;
import org.opendaylight.yangtools.yang.model.util.Uint64;
import org.opendaylight.yangtools.yang.model.util.Uint8;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLODLUtils {
private static Map<Class<?>, Class<?>> types =
public static boolean createOpenDaylightCache(XSQLBluePrint bluePrint,Object module) {
XSQLBluePrintNode node = new XSQLBluePrintNode(module, 0,null);
- bluePrint.addToBluePrintCache(node);
+ bluePrint.addToBluePrintCache(node,null);
collectODL(bluePrint, node, ((Module) module).getChildNodes(), 1);
return true;
}
return;
}
for (DataSchemaNode n : nodes) {
- if (n instanceof DataNodeContainer /*|| n instanceof LeafListSchemaNode*/
- || n instanceof ListSchemaNode) {
+ if (n instanceof DataNodeContainer) {
XSQLBluePrintNode bn = new XSQLBluePrintNode(n, level,parent);
- bluePrint.addToBluePrintCache(bn);
- parent.AddChild(bn);
- if (n instanceof DataNodeContainer) {
+ bn = bluePrint.addToBluePrintCache(bn,parent);
+ if (n instanceof ListSchemaNode) {
level++;
- collectODL(bluePrint, bn,
- ((DataNodeContainer) n).getChildNodes(), level);
+ collectODL(bluePrint, bn,((ListSchemaNode) n).getChildNodes(), level);
+ Set<AugmentationSchema> s = ((ListSchemaNode)n).getAvailableAugmentations();
+ if(s!=null){
+ for(AugmentationSchema as:s){
+ collectODL(bluePrint, bn,as.getChildNodes(), level);
+ }
+ }
level--;
- } else if (n instanceof ListSchemaNode) {
+ }else{
level++;
- collectODL(bluePrint, bn,
- ((ListSchemaNode) n).getChildNodes(), level);
+ collectODL(bluePrint, bn,((DataNodeContainer) n).getChildNodes(), level);
+ if(n instanceof ContainerSchemaNode){
+ Set<AugmentationSchema> s = ((ContainerSchemaNode)n).getAvailableAugmentations();
+ if(s!=null){
+ for(AugmentationSchema as:s){
+ collectODL(bluePrint, bn,as.getChildNodes(), level);
+ }
+ }
+ }
level--;
}
} else {
Field f = findField(c, name);
return f.get(o);
} catch (Exception err) {
- XSQLAdapter.log(err);
+ //XSQLAdapter.log(err);
}
return null;
}
return (Map<?, ?>) get(o, "children");
}
+ public static Collection<?> getChildrenCollection(Object o) {
+ Object value = get(o, "children");
+ if(value==null)
+ return Collections.emptyList();
+ if(value instanceof Map)
+ return ((Map<?,?>)value).values();
+ else
+ if(value instanceof Collection){
+ return (Collection<?>)value;
+ }else{
+ XSQLAdapter.log("Unknown Child Value Type="+value.getClass().getName());
+ return new ArrayList();
+ }
+ }
+
public static Object getValue(Object o) {
return get(o, "value");
}
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class JDBCProxy implements InvocationHandler {
-
+ private static final Logger LOG = LoggerFactory.getLogger(JDBCProxy.class);
private Object myObject = null;
private Class<?> myObjectClass = null;
}
@Override
- public Object invoke(Object proxy, Method method, Object[] args)
- throws Throwable {
- System.err.println("Class " + this.myObjectClass.getSimpleName()
- + " Method " + method.getName());
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ LOG.debug("Class {} Method {}", this.myObjectClass.getSimpleName(), method.getName());
return method.invoke(this.myObject, args);
}
-
}
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
import java.io.InputStream;
import java.io.Reader;
import java.io.Serializable;
import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrintNode;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLColumn;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLCriteria;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLODLUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
-public class JDBCResultSet implements Serializable, ResultSet,
- ResultSetMetaData {
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
+public class JDBCResultSet implements Serializable, ResultSet, ResultSetMetaData {
private static final long serialVersionUID = -7450200738431047057L;
+ private static final ClassLoader CLASS_LOADER = JDBCResultSet.class.getClassLoader();
+ private static final Class<?>[] PROXY_INTERFACES = new Class[] { ResultSet.class };
+ private static int nextID = 0;
private String sql = null;
private List<XSQLBluePrintNode> tablesInQuery = new ArrayList<XSQLBluePrintNode>();
private transient Map<String, Object> currentRecord = null;
private boolean finished = false;
private int id = 0;
- private static Integer nextID = new Integer(0);
public int numberOfTasks = 0;
private Map<String, Map<XSQLColumn, List<XSQLCriteria>>> criteria = new ConcurrentHashMap<String, Map<XSQLColumn, List<XSQLCriteria>>>();
private Exception err = null;
private List<Record> EMPTY_RESULT = new LinkedList<Record>();
- private transient Map<String,JDBCResultSet> subQueries = new HashMap<String,JDBCResultSet>();
+ private transient Map<String, JDBCResultSet> subQueries = new HashMap<String, JDBCResultSet>();
public ResultSet getProxy() {
- return (ResultSet) Proxy.newProxyInstance(this.getClass().getClassLoader(), new Class[] {ResultSet.class }, new JDBCProxy(this));
+ return this;
+ //return (ResultSet) Proxy.newProxyInstance(CLASS_LOADER, PROXY_INTERFACES, new JDBCProxy(this));
}
public void setSQL(String _sql) {
this.sql = _sql;
}
- public JDBCResultSet addSubQuery(String _sql,String logicalName) {
+ public JDBCResultSet addSubQuery(String _sql, String logicalName) {
if (subQueries == null) {
- subQueries = new HashMap<String,JDBCResultSet>();
+ subQueries = new HashMap<String, JDBCResultSet>();
}
JDBCResultSet rs = new JDBCResultSet(_sql);
- this.subQueries.put(logicalName,rs);
+ this.subQueries.put(logicalName, rs);
return rs;
}
- public Map<String,JDBCResultSet> getSubQueries() {
- if (this.subQueries==null) {
+ public Map<String, JDBCResultSet> getSubQueries() {
+ if (this.subQueries == null) {
this.subQueries = new HashMap<>();
}
return this.subQueries;
}
}
- public int isObjectFitCriteria(Map<String, Object> objValues, String tableName) {
+ public int isObjectFitCriteria(Map<String, Object> objValues,
+ String tableName) {
Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria
.get(tableName);
if (tblCriteria == null) {
}
public static class Record {
+ // The map container the Attribute 2 the attribute value
public Map<String, Object> data = new HashMap<>();
+ // The Element Object (Possibly some kind of NormalizedNode
public Object element = null;
+ // Does this record fit the criteria
+ // In case of a list property, we first collect the list and only then
+ // we
+ // we decide which list item should be included or not.
+ public boolean fitCriteria = true;
public Map<String, Object> getRecord() {
return this.data;
}
}
- private Map<String, Object> collectColumnValues(Object node, XSQLBluePrintNode bpn) {
- Map<?, ?> subChildren = XSQLODLUtils.getChildren(node);
- Map<String, Object> result = new HashMap<>();
- for (Object stc : subChildren.values()) {
- if (stc.getClass().getName().endsWith("ImmutableAugmentationNode")) {
+ public static class RecordsContainer {
+ public List<Record> records = new LinkedList<Record>();
+ public List<Record> fitRecords = new LinkedList<Record>();
+ public Object currentObject = null;
+ }
+
+ private void collectColumnValues(RecordsContainer rContainer,
+ XSQLBluePrintNode bpn) {
+ Collection<?> subChildren = XSQLODLUtils
+ .getChildrenCollection(rContainer.currentObject);
+ Record r = new Record();
+ r.element = rContainer.currentObject;
+ for (Object stc : subChildren) {
+ if (stc.getClass().getName()
+ .endsWith("ImmutableUnkeyedListEntryNode")) {
+ r.fitCriteria = false;
+ rContainer.currentObject = stc;
+ collectColumnValues(rContainer, bpn);
+ } else if (stc.getClass().getName()
+ .endsWith("ImmutableAugmentationNode")) {
Map<?, ?> values = XSQLODLUtils.getChildren(stc);
for (Object key : values.keySet()) {
Object val = values.get(key);
Object value = XSQLODLUtils.getValue(val);
String k = XSQLODLUtils.getNodeName(val);
if (value != null) {
- result.put(bpn.getBluePrintNodeName() + "." + k,
+ r.data.put(bpn.getBluePrintNodeName() + "." + k,
value.toString());
}
}
String k = XSQLODLUtils.getNodeName(stc);
Object value = XSQLODLUtils.getValue(stc);
if (value != null) {
- result.put(bpn.getBluePrintNodeName() + "." + k,
+ r.data.put(bpn.getBluePrintNodeName() + "." + k,
value.toString());
}
}
}
- return result;
+ if (r.fitCriteria) {
+ rContainer.records.add(r);
+ }
}
- private void addToData(Record rec, XSQLBluePrintNode bpn,
- XSQLBluePrint bluePrint, Map<String, Object> fullRecord) {
+ private void addToData(Record rec, XSQLBluePrintNode bpn,XSQLBluePrint bluePrint, Map<String, Object> fullRecord) {
XSQLBluePrintNode eNodes[] = bluePrint
.getBluePrintNodeByODLTableName(XSQLODLUtils
.getNodeIdentiofier(rec.element));
String odlNodeName = XSQLODLUtils.getNodeIdentiofier(child);
if (odlNodeName == null) {
+ if (child instanceof DataContainerNode) {
+ List<Object> augChidlren = getChildren(child, tableName,
+ bluePrint);
+ result.addAll(augChidlren);
+ }
continue;
}
continue;
}
- if (child.getClass().getName().endsWith("ImmutableContainerNode")) {
+ if (child.getClass().getName().endsWith("ImmutableUnkeyedListNode")) {
+ result.add(child);
+ } else if (child.getClass().getName()
+ .endsWith("ImmutableContainerNode")) {
result.add(child);
} else if (child.getClass().getName()
.endsWith("ImmutableAugmentationNode")) {
}
} else if (child.getClass().getName().endsWith("ImmutableMapNode")) {
result.addAll(XSQLODLUtils.getMChildren(child));
+ } else {
+ XSQLAdapter.log("Missed Node Data OF Type="
+ + child.getClass().getName());
}
}
return result;
}
- public List<Record> addRecords(Object element, XSQLBluePrintNode node,boolean root, String tableName, XSQLBluePrint bluePrint) {
+ public List<Record> addRecords(Object element, XSQLBluePrintNode node,
+ boolean root, String tableName, XSQLBluePrint bluePrint) {
List<Record> result = new LinkedList<Record>();
- //In case this is a sibling to the requested table, the elenment type
- //won't be in the path of the leaf node
- if(node==null){
- return result;
- }
String nodeID = XSQLODLUtils.getNodeIdentiofier(element);
if (node.getODLTableName().equals(nodeID)) {
- XSQLBluePrintNode bluePrintNode = bluePrint.getBluePrintNodeByODLTableName(nodeID)[0];
- Record rec = new Record();
- rec.element = element;
- XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode.getBluePrintNodeName());
- if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName()) || bpn != null) {
- Map<String, Object> allKeyValues = collectColumnValues(element, bpn);
- if (!(isObjectFitCriteria(allKeyValues,
- bpn.getBluePrintNodeName()) == 1)) {
- return EMPTY_RESULT;
+ XSQLBluePrintNode bluePrintNode = bluePrint
+ .getBluePrintNodeByODLTableName(nodeID)[0];
+ RecordsContainer rContainer = new RecordsContainer();
+ rContainer.currentObject = element;
+ XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode
+ .getBluePrintNodeName());
+ if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName())
+ || bpn != null) {
+ collectColumnValues(rContainer, bpn);
+ for (Record r : rContainer.records) {
+ if (!(isObjectFitCriteria(r.data,
+ bpn.getBluePrintNodeName()) == 1)) {
+ r.fitCriteria = false;
+ }
+ if (r.fitCriteria) {
+ Record rec = new Record();
+ rec.element = r.element;
+ addToData(rec, bpn, bluePrint, r.data);
+ rContainer.fitRecords.add(rec);
+ }
}
- addToData(rec, bpn, bluePrint, allKeyValues);
+ if (rContainer.fitRecords.isEmpty())
+ return EMPTY_RESULT;
}
- if (root) {
- addRecord(rec.data);
+ if (rContainer.records.isEmpty()) {
+ Record rec = new Record();
+ rec.element = rContainer.currentObject;
+ if (root) {
+ addRecord(rec.data);
+ } else {
+ result.add(rec);
+ }
} else {
- result.add(rec);
+ for (Record rec : rContainer.fitRecords) {
+ if (root) {
+ addRecord(rec.data);
+ } else {
+ result.add(rec);
+ }
+ }
}
return result;
}
XSQLBluePrintNode parent = node.getParent();
- List<Record> subRecords = addRecords(element, parent, false, tableName,bluePrint);
+ List<Record> subRecords = addRecords(element, parent, false, tableName,
+ bluePrint);
for (Record subRec : subRecords) {
List<Object> subO = getChildren(subRec.element, tableName,
bluePrint);
if (subO != null) {
for (Object subData : subO) {
- Record rec = new Record();
- rec.element = subData;
- rec.data.putAll(subRec.data);
+ RecordsContainer rContainer = new RecordsContainer();
+ rContainer.currentObject = subData;
- String recID = XSQLODLUtils.getNodeIdentiofier(rec.element);
+ String recID = XSQLODLUtils
+ .getNodeIdentiofier(rContainer.currentObject);
XSQLBluePrintNode eNodes[] = bluePrint
.getBluePrintNodeByODLTableName(recID);
XSQLBluePrintNode bpn = null;
break;
}
}
- boolean isObjectInCriteria = true;
if (bpn != null) {
- Map<String, Object> allKeyValues = collectColumnValues(rec.element, bpn);
- if ((isObjectFitCriteria(allKeyValues,
- bpn.getBluePrintNodeName()) == 1)) {
- addToData(rec, bpn, bluePrint, allKeyValues);
- } else {
- isObjectInCriteria = false;
+ collectColumnValues(rContainer, bpn);
+ for (Record r : rContainer.records) {
+ if ((isObjectFitCriteria(r.data,
+ bpn.getBluePrintNodeName()) == 1)) {
+ Record rec = new Record();
+ rec.data.putAll(subRec.data);
+ rec.element = r.element;
+ addToData(rec, bpn, bluePrint, r.data);
+ } else {
+ r.fitCriteria = false;
+ }
}
}
-
- if (isObjectInCriteria) {
+ if (rContainer.records.isEmpty()) {
+ Record rec = new Record();
+ rec.data.putAll(subRec.data);
+ rec.element = rContainer.currentObject;
if (root) {
if (!rec.data.isEmpty()) {
addRecord(rec.data);
} else {
result.add(rec);
}
+ } else {
+ for (Record r : rContainer.records) {
+ r.data.putAll(subRec.data);
+ if (r.fitCriteria) {
+ if (root) {
+ if (!r.data.isEmpty()) {
+ addRecord(r.data);
+ }
+ } else {
+ result.add(r);
+ }
+ }
+ }
}
}
}
}
-
return result;
}
}
}
- public static void execute(JDBCResultSet rs, XSQLAdapter adapter)
- throws SQLException {
+ public static void execute(JDBCResultSet rs, XSQLAdapter adapter)throws SQLException {
if(rs.getSQL().toLowerCase().trim().equals("select 1")){
rs.setFinished(true);
return;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.xsql;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQL;
import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQLBuilder;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
- * Created by root on 6/26/14.
- */
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLProvider implements AutoCloseable {
public static final InstanceIdentifier<XSQL> ID = InstanceIdentifier.builder(XSQL.class).build();
- private static final Logger LOG = LoggerFactory.getLogger(XSQLProvider.class);
+ //public static final InstanceIdentifier<SalTest> ID2 = InstanceIdentifier.builder(SalTest.class).build();
public void close() {
}
- public XSQL buildXSQL(DataProviderService dps) {
+ public XSQL buildXSQL(DataBroker dps) {
+ XSQLAdapter.log("Building XSL...");
XSQLBuilder builder = new XSQLBuilder();
builder.setPort("34343");
XSQL xsql = builder.build();
try {
if (dps != null) {
- final DataModificationTransaction t = dps.beginTransaction();
- t.removeOperationalData(ID);
- t.putOperationalData(ID,xsql);
- t.commit().get();
+ XSQLAdapter.log("Starting TRansaction...");
+ WriteTransaction t = dps.newReadWriteTransaction();
+ t.delete(LogicalDatastoreType.OPERATIONAL, ID);
+ t.put(LogicalDatastoreType.OPERATIONAL,ID,xsql);
+ XSQLAdapter.log("Submitting...");
+ t.submit();
}
} catch (Exception e) {
- LOG.warn("Failed to update XSQL port status, ", e);
+ XSQLAdapter.log(e);
}
return xsql;
}
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.xsql.XSQLProvider;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
public class XSQLModule extends org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.AbstractXSQLModule {
+ private static final long SLEEP_TIME_BEFORE_CREATING_TRANSACTION = 10000;
public XSQLModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
XSQLAdapter xsqlAdapter = XSQLAdapter.getInstance();
getSchemaServiceDependency().registerSchemaContextListener(xsqlAdapter);
xsqlAdapter.setDataBroker(getAsyncDataBrokerDependency());
- XSQLProvider p = new XSQLProvider();
- //p.buildXSQL(getDataBrokerDependency());
+ final XSQLProvider p = new XSQLProvider();
+ Runnable runthis = new Runnable() {
+ @Override
+ public void run() {
+ try{Thread.sleep(SLEEP_TIME_BEFORE_CREATING_TRANSACTION);}catch(Exception err){}
+ p.buildXSQL(getDataBrokerDependency());
+ }
+ };
return p;
}
-
}
</type>
<name>XSQL</name>
<data-broker>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
- <name>binding-data-broker</name>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
</data-broker>
<async-data-broker>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
case XSQL {
when "/config:modules/config:module/config:type = 'XSQL'";
- container data-broker {
+ container data-broker {
uses config:service-ref {
refine type {
mandatory false;
- config:required-identity mdsal:binding-data-broker;
+ config:required-identity mdsal:binding-async-data-broker;
}
}
- }
+ }
container async-data-broker {
uses config:service-ref {
import java.io.InputStream;
import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Set;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCResultSet;
import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCServer;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
public class XSQLTest {
-
- XSQLBluePrint bluePrint = null;
+ private static final String DATASTORE_TEST_YANG = "/sal-persisted-dom-test.yang";
+ private XSQLBluePrint bluePrint = null;
+ //private static SchemaContext schemaContext = null;
+ @BeforeClass
+ public static void loadSchemaContext(){
+ //schemaContext = createTestContext();
+ }
@Before
public void before() {
System.out.print("*** XSQL Tests -");
System.out.println(str);
}
+
+ public static final InputStream getDatastoreTestInputStream() {
+ return getInputStream(DATASTORE_TEST_YANG);
+ }
+
+ private static InputStream getInputStream(final String resourceName) {
+ return XSQLTest.class.getResourceAsStream(DATASTORE_TEST_YANG);
+ }
+
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(getDatastoreTestInputStream()));
+ return parser.resolveSchemaContext(modules);
+ }
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-dummy-distributed-datastore</artifactId>
<packaging>bundle</packaging>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
- <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-inmemory-datastore</artifactId>
package org.opendaylight.controller.md.sal.dom.store.impl;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-final class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
- private final SnapshotBackedWriteTransaction transaction;
- private final DOMStoreThreePhaseCommitCohort delegate;
+final class ChainedTransactionCommitImpl extends InMemoryDOMStoreThreePhaseCommitCohort {
private final DOMStoreTransactionChainImpl txChain;
- protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
- final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- this.transaction = Preconditions.checkNotNull(transaction);
- this.delegate = Preconditions.checkNotNull(delegate);
+ ChainedTransactionCommitImpl(final InMemoryDOMDataStore store, final SnapshotBackedWriteTransaction<String> transaction,
+ final DataTreeModification modification, final DOMStoreTransactionChainImpl txChain) {
+ super(store, transaction, modification);
this.txChain = Preconditions.checkNotNull(txChain);
}
- @Override
- public ListenableFuture<Boolean> canCommit() {
- return delegate.canCommit();
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return delegate.preCommit();
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return delegate.abort();
- }
-
@Override
public ListenableFuture<Void> commit() {
- ListenableFuture<Void> commitFuture = delegate.commit();
- Futures.addCallback(commitFuture, new FutureCallback<Void>() {
- @Override
- public void onFailure(final Throwable t) {
- txChain.onTransactionFailed(transaction, t);
- }
-
- @Override
- public void onSuccess(final Void result) {
- txChain.onTransactionCommited(transaction);
- }
- });
- return commitFuture;
+ ListenableFuture<Void> ret = super.commit();
+ txChain.transactionCommited(getTransaction());
+ return ret;
}
+
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl;
import com.google.common.base.Preconditions;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.AbstractSnapshotBackedTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-final class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
- private static abstract class State {
- /**
- * Allocate a new snapshot.
- *
- * @return A new snapshot
- */
- protected abstract DataTreeSnapshot getSnapshot();
- }
-
- private static final class Idle extends State {
- private final InMemoryDOMDataStore store;
-
- Idle(final InMemoryDOMDataStore store) {
- this.store = Preconditions.checkNotNull(store);
- }
-
- @Override
- protected DataTreeSnapshot getSnapshot() {
- return store.takeSnapshot();
- }
- }
-
- /**
- * We have a transaction out there.
- */
- private static final class Allocated extends State {
- private static final AtomicReferenceFieldUpdater<Allocated, DataTreeSnapshot> SNAPSHOT_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(Allocated.class, DataTreeSnapshot.class, "snapshot");
- private final DOMStoreWriteTransaction transaction;
- private volatile DataTreeSnapshot snapshot;
-
- Allocated(final DOMStoreWriteTransaction transaction) {
- this.transaction = Preconditions.checkNotNull(transaction);
- }
-
- public DOMStoreWriteTransaction getTransaction() {
- return transaction;
- }
-
- @Override
- protected DataTreeSnapshot getSnapshot() {
- final DataTreeSnapshot ret = snapshot;
- Preconditions.checkState(ret != null, "Previous transaction %s is not ready yet", transaction.getIdentifier());
- return ret;
- }
-
- void setSnapshot(final DataTreeSnapshot snapshot) {
- final boolean success = SNAPSHOT_UPDATER.compareAndSet(this, null, snapshot);
- Preconditions.checkState(success, "Transaction %s has already been marked as ready", transaction.getIdentifier());
- }
- }
-
- /**
- * Chain is logically shut down, no further allocation allowed.
- */
- private static final class Shutdown extends State {
- private final String message;
-
- Shutdown(final String message) {
- this.message = Preconditions.checkNotNull(message);
- }
-
- @Override
- protected DataTreeSnapshot getSnapshot() {
- throw new IllegalStateException(message);
- }
- }
-
- private static final AtomicReferenceFieldUpdater<DOMStoreTransactionChainImpl, State> STATE_UPDATER =
- AtomicReferenceFieldUpdater.newUpdater(DOMStoreTransactionChainImpl.class, State.class, "state");
- private static final Logger LOG = LoggerFactory.getLogger(DOMStoreTransactionChainImpl.class);
- private static final Shutdown CLOSED = new Shutdown("Transaction chain is closed");
- private static final Shutdown FAILED = new Shutdown("Transaction chain has failed");
+final class DOMStoreTransactionChainImpl extends AbstractSnapshotBackedTransactionChain<String> {
private final InMemoryDOMDataStore store;
- private final Idle idleState;
- private volatile State state;
DOMStoreTransactionChainImpl(final InMemoryDOMDataStore store) {
this.store = Preconditions.checkNotNull(store);
- idleState = new Idle(store);
- state = idleState;
- }
-
- private Entry<State, DataTreeSnapshot> getSnapshot() {
- final State localState = state;
- return new SimpleEntry<>(localState, localState.getSnapshot());
- }
-
- private boolean recordTransaction(final State expected, final DOMStoreWriteTransaction transaction) {
- final State state = new Allocated(transaction);
- return STATE_UPDATER.compareAndSet(this, expected, state);
}
@Override
- public DOMStoreReadTransaction newReadOnlyTransaction() {
- final Entry<State, DataTreeSnapshot> entry = getSnapshot();
- return new SnapshotBackedReadTransaction(store.nextIdentifier(), store.getDebugTransactions(), entry.getValue());
+ protected DOMStoreThreePhaseCommitCohort createCohort(final SnapshotBackedWriteTransaction<String> tx, final DataTreeModification modification) {
+ return new ChainedTransactionCommitImpl(store, tx, modification, this);
}
@Override
- public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- Entry<State, DataTreeSnapshot> entry;
- DOMStoreReadWriteTransaction ret;
-
- do {
- entry = getSnapshot();
- ret = new SnapshotBackedReadWriteTransaction(store.nextIdentifier(),
- store.getDebugTransactions(), entry.getValue(), this);
- } while (!recordTransaction(entry.getKey(), ret));
-
- return ret;
- }
-
- @Override
- public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- Entry<State, DataTreeSnapshot> entry;
- DOMStoreWriteTransaction ret;
-
- do {
- entry = getSnapshot();
- ret = new SnapshotBackedWriteTransaction(store.nextIdentifier(),
- store.getDebugTransactions(), entry.getValue(), this);
- } while (!recordTransaction(entry.getKey(), ret));
-
- return ret;
+ protected DataTreeSnapshot takeSnapshot() {
+ return store.takeSnapshot();
}
@Override
- protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
- final State localState = state;
- if (localState instanceof Allocated) {
- final Allocated allocated = (Allocated)localState;
- if (allocated.getTransaction().equals(tx)) {
- final boolean success = STATE_UPDATER.compareAndSet(this, localState, idleState);
- if (!success) {
- LOG.info("State already transitioned from {} to {}", localState, state);
- }
- }
- }
+ protected String nextTransactionIdentifier() {
+ return store.nextIdentifier();
}
@Override
- protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
- final State localState = state;
-
- if (localState instanceof Allocated) {
- final Allocated allocated = (Allocated)localState;
- final DOMStoreWriteTransaction transaction = allocated.getTransaction();
- Preconditions.checkState(tx.equals(transaction), "Mis-ordered ready transaction %s last allocated was %s", tx, transaction);
- allocated.setSnapshot(tree);
- } else {
- LOG.debug("Ignoring transaction {} readiness due to state {}", tx, localState);
- }
-
- return new ChainedTransactionCommitImpl(tx, store.transactionReady(tx, tree), this);
+ protected boolean getDebugTransactions() {
+ return store.getDebugTransactions();
}
- @Override
- public void close() {
- final State localState = state;
-
- do {
- Preconditions.checkState(!CLOSED.equals(localState), "Transaction chain {} has been closed", this);
-
- if (FAILED.equals(localState)) {
- LOG.debug("Ignoring user close in failed state");
- return;
- }
- } while (!STATE_UPDATER.compareAndSet(this, localState, CLOSED));
- }
-
- void onTransactionFailed(final SnapshotBackedWriteTransaction transaction, final Throwable t) {
- LOG.debug("Transaction chain {} failed on transaction {}", this, transaction, t);
- state = FAILED;
- }
-
- void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If the committed transaction was the one we allocated last,
- // we clear it and the ready snapshot, so the next transaction
- // allocated refers to the data tree directly.
- final State localState = state;
-
- if (!(localState instanceof Allocated)) {
- LOG.debug("Ignoring successful transaction {} in state {}", transaction, localState);
- return;
- }
-
- final Allocated allocated = (Allocated)localState;
- final DOMStoreWriteTransaction tx = allocated.getTransaction();
- if (!tx.equals(transaction)) {
- LOG.debug("Ignoring non-latest successful transaction {} in state {}", transaction, allocated);
- return;
- }
-
- if (!STATE_UPDATER.compareAndSet(this, localState, idleState)) {
- LOG.debug("Transaction chain {} has already transitioned from {} to {}, not making it idle", this, localState, state);
- }
+ void transactionCommited(final SnapshotBackedWriteTransaction<String> transaction) {
+ super.onTransactionCommited(transaction);
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import static com.google.common.base.Preconditions.checkState;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedTransactions;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager.Invoker;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
*
* Implementation of {@link DOMStore} which uses {@link DataTree} and other
* classes such as {@link SnapshotBackedWriteTransaction}.
- * {@link SnapshotBackedReadTransaction} and {@link ResolveDataChangeEventsTask}
+ * {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedReadTransaction} and {@link ResolveDataChangeEventsTask}
* to implement {@link DOMStore} contract.
*
*/
-public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable, DOMStoreTreeChangePublisher {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype<String> implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable, DOMStoreTreeChangePublisher {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
- private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
- private static final ListenableFuture<Boolean> CAN_COMMIT_FUTURE = Futures.immediateFuture(Boolean.TRUE);
private static final Invoker<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> DCL_NOTIFICATION_MGR_INVOKER =
new Invoker<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent>() {
@Override
public DOMStoreReadTransaction newReadOnlyTransaction() {
- return new SnapshotBackedReadTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot());
+ return SnapshotBackedTransactions.newReadTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot());
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- return new SnapshotBackedReadWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
+ return SnapshotBackedTransactions.newReadWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- return new SnapshotBackedWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
+ return SnapshotBackedTransactions.newWriteTransaction(nextIdentifier(), debugTransactions, dataTree.takeSnapshot(), this);
}
@Override
}
@Override
- public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
- return changePublisher.registerTreeChangeListener(treeId, listener);
+ public synchronized <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
+ /*
+ * Make sure commit is not occurring right now. Listener has to be
+ * registered and its state capture enqueued at a consistent point.
+ */
+ return changePublisher.registerTreeChangeListener(treeId, listener, dataTree.takeSnapshot());
}
@Override
- protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ protected void transactionAborted(final SnapshotBackedWriteTransaction<String> tx) {
LOG.debug("Tx: {} is closed.", tx.getIdentifier());
}
@Override
- protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
- LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
- return new ThreePhaseCommitImpl(tx, tree);
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction<String> tx, final DataTreeModification modification) {
+ LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), modification);
+ return new InMemoryDOMStoreThreePhaseCommitCohort(this, tx, modification);
}
- Object nextIdentifier() {
+ String nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private final class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
- private final SnapshotBackedWriteTransaction transaction;
- private final DataTreeModification modification;
-
- private ResolveDataChangeEventsTask listenerResolver;
- private DataTreeCandidate candidate;
-
- public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
- this.transaction = writeTransaction;
- this.modification = modification;
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- try {
- dataTree.validate(modification);
- LOG.debug("Store Transaction: {} can be committed", transaction.getIdentifier());
- return CAN_COMMIT_FUTURE;
- } catch (ConflictingModificationAppliedException e) {
- LOG.warn("Store Tx: {} Conflicting modification for {}.", transaction.getIdentifier(),
- e.getPath());
- transaction.warnDebugContext(LOG);
- return Futures.immediateFailedFuture(new OptimisticLockFailedException("Optimistic lock failed.", e));
- } catch (DataValidationFailedException e) {
- LOG.warn("Store Tx: {} Data Precondition failed for {}.", transaction.getIdentifier(),
- e.getPath(), e);
- transaction.warnDebugContext(LOG);
-
- // For debugging purposes, allow dumping of the modification. Coupled with the above
- // precondition log, it should allow us to understand what went on.
- LOG.trace("Store Tx: {} modifications: {} tree: {}", modification, dataTree);
-
- return Futures.immediateFailedFuture(new TransactionCommitFailedException("Data did not pass validation.", e));
- } catch (Exception e) {
- LOG.warn("Unexpected failure in validation phase", e);
- return Futures.immediateFailedFuture(e);
- }
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- try {
- candidate = dataTree.prepare(modification);
- listenerResolver = ResolveDataChangeEventsTask.create(candidate, listenerTree);
- return SUCCESSFUL_FUTURE;
- } catch (Exception e) {
- LOG.warn("Unexpected failure in pre-commit phase", e);
- return Futures.immediateFailedFuture(e);
- }
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- candidate = null;
- return SUCCESSFUL_FUTURE;
- }
+ void validate(final DataTreeModification modification) throws DataValidationFailedException {
+ dataTree.validate(modification);
+ }
- @Override
- public ListenableFuture<Void> commit() {
- checkState(candidate != null, "Proposed subtree must be computed");
-
- /*
- * The commit has to occur atomically with regard to listener
- * registrations.
- */
- synchronized (InMemoryDOMDataStore.this) {
- dataTree.commit(candidate);
- changePublisher.publishChange(candidate);
- listenerResolver.resolve(dataChangeListenerNotificationManager);
- }
+ DataTreeCandidate prepare(final DataTreeModification modification) {
+ return dataTree.prepare(modification);
+ }
- return SUCCESSFUL_FUTURE;
- }
+ synchronized void commit(final DataTreeCandidate candidate) {
+ dataTree.commit(candidate);
+ changePublisher.publishChange(candidate);
+ ResolveDataChangeEventsTask.create(candidate, listenerTree).resolve(dataChangeListenerNotificationManager);
}
}
--- /dev/null
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import static com.google.common.base.Preconditions.checkState;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class InMemoryDOMStoreThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
+ private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMStoreThreePhaseCommitCohort.class);
+ private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
+ private static final ListenableFuture<Boolean> CAN_COMMIT_FUTURE = Futures.immediateFuture(Boolean.TRUE);
+ private final SnapshotBackedWriteTransaction<String> transaction;
+ private final DataTreeModification modification;
+ private final InMemoryDOMDataStore store;
+ private DataTreeCandidate candidate;
+
+ public InMemoryDOMStoreThreePhaseCommitCohort(final InMemoryDOMDataStore store, final SnapshotBackedWriteTransaction<String> writeTransaction, final DataTreeModification modification) {
+ this.transaction = Preconditions.checkNotNull(writeTransaction);
+ this.modification = Preconditions.checkNotNull(modification);
+ this.store = Preconditions.checkNotNull(store);
+ }
+
+ private static void warnDebugContext(final AbstractDOMStoreTransaction<?> transaction) {
+ final Throwable ctx = transaction.getDebugContext();
+ if (ctx != null) {
+ LOG.warn("Transaction {} has been allocated in the following context", transaction.getIdentifier(), ctx);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<Boolean> canCommit() {
+ try {
+ store.validate(modification);
+ LOG.debug("Store Transaction: {} can be committed", getTransaction().getIdentifier());
+ return CAN_COMMIT_FUTURE;
+ } catch (ConflictingModificationAppliedException e) {
+ LOG.warn("Store Tx: {} Conflicting modification for {}.", getTransaction().getIdentifier(),
+ e.getPath());
+ warnDebugContext(getTransaction());
+ return Futures.immediateFailedFuture(new OptimisticLockFailedException("Optimistic lock failed.", e));
+ } catch (DataValidationFailedException e) {
+ LOG.warn("Store Tx: {} Data Precondition failed for {}.", getTransaction().getIdentifier(),
+ e.getPath(), e);
+ warnDebugContext(getTransaction());
+
+ // For debugging purposes, allow dumping of the modification. Coupled with the above
+ // precondition log, it should allow us to understand what went on.
+ LOG.trace("Store Tx: {} modifications: {} tree: {}", modification, store);
+
+ return Futures.immediateFailedFuture(new TransactionCommitFailedException("Data did not pass validation.", e));
+ } catch (Exception e) {
+ LOG.warn("Unexpected failure in validation phase", e);
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<Void> preCommit() {
+ try {
+ candidate = store.prepare(modification);
+ return SUCCESSFUL_FUTURE;
+ } catch (Exception e) {
+ LOG.warn("Unexpected failure in pre-commit phase", e);
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<Void> abort() {
+ candidate = null;
+ return SUCCESSFUL_FUTURE;
+ }
+
+ protected final SnapshotBackedWriteTransaction<String> getTransaction() {
+ return transaction;
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ checkState(candidate != null, "Proposed subtree must be computed");
+
+ /*
+ * The commit has to occur atomically with regard to listener
+ * registrations.
+ */
+ store.commit(candidate);
+ return SUCCESSFUL_FUTURE;
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.base.Optional;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTreeChangePublisher;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager.Invoker;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations, final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
- final DataTreeCandidate candidate = new SimpleDataTreeCandidate(path, node);
+ final DataTreeCandidate candidate = DataTreeCandidates.newDataTreeCandidate(path, node);
for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
LOG.debug("Enqueueing candidate {} to registration {}", candidate, registrations);
// FIXME: remove the queue for this registration and make sure we clear it
}
+ <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener, final DataTreeSnapshot snapshot) {
+ final AbstractDOMDataTreeChangeListenerRegistration<L> reg = registerTreeChangeListener(treeId, listener);
+
+ final Optional<NormalizedNode<?, ?>> node = snapshot.readNode(treeId);
+ if (node.isPresent()) {
+ final DataTreeCandidate candidate = DataTreeCandidates.fromNormalizedNode(treeId, node.get());
+ notificationManager.submitNotification(reg, candidate);
+ }
+
+ return reg;
+ }
+
synchronized void publishChange(@Nonnull final DataTreeCandidate candidate) {
// Runs synchronized with registrationRemoved()
processCandidateTree(candidate);
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.annotations.Beta;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
* Computes data change events for all affected registered listeners in data
* tree.
*/
-final class ResolveDataChangeEventsTask {
+@Beta
+public final class ResolveDataChangeEventsTask {
private static final Logger LOG = LoggerFactory.getLogger(ResolveDataChangeEventsTask.class);
private final DataTreeCandidate candidate;
private Multimap<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> collectedEvents;
- public ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
+ private ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
this.candidate = Preconditions.checkNotNull(candidate);
this.listenerRoot = Preconditions.checkNotNull(listenerTree);
}
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-
-final class SimpleDataTreeCandidate implements DataTreeCandidate {
- private final YangInstanceIdentifier rootPath;
- private final DataTreeCandidateNode rootNode;
-
- SimpleDataTreeCandidate(final YangInstanceIdentifier rootPath, final DataTreeCandidateNode rootNode) {
- this.rootPath = Preconditions.checkNotNull(rootPath);
- this.rootNode = Preconditions.checkNotNull(rootNode);
- }
-
- @Override
- public DataTreeCandidateNode getRootNode() {
- return rootNode;
- }
-
- @Override
- public YangInstanceIdentifier getRootPath() {
- return rootPath;
- }
-
- @Override
- public String toString() {
- return MoreObjects.toStringHelper(this).add("rootPath", rootPath).add("rootNode", rootNode).toString();
- }
-}
\ No newline at end of file
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedTransactions;
+import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
public class InMemoryDataStoreTest {
private SchemaContext schemaContext;
Mockito.doThrow( new RuntimeException( "mock ex" ) ).when( mockSnapshot )
.readNode( Mockito.any( YangInstanceIdentifier.class ) );
- DOMStoreReadTransaction readTx = new SnapshotBackedReadTransaction("1", true, mockSnapshot);
+ DOMStoreReadTransaction readTx = SnapshotBackedTransactions.newReadTransaction("1", true, mockSnapshot);
doReadAndThrowEx( readTx );
}
Mockito.doThrow( new RuntimeException( "mock ex" ) ).when( mockModification )
.readNode( Mockito.any( YangInstanceIdentifier.class ) );
Mockito.doReturn( mockModification ).when( mockSnapshot ).newModification();
- TransactionReadyPrototype mockReady = Mockito.mock( TransactionReadyPrototype.class );
- DOMStoreReadTransaction readTx = new SnapshotBackedReadWriteTransaction("1", false, mockSnapshot, mockReady);
+ @SuppressWarnings("unchecked")
+ TransactionReadyPrototype<String> mockReady = Mockito.mock( TransactionReadyPrototype.class );
+ DOMStoreReadTransaction readTx = SnapshotBackedTransactions.newReadWriteTransaction("1", false, mockSnapshot, mockReady);
doReadAndThrowEx( readTx );
}
- private void doReadAndThrowEx( final DOMStoreReadTransaction readTx ) throws Throwable {
-
+ private static void doReadAndThrowEx( final DOMStoreReadTransaction readTx ) throws Throwable {
try {
readTx.read(TestModel.TEST_PATH).get();
} catch( ExecutionException e ) {
<parent>\r
<artifactId>sal-parent</artifactId>\r
<groupId>org.opendaylight.controller</groupId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.3.0-SNAPSHOT</version>\r
</parent>\r
\r
<groupId>org.opendaylight.controller</groupId>\r
<dependency>\r
<groupId>org.apache.karaf.shell</groupId>\r
<artifactId>org.apache.karaf.shell.console</artifactId>\r
- <version>3.0.1</version>\r
+ <version>${karaf.version}</version>\r
</dependency>\r
\r
<dependency>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal-dom-xsql</artifactId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.3.0-SNAPSHOT</version>\r
</dependency>\r
</dependencies>\r
\r
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-netconf-connector</artifactId>
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collection;
-import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
* Create rpc implementation capable of handling RPC for monitoring and notifications even before the schemas of remote device are downloaded
*/
static NetconfDeviceRpc getRpcForInitialization(final NetconfDeviceCommunicator listener) {
- return new NetconfDeviceRpc(INIT_SCHEMA_CTX, listener, new NetconfMessageTransformer(INIT_SCHEMA_CTX));
+ return new NetconfDeviceRpc(INIT_SCHEMA_CTX, listener, new NetconfMessageTransformer(INIT_SCHEMA_CTX, false));
}
@VisibleForTesting
void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionPreferences remoteSessionCapabilities, final DOMRpcService deviceRpc) {
- messageTransformer = new NetconfMessageTransformer(result);
+ messageTransformer = new NetconfMessageTransformer(result, true);
updateTransformer(messageTransformer);
// salFacade.onDeviceConnected has to be called before the notification handler is initialized
@Override
public DeviceSources call() throws Exception {
-
- final Set<SourceIdentifier> requiredSources = Sets.newHashSet(Collections2.transform(
- remoteSessionCapabilities.getModuleBasedCaps(), QNAME_TO_SOURCE_ID_FUNCTION));
-
- // If monitoring is not supported, we will still attempt to create schema, sources might be already provided
final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
logger.debug("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
- final Set<SourceIdentifier> providedSources = Sets.newHashSet(Collections2.transform(
- availableSchemas.getAvailableYangSchemasQNames(), QNAME_TO_SOURCE_ID_FUNCTION));
-
- final Set<SourceIdentifier> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
+ final Set<QName> requiredSources = Sets.newHashSet(remoteSessionCapabilities.getModuleBasedCaps());
+ final Set<QName> providedSources = availableSchemas.getAvailableYangSchemasQNames();
+ final Set<QName> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
if (!requiredSourcesNotProvided.isEmpty()) {
logger.warn("{}: Netconf device does not provide all yang models reported in hello message capabilities, required but not provided: {}",
id, requiredSourcesNotProvided);
logger.warn("{}: Attempting to build schema context from required sources", id);
}
-
// Here all the sources reported in netconf monitoring are merged with those reported in hello.
// It is necessary to perform this since submodules are not mentioned in hello but still required.
// This clashes with the option of a user to specify supported yang models manually in configuration for netconf-connector
// and as a result one is not able to fully override yang models of a device. It is only possible to add additional models.
- final Set<SourceIdentifier> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
+ final Set<QName> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
if (!providedSourcesNotRequired.isEmpty()) {
logger.warn("{}: Netconf device provides additional yang models not reported in hello message capabilities: {}",
id, providedSourcesNotRequired);
* Contains RequiredSources - sources from capabilities.
*/
private static final class DeviceSources {
- private final Collection<SourceIdentifier> requiredSources;
- private final Collection<SourceIdentifier> providedSources;
+ private final Set<QName> requiredSources;
+ private final Set<QName> providedSources;
- public DeviceSources(final Collection<SourceIdentifier> requiredSources, final Collection<SourceIdentifier> providedSources) {
+ public DeviceSources(final Set<QName> requiredSources, final Set<QName> providedSources) {
this.requiredSources = requiredSources;
this.providedSources = providedSources;
}
- public Collection<SourceIdentifier> getRequiredSources() {
+ public Set<QName> getRequiredSourcesQName() {
return requiredSources;
}
- public Collection<SourceIdentifier> getProvidedSources() {
+ public Set<QName> getProvidedSourcesQName() {
return providedSources;
}
+ public Collection<SourceIdentifier> getRequiredSources() {
+ return Collections2.transform(requiredSources, QNAME_TO_SOURCE_ID_FUNCTION);
+ }
+
+ public Collection<SourceIdentifier> getProvidedSources() {
+ return Collections2.transform(providedSources, QNAME_TO_SOURCE_ID_FUNCTION);
+ }
+
}
/**
// If no more sources, fail
if(requiredSources.isEmpty()) {
- handleSalInitializationFailure(new IllegalStateException(id + ": No more sources for schema context"), listener);
+ final IllegalStateException cause = new IllegalStateException(id + ": No more sources for schema context");
+ handleSalInitializationFailure(cause, listener);
+ salFacade.onDeviceFailed(cause);
return;
}
@Override
public void onSuccess(final SchemaContext result) {
logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
- final Collection<QName> filteredQNames = Sets.difference(remoteSessionCapabilities.getModuleBasedCaps(), capabilities.getUnresolvedCapabilites().keySet());
+ final Collection<QName> filteredQNames = Sets.difference(deviceSources.getProvidedSourcesQName(), capabilities.getUnresolvedCapabilites().keySet());
capabilities.addCapabilities(filteredQNames);
capabilities.addNonModuleBasedCapabilities(remoteSessionCapabilities.getNonModuleCaps());
handleSalInitializationSuccess(result, remoteSessionCapabilities, getDeviceSpecificRpc(result));
}
private NetconfDeviceRpc getDeviceSpecificRpc(final SchemaContext result) {
- return new NetconfDeviceRpc(result, listener, new NetconfMessageTransformer(result));
+ return new NetconfDeviceRpc(result, listener, new NetconfMessageTransformer(result, true));
}
private Collection<SourceIdentifier> stripMissingSource(final Collection<SourceIdentifier> requiredSources, final SourceIdentifier sIdToRemove) {
}
private Collection<QName> getQNameFromSourceIdentifiers(final Collection<SourceIdentifier> identifiers) {
- final Collection<QName> qNames = new HashSet<>();
- for (final SourceIdentifier source : identifiers) {
- final Optional<QName> qname = getQNameFromSourceIdentifier(source);
- if (qname.isPresent()) {
- qNames.add(qname.get());
+ final Collection<QName> qNames = Collections2.transform(identifiers, new Function<SourceIdentifier, QName>() {
+ @Override
+ public QName apply(final SourceIdentifier sourceIdentifier) {
+ return getQNameFromSourceIdentifier(sourceIdentifier);
}
- }
+ });
+
if (qNames.isEmpty()) {
logger.debug("Unable to map any source identfiers to a capability reported by device : " + identifiers);
}
return qNames;
}
- private Optional<QName> getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
- for (final QName qname : remoteSessionCapabilities.getModuleBasedCaps()) {
- if (qname.getLocalName().equals(identifier.getName())
- && qname.getFormattedRevision().equals(identifier.getRevision())) {
- return Optional.of(qname);
+ private QName getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
+ // Required sources are all required and provided merged in DeviceSourcesResolver
+ for (final QName qname : deviceSources.getRequiredSourcesQName()) {
+ if(qname.getLocalName().equals(identifier.getName()) == false) {
+ continue;
+ }
+
+ if(identifier.getRevision().equals(SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION) &&
+ qname.getRevision() == null) {
+ return qname;
+ }
+
+ if (qname.getFormattedRevision().equals(identifier.getRevision())) {
+ return qname;
}
}
- throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier);
+ throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier + " Available: " + deviceSources.getRequiredSourcesQName());
}
}
}
public final static class RemoteYangSchema {
private final QName qname;
- private RemoteYangSchema(final QName qname) {
+ RemoteYangSchema(final QName qname) {
this.qname = qname;
}
import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.OrderedNormalizedNodeWriter;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
private final Multimap<QName, NotificationDefinition> mappedNotifications;
private final DomToNormalizedNodeParserFactory parserFactory;
- public NetconfMessageTransformer(final SchemaContext schemaContext) {
+ public NetconfMessageTransformer(final SchemaContext schemaContext, final boolean strictParsing) {
this.counter = new MessageCounter();
this.schemaContext = schemaContext;
- parserFactory = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, schemaContext);
+ parserFactory = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, schemaContext, strictParsing);
mappedRpcs = Maps.uniqueIndex(schemaContext.getOperations(), QNAME_FUNCTION);
mappedNotifications = Multimaps.index(schemaContext.getNotifications(), QNAME_NOREV_FUNCTION);
}
private void writeNormalizedRpc(final ContainerNode normalized, final DOMResult result, final SchemaPath schemaPath, final SchemaContext baseNetconfCtx) throws IOException, XMLStreamException {
- final NormalizedNodeWriter normalizedNodeWriter;
+ final OrderedNormalizedNodeWriter normalizedNodeWriter;
NormalizedNodeStreamWriter normalizedNodeStreamWriter = null;
XMLStreamWriter writer = null;
try {
writer = NetconfMessageTransformUtil.XML_FACTORY.createXMLStreamWriter(result);
normalizedNodeStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(writer, baseNetconfCtx, schemaPath);
- normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(normalizedNodeStreamWriter);
-
- for (final DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?> editElement : normalized.getValue()) {
- normalizedNodeWriter.write(editElement);
- }
+ normalizedNodeWriter = new OrderedNormalizedNodeWriter(normalizedNodeStreamWriter, baseNetconfCtx, schemaPath);
+ Collection<DataContainerChild<?, ?>> value = (Collection) normalized.getValue();
+ normalizedNodeWriter.write(value);
normalizedNodeWriter.flush();
} finally {
try {
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connect.netconf.util;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import javax.xml.transform.dom.DOMSource;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.ModifyAction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.AttributesBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeContainerBuilder;
-import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
-import org.opendaylight.yangtools.yang.model.api.AugmentationTarget;
-import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
-import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * Transforms an instance of yang instance identifier to a filter like structure in normalized node format. Can be also used to nest the edit-config rpc content.
- * For each argument of the id, a specific normalized node is created to ensure schema context conformance.
- */
-public abstract class InstanceIdToNodes<T extends PathArgument> implements Identifiable<T> {
-
- private final T identifier;
-
- @Override
- public T getIdentifier() {
- return identifier;
- }
-
- protected InstanceIdToNodes(final T identifier) {
- this.identifier = identifier;
- }
-
- abstract InstanceIdToNodes<?> getChild(final PathArgument child) throws DataNormalizationException;
-
- public abstract NormalizedNode<?, ?> create(YangInstanceIdentifier legacyData, Optional<NormalizedNode<?, ?>> deepestChild, Optional<ModifyAction> operation);
-
- private static abstract class SimpleTypeNormalization<T extends PathArgument> extends InstanceIdToNodes<T> {
-
- protected SimpleTypeNormalization(final T identifier) {
- super(identifier);
- }
-
- @Override
- public NormalizedNode<?, ?> create(final YangInstanceIdentifier id, final Optional<NormalizedNode<?, ?>> deepestChild, final Optional<ModifyAction> operation) {
- checkNotNull(id);
- final PathArgument pathArgument = Iterables.get(id.getPathArguments(), 0);
- final NormalizedNodeAttrBuilder<? extends PathArgument, Object, ? extends NormalizedNode<? extends PathArgument, Object>> builder = getBuilder(pathArgument);
-
- if(deepestChild.isPresent()) {
- builder.withValue(deepestChild.get().getValue());
- }
-
- addModifyOpIfPresent(operation, builder);
- return builder.build();
- }
-
- protected abstract NormalizedNodeAttrBuilder<? extends PathArgument, Object, ? extends NormalizedNode<? extends PathArgument, Object>> getBuilder(PathArgument node);
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) {
- return null;
- }
- }
-
-
- public void addModifyOpIfPresent(final Optional<ModifyAction> operation, final AttributesBuilder<?> builder) {
- if(operation.isPresent()) {
- builder.withAttributes(Collections.singletonMap(NetconfMessageTransformUtil.NETCONF_OPERATION_QNAME, NetconfMessageTransformUtil.modifyOperationToXmlString(operation.get())));
- }
- }
-
- private static final class LeafNormalization extends SimpleTypeNormalization<NodeIdentifier> {
-
- protected LeafNormalization(final LeafSchemaNode potential) {
- super(new NodeIdentifier(potential.getQName()));
- }
-
- @Override
- protected NormalizedNodeAttrBuilder<NodeIdentifier, Object, LeafNode<Object>> getBuilder(final PathArgument node) {
- return Builders.leafBuilder().withNodeIdentifier(getIdentifier());
- }
- }
-
- private static final class LeafListEntryNormalization extends SimpleTypeNormalization<NodeWithValue> {
-
- public LeafListEntryNormalization(final LeafListSchemaNode potential) {
- super(new NodeWithValue(potential.getQName(), null));
- }
-
- @Override
- protected NormalizedNodeAttrBuilder<NodeWithValue, Object, LeafSetEntryNode<Object>> getBuilder(final PathArgument node) {
- Preconditions.checkArgument(node instanceof NodeWithValue);
- return Builders.leafSetEntryBuilder().withNodeIdentifier((NodeWithValue) node).withValue(((NodeWithValue) node).getValue());
- }
-
- }
-
- private static abstract class CompositeNodeNormalizationOperation<T extends PathArgument> extends
- InstanceIdToNodes<T> {
-
- protected CompositeNodeNormalizationOperation(final T identifier) {
- super(identifier);
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public final NormalizedNode<?, ?> create(final YangInstanceIdentifier id, final Optional<NormalizedNode<?, ?>> lastChild, final Optional<ModifyAction> operation) {
- checkNotNull(id);
- final Iterator<PathArgument> iterator = id.getPathArguments().iterator();
- final PathArgument legacyData = iterator.next();
-
- if (!isMixin(this) && getIdentifier().getNodeType() != null) {
- checkArgument(getIdentifier().getNodeType().equals(legacyData.getNodeType()),
- "Node QName must be %s was %s", getIdentifier().getNodeType(), legacyData.getNodeType());
- }
- final NormalizedNodeContainerBuilder builder = createBuilder(legacyData);
-
- if (iterator.hasNext()) {
- final PathArgument childPath = iterator.next();
- final InstanceIdToNodes childOp = getChildOperation(childPath);
-
- final YangInstanceIdentifier childId = YangInstanceIdentifier.create(Iterables.skip(id.getPathArguments(), 1));
- builder.addChild(childOp.create(childId, lastChild, operation));
- } else {
- if(lastChild.isPresent()) {
- builder.withValue(Lists.newArrayList((Collection<?>) lastChild.get().getValue()));
- }
- if(operation.isPresent()) {
- Preconditions.checkArgument(builder instanceof AttributesBuilder<?>);
- addModifyOpIfPresent(operation, ((AttributesBuilder<?>) builder));
- }
- }
-
- return builder.build();
- }
-
- private InstanceIdToNodes getChildOperation(final PathArgument childPath) {
- final InstanceIdToNodes childOp;
- try {
- childOp = getChild(childPath);
- } catch (final DataNormalizationException e) {
- throw new IllegalArgumentException(String.format("Failed to process child node %s", childPath), e);
- }
- checkArgument(childOp != null, "Node %s is not allowed inside %s", childPath, getIdentifier());
- return childOp;
- }
-
- @SuppressWarnings("rawtypes")
- protected abstract NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode);
- }
-
- static boolean isMixin(final InstanceIdToNodes<?> op) {
- return op instanceof MixinNormalizationOp;
- }
-
- private static abstract class DataContainerNormalizationOperation<T extends PathArgument> extends
- CompositeNodeNormalizationOperation<T> {
-
- private final DataNodeContainer schema;
- private final Map<PathArgument, InstanceIdToNodes<?>> byArg;
-
- protected DataContainerNormalizationOperation(final T identifier, final DataNodeContainer schema) {
- super(identifier);
- this.schema = schema;
- this.byArg = new ConcurrentHashMap<>();
- }
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) throws DataNormalizationException {
- InstanceIdToNodes<?> potential = byArg.get(child);
- if (potential != null) {
- return potential;
- }
- potential = fromLocalSchema(child);
- return register(potential);
- }
-
- private InstanceIdToNodes<?> fromLocalSchema(final PathArgument child) throws DataNormalizationException {
- if (child instanceof AugmentationIdentifier) {
- return fromSchemaAndQNameChecked(schema, ((AugmentationIdentifier) child).getPossibleChildNames()
- .iterator().next());
- }
- return fromSchemaAndQNameChecked(schema, child.getNodeType());
- }
-
- private InstanceIdToNodes<?> register(final InstanceIdToNodes<?> potential) {
- if (potential != null) {
- byArg.put(potential.getIdentifier(), potential);
- }
- return potential;
- }
- }
-
- private static final class ListItemNormalization extends
- DataContainerNormalizationOperation<NodeIdentifierWithPredicates> {
-
- protected ListItemNormalization(final NodeIdentifierWithPredicates identifier, final ListSchemaNode schema) {
- super(identifier, schema);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument currentArg) {
- final DataContainerNodeAttrBuilder<NodeIdentifierWithPredicates, MapEntryNode> builder = Builders
- .mapEntryBuilder().withNodeIdentifier((NodeIdentifierWithPredicates) currentArg);
- for (final Entry<QName, Object> keyValue : ((NodeIdentifierWithPredicates) currentArg).getKeyValues().entrySet()) {
- builder.addChild(Builders.leafBuilder()
- //
- .withNodeIdentifier(new NodeIdentifier(keyValue.getKey())).withValue(keyValue.getValue())
- .build());
- }
- return builder;
- }
-
- }
-
- private static final class UnkeyedListItemNormalization extends DataContainerNormalizationOperation<NodeIdentifier> {
-
- protected UnkeyedListItemNormalization(final ListSchemaNode schema) {
- super(new NodeIdentifier(schema.getQName()), schema);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.unkeyedListEntryBuilder().withNodeIdentifier(getIdentifier());
- }
-
- }
-
- private static final class ContainerTransformation extends DataContainerNormalizationOperation<NodeIdentifier> {
-
- protected ContainerTransformation(final ContainerSchemaNode schema) {
- super(new NodeIdentifier(schema.getQName()), schema);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.containerBuilder().withNodeIdentifier(getIdentifier());
- }
- }
-
- /**
- * Marker interface for Mixin nodes normalization operations
- */
- private interface MixinNormalizationOp {}
-
-
- private static final class OrderedLeafListMixinNormalization extends UnorderedLeafListMixinNormalization {
-
-
- public OrderedLeafListMixinNormalization(final LeafListSchemaNode potential) {
- super(potential);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.orderedLeafSetBuilder().withNodeIdentifier(getIdentifier());
- }
- }
-
- private static class UnorderedLeafListMixinNormalization extends CompositeNodeNormalizationOperation<NodeIdentifier> implements MixinNormalizationOp {
-
- private final InstanceIdToNodes<?> innerOp;
-
- public UnorderedLeafListMixinNormalization(final LeafListSchemaNode potential) {
- super(new NodeIdentifier(potential.getQName()));
- innerOp = new LeafListEntryNormalization(potential);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.leafSetBuilder().withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) {
- if (child instanceof NodeWithValue) {
- return innerOp;
- }
- return null;
- }
- }
-
- private static final class AugmentationNormalization extends DataContainerNormalizationOperation<AugmentationIdentifier> implements MixinNormalizationOp {
-
- public AugmentationNormalization(final AugmentationSchema augmentation, final DataNodeContainer schema) {
- //super();
- super(augmentationIdentifierFrom(augmentation), augmentationProxy(augmentation, schema));
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.augmentationBuilder().withNodeIdentifier(getIdentifier());
- }
- }
-
- private static class UnorderedMapMixinNormalization extends CompositeNodeNormalizationOperation<NodeIdentifier> implements MixinNormalizationOp {
-
- private final ListItemNormalization innerNode;
-
- public UnorderedMapMixinNormalization(final ListSchemaNode list) {
- super(new NodeIdentifier(list.getQName()));
- this.innerNode = new ListItemNormalization(new NodeIdentifierWithPredicates(list.getQName(),
- Collections.<QName, Object>emptyMap()), list);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.mapBuilder().withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) {
- if (child.getNodeType().equals(getIdentifier().getNodeType())) {
- return innerNode;
- }
- return null;
- }
- }
-
- private static class UnkeyedListMixinNormalization extends CompositeNodeNormalizationOperation<NodeIdentifier> implements MixinNormalizationOp {
-
- private final UnkeyedListItemNormalization innerNode;
-
- public UnkeyedListMixinNormalization(final ListSchemaNode list) {
- super(new NodeIdentifier(list.getQName()));
- this.innerNode = new UnkeyedListItemNormalization(list);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.unkeyedListBuilder().withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) {
- if (child.getNodeType().equals(getIdentifier().getNodeType())) {
- return innerNode;
- }
- return null;
- }
-
- }
-
- private static final class OrderedMapMixinNormalization extends UnorderedMapMixinNormalization {
-
- public OrderedMapMixinNormalization(final ListSchemaNode list) {
- super(list);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.orderedMapBuilder().withNodeIdentifier(getIdentifier());
- }
-
- }
-
- private static class ChoiceNodeNormalization extends CompositeNodeNormalizationOperation<NodeIdentifier> implements MixinNormalizationOp {
-
- private final ImmutableMap<PathArgument, InstanceIdToNodes<?>> byArg;
-
- protected ChoiceNodeNormalization(final ChoiceSchemaNode schema) {
- super(new NodeIdentifier(schema.getQName()));
- final ImmutableMap.Builder<PathArgument, InstanceIdToNodes<?>> byArgBuilder = ImmutableMap.builder();
-
- for (final ChoiceCaseNode caze : schema.getCases()) {
- for (final DataSchemaNode cazeChild : caze.getChildNodes()) {
- final InstanceIdToNodes<?> childOp = fromDataSchemaNode(cazeChild);
- byArgBuilder.put(childOp.getIdentifier(), childOp);
- }
- }
- byArg = byArgBuilder.build();
- }
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) {
- return byArg.get(child);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder<?, ?, ?, ?> createBuilder(final PathArgument compositeNode) {
- return Builders.choiceBuilder().withNodeIdentifier(getIdentifier());
- }
- }
-
- private static class AnyXmlNormalization extends InstanceIdToNodes<NodeIdentifier> {
-
- protected AnyXmlNormalization(final AnyXmlSchemaNode schema) {
- super(new NodeIdentifier(schema.getQName()));
- }
-
- @Override
- public InstanceIdToNodes<?> getChild(final PathArgument child) throws DataNormalizationException {
- return null;
- }
-
- @Override
- public NormalizedNode<?, ?> create(final YangInstanceIdentifier legacyData, final Optional<NormalizedNode<?, ?>> deepestChild, final Optional<ModifyAction> operation) {
- if(deepestChild.isPresent()) {
- Preconditions.checkState(deepestChild instanceof AnyXmlNode);
- final NormalizedNodeAttrBuilder<NodeIdentifier, DOMSource, AnyXmlNode> anyXmlBuilder =
- Builders.anyXmlBuilder().withNodeIdentifier(getIdentifier()).withValue(((AnyXmlNode) deepestChild).getValue());
- addModifyOpIfPresent(operation, anyXmlBuilder);
- return anyXmlBuilder.build();
- }
-
- final NormalizedNodeAttrBuilder<NodeIdentifier, DOMSource, AnyXmlNode> builder =
- Builders.anyXmlBuilder().withNodeIdentifier(getIdentifier());
- addModifyOpIfPresent(operation, builder);
- return builder.build();
- }
-
- }
-
- private static Optional<DataSchemaNode> findChildSchemaNode(final DataNodeContainer parent, final QName child) {
- DataSchemaNode potential = parent.getDataChildByName(child);
- if (potential == null) {
- final Iterable<ChoiceSchemaNode> choices = FluentIterable.from(parent.getChildNodes()).filter(ChoiceSchemaNode.class);
- potential = findChoice(choices, child);
- }
- return Optional.fromNullable(potential);
- }
-
- private static InstanceIdToNodes<?> fromSchemaAndQNameChecked(final DataNodeContainer schema, final QName child) throws DataNormalizationException {
- final Optional<DataSchemaNode> potential = findChildSchemaNode(schema, child);
- if (!potential.isPresent()) {
- throw new DataNormalizationException(String.format("Supplied QName %s is not valid according to schema %s, potential children nodes: %s", child, schema, schema.getChildNodes()));
- }
-
- final DataSchemaNode result = potential.get();
- // We try to look up if this node was added by augmentation
- if ((schema instanceof DataSchemaNode) && result.isAugmenting()) {
- return fromAugmentation(schema, (AugmentationTarget) schema, result);
- }
- return fromDataSchemaNode(result);
- }
-
- private static ChoiceSchemaNode findChoice(final Iterable<ChoiceSchemaNode> choices, final QName child) {
- org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode foundChoice = null;
- choiceLoop:
- for (final ChoiceSchemaNode choice : choices) {
- for (final ChoiceCaseNode caze : choice.getCases()) {
- if (findChildSchemaNode(caze, child).isPresent()) {
- foundChoice = choice;
- break choiceLoop;
- }
- }
- }
- return foundChoice;
- }
-
- private static AugmentationIdentifier augmentationIdentifierFrom(final AugmentationSchema augmentation) {
- final ImmutableSet.Builder<QName> potentialChildren = ImmutableSet.builder();
- for (final DataSchemaNode child : augmentation.getChildNodes()) {
- potentialChildren.add(child.getQName());
- }
- return new AugmentationIdentifier(potentialChildren.build());
- }
-
- private static DataNodeContainer augmentationProxy(final AugmentationSchema augmentation, final DataNodeContainer schema) {
- final Set<DataSchemaNode> children = new HashSet<>();
- for (final DataSchemaNode augNode : augmentation.getChildNodes()) {
- children.add(schema.getDataChildByName(augNode.getQName()));
- }
- return new NodeContainerProxy(null, children);
- }
-
- /**
- * Returns a SchemaPathUtil for provided child node
- * <p/>
- * If supplied child is added by Augmentation this operation returns
- * a SchemaPathUtil for augmentation,
- * otherwise returns a SchemaPathUtil for child as
- * call for {@link #fromDataSchemaNode(org.opendaylight.yangtools.yang.model.api.DataSchemaNode)}.
- */
- private static InstanceIdToNodes<?> fromAugmentation(final DataNodeContainer parent,
- final AugmentationTarget parentAug, final DataSchemaNode child) {
- AugmentationSchema augmentation = null;
- for (final AugmentationSchema aug : parentAug.getAvailableAugmentations()) {
- final DataSchemaNode potential = aug.getDataChildByName(child.getQName());
- if (potential != null) {
- augmentation = aug;
- break;
- }
-
- }
- if (augmentation != null) {
- return new AugmentationNormalization(augmentation, parent);
- } else {
- return fromDataSchemaNode(child);
- }
- }
-
- private static InstanceIdToNodes<?> fromDataSchemaNode(final DataSchemaNode potential) {
- if (potential instanceof ContainerSchemaNode) {
- return new ContainerTransformation((ContainerSchemaNode) potential);
- } else if (potential instanceof ListSchemaNode) {
- return fromListSchemaNode((ListSchemaNode) potential);
- } else if (potential instanceof LeafSchemaNode) {
- return new LeafNormalization((LeafSchemaNode) potential);
- } else if (potential instanceof ChoiceSchemaNode) {
- return new ChoiceNodeNormalization((ChoiceSchemaNode) potential);
- } else if (potential instanceof LeafListSchemaNode) {
- return fromLeafListSchemaNode((LeafListSchemaNode) potential);
- } else if (potential instanceof AnyXmlSchemaNode) {
- return new AnyXmlNormalization((AnyXmlSchemaNode) potential);
- }
- return null;
- }
-
- private static InstanceIdToNodes<?> fromListSchemaNode(final ListSchemaNode potential) {
- final List<QName> keyDefinition = potential.getKeyDefinition();
- if (keyDefinition == null || keyDefinition.isEmpty()) {
- return new UnkeyedListMixinNormalization(potential);
- }
- if (potential.isUserOrdered()) {
- return new OrderedMapMixinNormalization(potential);
- }
- return new UnorderedMapMixinNormalization(potential);
- }
-
- private static InstanceIdToNodes<?> fromLeafListSchemaNode(final LeafListSchemaNode potential) {
- if (potential.isUserOrdered()) {
- return new OrderedLeafListMixinNormalization(potential);
- }
- return new UnorderedLeafListMixinNormalization(potential);
- }
-
- public static NormalizedNode<?, ?> serialize(final SchemaContext ctx, final YangInstanceIdentifier id) {
- return serialize(ctx, id, Optional.<NormalizedNode<?, ?>>absent(), Optional.<ModifyAction>absent());
- }
-
- public static NormalizedNode<?, ?> serialize(final SchemaContext ctx, final YangInstanceIdentifier id, final NormalizedNode<?, ?> deepestElement) {
- return serialize(ctx, id, Optional.<NormalizedNode<?, ?>>of(deepestElement), Optional.<ModifyAction>absent());
- }
-
- public static NormalizedNode<?, ?> serialize(final SchemaContext ctx, final YangInstanceIdentifier id, final Optional<NormalizedNode<?, ?>> deepestElement, final Optional<ModifyAction> operation) {
- Preconditions.checkNotNull(ctx);
- Preconditions.checkNotNull(id);
- final PathArgument topLevelElement = id.getPathArguments().iterator().next();
- final DataSchemaNode dataChildByName = ctx.getDataChildByName(topLevelElement.getNodeType());
- Preconditions.checkNotNull(dataChildByName, "Cannot find %s node in schema context. Instance identifier has to start from root", topLevelElement);
- try {
- final InstanceIdToNodes<?> instanceIdToNodes = fromSchemaAndQNameChecked(ctx, topLevelElement.getNodeType());
- return instanceIdToNodes.create(id, deepestElement, operation);
- } catch (final DataNormalizationException e) {
- throw new IllegalArgumentException("Unable to serialize: " + id, e);
- }
- }
-}
// Default operation
if(defaultOperation.isPresent()) {
- editBuilder.withChild(Builders.leafBuilder().withNodeIdentifier(toId(NETCONF_DEFAULT_OPERATION_QNAME)).withValue(NetconfMessageTransformUtil.modifyOperationToXmlString(defaultOperation.get())).build());
+ final String opString = defaultOperation.get().name().toLowerCase();
+ editBuilder.withChild(Builders.leafBuilder().withNodeIdentifier(toId(NETCONF_DEFAULT_OPERATION_QNAME)).withValue(opString).build());
}
// Error option
import com.google.common.collect.Lists;
import java.io.IOException;
import java.net.URI;
+import java.util.AbstractMap;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
final NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, DOMSource, AnyXmlNode> anyXmlBuilder = Builders.anyXmlBuilder().withNodeIdentifier(toId(NETCONF_FILTER_QNAME));
anyXmlBuilder.withAttributes(Collections.singletonMap(NETCONF_TYPE_QNAME, SUBTREE));
- final NormalizedNode<?, ?> filterContent = InstanceIdToNodes.serialize(ctx, identifier);
+ final NormalizedNode<?, ?> filterContent = ImmutableNodes.fromInstanceId(ctx, identifier);
final Element element = XmlUtil.createElement(BLANK_DOCUMENT, NETCONF_FILTER_QNAME.getLocalName(), Optional.of(NETCONF_FILTER_QNAME.getNamespace().toString()));
element.setAttributeNS(NETCONF_FILTER_QNAME.getNamespace().toString(), NETCONF_TYPE_QNAME.getLocalName(), "subtree");
"Data has to be either container or a list node when creating structure for top level element, but was: %s", lastChildOverride.get());
configContent = lastChildOverride.get();
} else {
- configContent = InstanceIdToNodes.serialize(ctx, dataPath, lastChildOverride, operation);
+ final Entry<QName, ModifyAction> modifyOperation =
+ operation.isPresent() ? new AbstractMap.SimpleEntry<>(NETCONF_OPERATION_QNAME, operation.get()) : null;
+ configContent = ImmutableNodes.fromInstanceId(ctx, dataPath, lastChildOverride, Optional.fromNullable(modifyOperation));
}
final Element element = XmlUtil.createElement(BLANK_DOCUMENT, NETCONF_CONFIG_QNAME.getLocalName(), Optional.of(NETCONF_CONFIG_QNAME.getNamespace().toString()));
return SchemaPath.create(true, rpc);
}
- public static String modifyOperationToXmlString(final ModifyAction operation) {
- return operation.name().toLowerCase();
- }
-
// FIXME similar code is in netconf-notifications-impl , DRY
public static void writeNormalizedNode(final NormalizedNode<?, ?> normalized, final DOMResult result, final SchemaPath schemaPath, final SchemaContext context)
throws IOException, XMLStreamException {
import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
import org.opendaylight.yangtools.yang.model.api.UsesNode;
-class NodeContainerProxy implements ContainerSchemaNode {
+/**
+ * Simple proxy for container like schema nodes, where user provides a collection of children schema nodes
+ */
+public final class NodeContainerProxy implements ContainerSchemaNode {
private final Map<QName, DataSchemaNode> childNodes;
private final QName qName;
public List<UnknownSchemaNode> getUnknownSchemaNodes() {
return Collections.emptyList();
}
-}
+}
\ No newline at end of file
import com.google.common.base.Optional;
import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Futures;
import java.io.InputStream;
import java.util.ArrayList;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.Module;
public void testNetconfDeviceMissingSource() throws Exception {
final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
final NetconfDeviceCommunicator listener = getListener();
+ final SchemaContext schema = getSchema();
final SchemaContextFactory schemaFactory = getSchemaFactory();
if(((Collection<?>) invocation.getArguments()[0]).size() == 2) {
return Futures.immediateFailedCheckedFuture(schemaResolutionException);
} else {
- return Futures.immediateCheckedFuture(getSchema());
+ return Futures.immediateCheckedFuture(schema);
}
}
}).when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
- = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+ = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, new NetconfStateSchemas.NetconfStateSchemasResolver() {
+ @Override
+ public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
+ final Module first = Iterables.getFirst(schema.getModules(), null);
+ final QName qName = QName.create(first.getQNameModule(), first.getName());
+ final NetconfStateSchemas.RemoteYangSchema source1 = new NetconfStateSchemas.RemoteYangSchema(qName);
+ final NetconfStateSchemas.RemoteYangSchema source2 = new NetconfStateSchemas.RemoteYangSchema(QName.create(first.getQNameModule(), "test-module2"));
+ return new NetconfStateSchemas(Sets.newHashSet(source1, source2));
+ }
+ });
+
final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), true);
// Monitoring supported
final NetconfSessionPreferences sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
final DataSchemaNode schemasNode = ((ContainerSchemaNode) NetconfDevice.INIT_SCHEMA_CTX.getDataChildByName("netconf-state")).getDataChildByName("schemas");
final Document schemasXml = XmlUtil.readXmlToDocument(getClass().getResourceAsStream("/netconf-state.schemas.payload.xml"));
- final ToNormalizedNodeParser<Element, ContainerNode, ContainerSchemaNode> containerNodeParser = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, NetconfDevice.INIT_SCHEMA_CTX).getContainerNodeParser();
+ final ToNormalizedNodeParser<Element, ContainerNode, ContainerSchemaNode> containerNodeParser = DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, NetconfDevice.INIT_SCHEMA_CTX, false).getContainerNodeParser();
final ContainerNode compositeNodeSchemas = containerNodeParser.parse(Collections.singleton(schemasXml.getDocumentElement()), (ContainerSchemaNode) schemasNode);
final NetconfStateSchemas schemas = NetconfStateSchemas.create(new RemoteDeviceId("device", new InetSocketAddress(99)), compositeNodeSchemas);
public void setup() throws Exception {
final SchemaContext schemaContext = getNotificationSchemaContext(getClass());
- messageTransformer = new NetconfMessageTransformer(schemaContext);
+ messageTransformer = new NetconfMessageTransformer(schemaContext, true);
final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(true);
cfgCtx = parser.resolveSchemaContext(Sets.union(configModules, notifModules));
assertNotNull(cfgCtx);
- messageTransformer = new NetconfMessageTransformer(cfgCtx);
+ messageTransformer = new NetconfMessageTransformer(cfgCtx, true);
}
private LeafNode<Object> buildLeaf(final QName running, final Object value) {
}
private NetconfMessageTransformer getTransformer(final SchemaContext schema) {
- return new NetconfMessageTransformer(schema);
+ return new NetconfMessageTransformer(schema, true);
}
@Test
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html.
- */
-package org.opendaylight.controller.sal.connect.netconf.util;
-
-import static org.junit.Assert.assertEquals;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Collections2;
-import com.google.common.io.ByteSource;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collections;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.parser.api.YangSyntaxErrorException;
-import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-
-public class InstanceIdToNodesTest {
-
- private static final String NS = "urn:opendaylight:params:xml:ns:yang:controller:md:sal:normalization:test";
- private static final String REVISION = "2014-03-13";
- private static final QName ID = QName.create(NS, REVISION, "id");
- private SchemaContext ctx;
-
- private final YangInstanceIdentifier.NodeIdentifier rootContainer = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "test"));
- private final YangInstanceIdentifier.NodeIdentifier outerContainer = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "outer-container"));
- private final YangInstanceIdentifier.NodeIdentifier augmentedLeaf = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "augmented-leaf"));
- private final YangInstanceIdentifier.AugmentationIdentifier augmentation = new YangInstanceIdentifier.AugmentationIdentifier(Collections.singleton(augmentedLeaf.getNodeType()));
-
- private final YangInstanceIdentifier.NodeIdentifier outerList = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "outer-list"));
- private final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListWithKey = new YangInstanceIdentifier.NodeIdentifierWithPredicates(QName.create(NS, REVISION, "outer-list"), ID, 1);
- private final YangInstanceIdentifier.NodeIdentifier choice = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "outer-choice"));
- private final YangInstanceIdentifier.NodeIdentifier leafFromCase = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "one"));
-
- private final YangInstanceIdentifier.NodeIdentifier leafList = new YangInstanceIdentifier.NodeIdentifier(QName.create(NS, REVISION, "ordered-leaf-list"));
- private final YangInstanceIdentifier.NodeWithValue leafListWithValue = new YangInstanceIdentifier.NodeWithValue(leafList.getNodeType(), "abcd");
-
- static SchemaContext createTestContext() throws IOException, YangSyntaxErrorException {
- final YangParserImpl parser = new YangParserImpl();
- return parser.parseSources(Collections2.transform(Collections.singletonList("/schemas/filter-test.yang"), new Function<String, ByteSource>() {
- @Override
- public ByteSource apply(final String input) {
- return new ByteSource() {
- @Override
- public InputStream openStream() throws IOException {
- return InstanceIdToNodesTest.class.getResourceAsStream(input);
- }
- };
- }
- }));
- }
-
- @Before
- public void setUp() throws Exception {
- ctx = createTestContext();
-
- }
-
- @Test
- public void testInAugment() throws Exception {
- final ContainerNode expectedFilter = Builders.containerBuilder().withNodeIdentifier(rootContainer).withChild(
- Builders.containerBuilder().withNodeIdentifier(outerContainer).withChild(
- Builders.augmentationBuilder().withNodeIdentifier(augmentation).withChild(
- Builders.leafBuilder().withNodeIdentifier(augmentedLeaf).build()
- ).build()
- ).build()
- ).build();
-
- final NormalizedNode<?, ?> filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer, outerContainer, augmentation, augmentedLeaf));
- assertEquals(expectedFilter, filter);
- }
-
- @Test
- public void testInAugmentLeafOverride() throws Exception {
- final LeafNode<Object> lastLeaf = Builders.leafBuilder().withNodeIdentifier(augmentedLeaf).withValue("randomValue").build();
-
- final ContainerNode expectedFilter = Builders.containerBuilder().withNodeIdentifier(rootContainer).withChild(
- Builders.containerBuilder().withNodeIdentifier(outerContainer).withChild(
- Builders.augmentationBuilder().withNodeIdentifier(augmentation).withChild(
- lastLeaf
- ).build()
- ).build()
- ).build();
-
- final NormalizedNode<?, ?> filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer, outerContainer, augmentation, augmentedLeaf), lastLeaf);
- assertEquals(expectedFilter, filter);
- }
-
- @Test
- public void testListChoice() throws Exception {
- final ContainerNode expectedFilter = Builders.containerBuilder().withNodeIdentifier(rootContainer).withChild(
- Builders.mapBuilder().withNodeIdentifier(outerList).withChild(
- Builders.mapEntryBuilder().withNodeIdentifier(outerListWithKey).withChild(
- Builders.leafBuilder().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(ID)).withValue(1).build()
- ).withChild(
- Builders.choiceBuilder().withNodeIdentifier(choice).withChild(
- Builders.leafBuilder().withNodeIdentifier(leafFromCase).build()
- ).build()
- ).build()
- ).build()
- ).build();
-
- final NormalizedNode<?, ?> filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer, outerList, outerListWithKey, choice, leafFromCase));
- assertEquals(expectedFilter, filter);
- }
-
- @Test
- public void testTopContainerLastChildOverride() throws Exception {
- final ContainerNode expectedStructure = Builders.containerBuilder().withNodeIdentifier(rootContainer).withChild(
- Builders.mapBuilder().withNodeIdentifier(outerList).withChild(
- Builders.mapEntryBuilder().withNodeIdentifier(outerListWithKey).withChild(
- Builders.leafBuilder().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(ID)).withValue(1).build()
- ).withChild(
- Builders.choiceBuilder().withNodeIdentifier(choice).withChild(
- Builders.leafBuilder().withNodeIdentifier(leafFromCase).build()
- ).build()
- ).build()
- ).build()
- ).build();
-
- final NormalizedNode<?, ?> filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer), expectedStructure);
- assertEquals(expectedStructure, filter);
- }
-
- @Test
- public void testListLastChildOverride() throws Exception {
- final MapEntryNode outerListEntry = Builders.mapEntryBuilder().withNodeIdentifier(outerListWithKey).withChild(
- Builders.leafBuilder().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(ID)).withValue(1).build()
- ).build();
- final MapNode lastChild = Builders.mapBuilder().withNodeIdentifier(this.outerList).withChild(
- outerListEntry
- ).build();
- final ContainerNode expectedStructure = Builders.containerBuilder().withNodeIdentifier(rootContainer).withChild(
- lastChild
- ).build();
-
- NormalizedNode<?, ?> filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer, outerList, outerListWithKey), outerListEntry);
- assertEquals(expectedStructure, filter);
- filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer, outerList, outerListWithKey));
- assertEquals(expectedStructure, filter);
- }
-
- @Test
- public void testLeafList() throws Exception {
- final ContainerNode expectedFilter = Builders.containerBuilder().withNodeIdentifier(rootContainer).withChild(
- Builders.orderedLeafSetBuilder().withNodeIdentifier(leafList).withChild(
- Builders.leafSetEntryBuilder().withNodeIdentifier(leafListWithValue).withValue(leafListWithValue.getValue()).build()
- ).build()
- ).build();
-
- final NormalizedNode<?, ?> filter = InstanceIdToNodes.serialize(ctx, YangInstanceIdentifier.create(rootContainer, leafList, leafListWithValue));
- assertEquals(expectedFilter, filter);
- }
-}
\ No newline at end of file
+++ /dev/null
-module normalization-test {
- yang-version 1;
- namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:normalization:test";
- prefix "norm-test";
-
- revision "2014-03-13" {
- description "Initial revision.";
- }
-
- grouping outer-grouping {
- }
-
- container test {
- list outer-list {
- key id;
- leaf id {
- type uint16;
- }
- choice outer-choice {
- case one {
- leaf one {
- type string;
- }
- }
- case two-three {
- leaf two {
- type string;
- }
- leaf three {
- type string;
- }
- }
- }
- list inner-list {
- key name;
- ordered-by user;
-
- leaf name {
- type string;
- }
- leaf value {
- type string;
- }
- }
- }
-
- list unkeyed-list {
- leaf name {
- type string;
- }
- }
-
- leaf-list unordered-leaf-list {
- type string;
- }
-
- leaf-list ordered-leaf-list {
- ordered-by user;
- type string;
- }
-
- container outer-container {
- }
-
- anyxml any-xml-data;
- }
-
- augment /norm-test:test/norm-test:outer-container {
-
- leaf augmented-leaf {
- type string;
- }
- }
-}
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-remote</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-remoterpc-connector</artifactId>
<packaging>bundle</packaging>
@Override
public void onComplete(final Throwable failure, final Object reply) throws Throwable {
if(failure != null) {
- LOG.error("InvokeRpc failed", failure);
+
+ // When we return a failure to the caller they can choose to log it if they like
+ // so here we just do basic warn logging by default and log the stack trace only when debug
+ // is enabled
+
+ LOG.warn("InvokeRpc failed rpc = {}, identifier = {}", rpcMsg.getRpc(), rpcMsg.getIdentifier());
+
+ if(LOG.isDebugEnabled()){
+ LOG.debug("Detailed Error", failure);
+ }
final String message = String.format("Execution of RPC %s failed", rpcMsg.getRpc());
Collection<RpcError> errors = ((RpcErrorsException)failure).getRpcErrors();
LOG.debug("Create rpc registry and broker actors");
rpcRegistry =
- getContext().actorOf(Props.create(RpcRegistry.class).
+ getContext().actorOf(RpcRegistry.props().
withMailbox(config.getMailBoxName()), config.getRpcRegistryName());
rpcBroker =
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
+import java.util.Set;
+
import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
}
}
+ public Set<RpcRouter.RouteIdentifier<?, ?, ?>> getRoutes() {
+ return table.keySet();
+ }
+
public void addRoute(RpcRouter.RouteIdentifier<?,?,?> routeId){
table.put(routeId, System.currentTimeMillis());
}
package org.opendaylight.controller.remote.rpc.registry;
import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.japi.Creator;
import akka.japi.Option;
import akka.japi.Pair;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore;
+import org.opendaylight.controller.remote.rpc.registry.mbeans.RemoteRpcRegistryMXBean;
+import org.opendaylight.controller.remote.rpc.registry.mbeans.RemoteRpcRegistryMXBeanImpl;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
import org.opendaylight.controller.sal.connector.api.RpcRouter.RouteIdentifier;
getLocalBucket().setData(new RoutingTable());
}
+ public static Props props() {
+ return Props.create(new RpcRegistryCreator());
+ }
+
@Override
protected void handleReceive(Object message) throws Exception {
//TODO: if sender is remote, reject message
}
}
}
+
+ private static class RpcRegistryCreator implements Creator<RpcRegistry> {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public RpcRegistry create() throws Exception {
+ RpcRegistry registry = new RpcRegistry();
+ RemoteRpcRegistryMXBean mxBean = new RemoteRpcRegistryMXBeanImpl(registry);
+ return registry;
+ }
+ }
}
import akka.actor.Address;
import akka.actor.Props;
import akka.cluster.ClusterActorRefProvider;
-import com.google.common.annotations.VisibleForTesting;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
}
}
- protected BucketImpl<T> getLocalBucket() {
+ public BucketImpl<T> getLocalBucket() {
return localBucket;
}
versions.put(selfAddress, localBucket.getVersion());
}
- protected Map<Address, Bucket<T>> getRemoteBuckets() {
+ public Map<Address, Bucket<T>> getRemoteBuckets() {
return remoteBuckets;
}
- @VisibleForTesting
- Map<Address, Long> getVersions() {
+ public Map<Address, Long> getVersions() {
return versions;
}
}
--- /dev/null
+package org.opendaylight.controller.remote.rpc.registry.mbeans;
+
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * JMX bean to check remote rpc registry
+ */
+
+public interface RemoteRpcRegistryMXBean {
+
+ Set<String> getGlobalRpc();
+
+ String getBucketVersions();
+
+ Set<String> getLocalRegisteredRoutedRpc();
+
+ Map<String,String> findRpcByName(String name);
+
+ Map<String,String> findRpcByRoute(String route);
+}
--- /dev/null
+package org.opendaylight.controller.remote.rpc.registry.mbeans;
+
+import akka.actor.Address;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+import org.opendaylight.controller.remote.rpc.registry.RoutingTable;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+
+public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements RemoteRpcRegistryMXBean {
+
+ protected final Logger log = LoggerFactory.getLogger(getClass());
+
+ private final String NULL_CONSTANT = "null";
+
+ private final String LOCAL_CONSTANT = "local";
+
+ private final String ROUTE_CONSTANT = "route:";
+
+ private final String NAME_CONSTANT = " | name:";
+
+ private final RpcRegistry rpcRegistry;
+
+ public RemoteRpcRegistryMXBeanImpl(final RpcRegistry rpcRegistry) {
+ super("RemoteRpcRegistry", "RemoteRpcBroker", null);
+ this.rpcRegistry = rpcRegistry;
+ registerMBean();
+ }
+
+ @Override
+ public Set<String> getGlobalRpc() {
+ RoutingTable table = rpcRegistry.getLocalBucket().getData();
+ Set<String> globalRpc = new HashSet<>(table.getRoutes().size());
+ for(RpcRouter.RouteIdentifier<?, ?, ?> route : table.getRoutes()){
+ if(route.getRoute() == null) {
+ globalRpc.add(route.getType() != null ? route.getType().toString() : NULL_CONSTANT);
+ }
+ }
+ if(log.isDebugEnabled()) {
+ log.debug("Locally registered global RPCs {}", globalRpc);
+ }
+ return globalRpc;
+ }
+
+ @Override
+ public Set<String> getLocalRegisteredRoutedRpc() {
+ RoutingTable table = rpcRegistry.getLocalBucket().getData();
+ Set<String> routedRpc = new HashSet<>(table.getRoutes().size());
+ for(RpcRouter.RouteIdentifier<?, ?, ?> route : table.getRoutes()){
+ if(route.getRoute() != null) {
+ StringBuilder builder = new StringBuilder(ROUTE_CONSTANT);
+ builder.append(route.getRoute().toString()).append(NAME_CONSTANT).append(route.getType() != null ?
+ route.getType().toString() : NULL_CONSTANT);
+ routedRpc.add(builder.toString());
+ }
+ }
+ if(log.isDebugEnabled()) {
+ log.debug("Locally registered routed RPCs {}", routedRpc);
+ }
+ return routedRpc;
+ }
+
+ @Override
+ public Map<String, String> findRpcByName(final String name) {
+ RoutingTable localTable = rpcRegistry.getLocalBucket().getData();
+ // Get all RPCs from local bucket
+ Map<String, String> rpcMap = new HashMap<>(getRpcMemberMapByName(localTable, name, LOCAL_CONSTANT));
+
+ // Get all RPCs from remote bucket
+ Map<Address, Bucket<RoutingTable>> buckets = rpcRegistry.getRemoteBuckets();
+ for(Address address : buckets.keySet()) {
+ RoutingTable table = buckets.get(address).getData();
+ rpcMap.putAll(getRpcMemberMapByName(table, name, address.toString()));
+ }
+ if(log.isDebugEnabled()) {
+ log.debug("list of RPCs {} searched by name {}", rpcMap, name);
+ }
+ return rpcMap;
+ }
+
+ @Override
+ public Map<String, String> findRpcByRoute(String routeId) {
+ RoutingTable localTable = rpcRegistry.getLocalBucket().getData();
+ Map<String, String> rpcMap = new HashMap<>(getRpcMemberMapByRoute(localTable, routeId, LOCAL_CONSTANT));
+
+ Map<Address, Bucket<RoutingTable>> buckets = rpcRegistry.getRemoteBuckets();
+ for(Address address : buckets.keySet()) {
+ RoutingTable table = buckets.get(address).getData();
+ rpcMap.putAll(getRpcMemberMapByRoute(table, routeId, address.toString()));
+
+ }
+ if(log.isDebugEnabled()) {
+ log.debug("list of RPCs {} searched by route {}", rpcMap, routeId);
+ }
+ return rpcMap;
+ }
+
+ /**
+ * Search if the routing table route String contains routeName
+ */
+
+ private Map<String,String> getRpcMemberMapByRoute(final RoutingTable table, final String routeName,
+ final String address) {
+ Set<RpcRouter.RouteIdentifier<?, ?, ?>> routes = table.getRoutes();
+ Map<String, String> rpcMap = new HashMap<>(routes.size());
+ for(RpcRouter.RouteIdentifier<?, ?, ?> route : table.getRoutes()){
+ if(route.getRoute() != null) {
+ String routeString = route.getRoute().toString();
+ if(routeString.contains(routeName)) {
+ StringBuilder builder = new StringBuilder(ROUTE_CONSTANT);
+ builder.append(routeString).append(NAME_CONSTANT).append(route.getType() != null ?
+ route.getType().toString() : NULL_CONSTANT);
+ rpcMap.put(builder.toString(), address);
+ }
+ }
+ }
+ return rpcMap;
+ }
+
+ /**
+ * Search if the routing table route type contains name
+ */
+ private Map<String, String> getRpcMemberMapByName(final RoutingTable table, final String name,
+ final String address) {
+ Set<RpcRouter.RouteIdentifier<?, ?, ?>> routes = table.getRoutes();
+ Map<String, String> rpcMap = new HashMap<>(routes.size());
+ for(RpcRouter.RouteIdentifier<?, ?, ?> route : routes){
+ if(route.getType() != null) {
+ String type = route.getType().toString();
+ if(type.contains(name)) {
+ StringBuilder builder = new StringBuilder(ROUTE_CONSTANT);
+ builder.append(route.getRoute() != null ? route.getRoute().toString(): NULL_CONSTANT)
+ .append(NAME_CONSTANT).append(type);
+ rpcMap.put(builder.toString(), address);
+ }
+ }
+ }
+ return rpcMap;
+ }
+
+
+
+ @Override
+ public String getBucketVersions() {
+ return rpcRegistry.getVersions().toString();
+ }
+
+}
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-connector-config</artifactId>
<description>Configuration files for sal-rest-connector</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-connector</artifactId>
<packaging>bundle</packaging>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-model-export</artifactId>
<!-- FIXME: remove explicit version, once model export package is part of yangtools-artefacts -->
- <version>0.7.0-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
</dependency>
<dependency>
private final ControllerContext salContext;
private static final Splitter SLASH_SPLITTER = Splitter.on("/");
- private static final Splitter AT_SPLITTER = Splitter.on("@");
private static final String MOUNT_ARG = ControllerContext.MOUNT;
public SchemaRetrievalServiceImpl(final ControllerContext controllerContext) {
}
private SchemaContext getMountSchemaContext(final String identifier) {
- final InstanceIdentifierContext mountContext = salContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext<?> mountContext = salContext.toMountPointIdentifier(identifier);
return mountContext.getSchemaContext();
}
package org.opendaylight.controller.sal.rest.impl;
-import com.google.common.base.Optional;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Request;
import javax.ws.rs.core.UriInfo;
return uriInfo.getPathParameters(false).getFirst(RestconfConstants.IDENTIFIER);
}
- protected final Optional<InstanceIdentifierContext> getIdentifierWithSchema() {
- return Optional.of(getInstanceIdentifierContext());
- }
-
- protected InstanceIdentifierContext getInstanceIdentifierContext() {
+ protected InstanceIdentifierContext<?> getInstanceIdentifierContext() {
return ControllerContext.getInstance().toInstanceIdentifier(getIdentifier());
}
import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
final MultivaluedMap<String, String> httpHeaders, final InputStream entityStream) throws IOException,
WebApplicationException {
try {
- final InstanceIdentifierContext<?> path = getIdentifierWithSchema().get();
+ final InstanceIdentifierContext<?> path = getInstanceIdentifierContext();
if (entityStream.available() < 1) {
return new NormalizedNodeContext(path, null);
}
final JsonReader reader = new JsonReader(new InputStreamReader(entityStream));
jsonParser.parse(reader);
- final NormalizedNode<?, ?> partialResult = resultHolder.getResult();
+ NormalizedNode<?, ?> partialResult = resultHolder.getResult();
final NormalizedNode<?, ?> result;
- if(partialResult instanceof MapNode) {
+
+ // FIXME: Also II should be updated unwrap result from augmentation and choice nodes on PUT
+ if (!isPost()) {
+ while (partialResult instanceof AugmentationNode || partialResult instanceof ChoiceNode) {
+ final Object childNode = ((DataContainerNode) partialResult).getValue().iterator().next();
+ partialResult = (NormalizedNode<?, ?>) childNode;
+ }
+ }
+
+ if (partialResult instanceof MapNode && !isPost()) {
result = Iterables.getOnlyElement(((MapNode) partialResult).getValue());
} else {
result = partialResult;
}
return new NormalizedNodeContext(path,result);
+ } catch (final RestconfDocumentedException e) {
+ throw e;
} catch (final Exception e) {
LOG.debug("Error parsing json input", e);
}
@Override
+ @Deprecated
public NormalizedNodeContext invokeRpc(final String identifier, final String noPayload, final UriInfo uriInfo) {
return restconf.invokeRpc(identifier, noPayload, uriInfo);
}
}
errContBuild.withChild(listErorsBuilder.build());
- final NormalizedNodeContext errContext = new NormalizedNodeContext(new InstanceIdentifierContext<DataSchemaNode>(null,
+ final NormalizedNodeContext errContext = new NormalizedNodeContext(new InstanceIdentifierContext<>(null,
(DataSchemaNode) errorsSchemaNode, null, context.getGlobalSchema()), errContBuild.build());
Object responseBody;
final ByteArrayOutputStream outStream = new ByteArrayOutputStream();
NormalizedNode<?, ?> data = errorsNode.getData();
- final InstanceIdentifierContext<DataSchemaNode> context = (InstanceIdentifierContext<DataSchemaNode>) errorsNode.getInstanceIdentifierContext();
- final DataSchemaNode schema = context.getSchemaNode();
+ final InstanceIdentifierContext<?> context = errorsNode.getInstanceIdentifierContext();
+ final DataSchemaNode schema = (DataSchemaNode) context.getSchemaNode();
SchemaPath path = context.getSchemaNode().getPath();
final OutputStreamWriter outputWriter = new OutputStreamWriter(outStream, Charsets.UTF_8);
private Object toXMLResponseBody(final NormalizedNodeContext errorsNode, final DataNodeContainer errorsSchemaNode) {
- final InstanceIdentifierContext<DataSchemaNode> pathContext = (InstanceIdentifierContext<DataSchemaNode>) errorsNode.getInstanceIdentifierContext();
+ final InstanceIdentifierContext<?> pathContext = errorsNode.getInstanceIdentifierContext();
final ByteArrayOutputStream outStream = new ByteArrayOutputStream();
XMLStreamWriter xmlWriter;
*/
package org.opendaylight.controller.sal.rest.impl;
-import com.google.common.base.Optional;
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
+import org.opendaylight.yangtools.yang.model.api.AugmentationTarget;
+import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
final MultivaluedMap<String, String> httpHeaders, final InputStream entityStream) throws IOException,
WebApplicationException {
try {
- final Optional<InstanceIdentifierContext> path = getIdentifierWithSchema();
+ final InstanceIdentifierContext<?> path = getInstanceIdentifierContext();
if (entityStream.available() < 1) {
// represent empty nopayload input
- return new NormalizedNodeContext(path.get(), null);
+ return new NormalizedNodeContext(path, null);
}
final DocumentBuilder dBuilder;
}
final Document doc = dBuilder.parse(entityStream);
- final NormalizedNode<?, ?> result = parse(path.get(),doc);
- return new NormalizedNodeContext(path.get(),result);
+ final NormalizedNode<?, ?> result = parse(path,doc);
+ return new NormalizedNodeContext(path,result);
+ } catch (final RestconfDocumentedException e){
+ throw e;
} catch (final Exception e) {
LOG.debug("Error parsing xml input", e);
final List<Element> elements = Collections.singletonList(doc.getDocumentElement());
final SchemaNode schemaNodeContext = pathContext.getSchemaNode();
- DataSchemaNode schemaNode = null;
+ DataSchemaNode schemaNode;
if (schemaNodeContext instanceof RpcDefinition) {
schemaNode = ((RpcDefinition) schemaNodeContext).getInput();
} else if (schemaNodeContext instanceof DataSchemaNode) {
final String docRootElm = doc.getDocumentElement().getLocalName();
final String schemaNodeName = pathContext.getSchemaNode().getQName().getLocalName();
+ // FIXME the factory instance should be cached if the schema context is the same
+ final DomToNormalizedNodeParserFactory parserFactory =
+ DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, pathContext.getSchemaContext());
+
if (!schemaNodeName.equalsIgnoreCase(docRootElm)) {
- final Collection<DataSchemaNode> children = ((DataNodeContainer) schemaNode).getChildNodes();
- for (final DataSchemaNode child : children) {
- if (child.getQName().getLocalName().equalsIgnoreCase(docRootElm)) {
- schemaNode = child;
- break;
+ final DataSchemaNode foundSchemaNode = findSchemaNodeOrParentChoiceByName(schemaNode, docRootElm);
+ if (foundSchemaNode != null) {
+ if (schemaNode instanceof AugmentationTarget) {
+ final AugmentationSchema augmentSchemaNode = findCorrespondingAugment(schemaNode, foundSchemaNode);
+ if (augmentSchemaNode != null) {
+ return parserFactory.getAugmentationNodeParser().parse(elements, augmentSchemaNode);
+ }
}
+ schemaNode = foundSchemaNode;
}
}
- // FIXME the factory instance should be cached if the schema context is the same
- final DomToNormalizedNodeParserFactory parserFactory =
- DomToNormalizedNodeParserFactory.getInstance(XmlUtils.DEFAULT_XML_CODEC_PROVIDER, pathContext.getSchemaContext());
+ NormalizedNode<?, ?> parsed = null;
if(schemaNode instanceof ContainerSchemaNode) {
return parserFactory.getContainerNodeParser().parse(Collections.singletonList(doc.getDocumentElement()), (ContainerSchemaNode) schemaNode);
} else if(schemaNode instanceof ListSchemaNode) {
final ListSchemaNode casted = (ListSchemaNode) schemaNode;
return parserFactory.getMapEntryNodeParser().parse(elements, casted);
- } // FIXME : add another DataSchemaNode extensions e.g. LeafSchemaNode
+ } else if (schemaNode instanceof ChoiceSchemaNode) {
+ final ChoiceSchemaNode casted = (ChoiceSchemaNode) schemaNode;
+ return parserFactory.getChoiceNodeParser().parse(elements, casted);
+ }
+ // FIXME : add another DataSchemaNode extensions e.g. LeafSchemaNode
+
+ return parsed;
+ }
+
+ private static DataSchemaNode findSchemaNodeOrParentChoiceByName(DataSchemaNode schemaNode, String elementName) {
+ final ArrayList<ChoiceSchemaNode> choiceSchemaNodes = new ArrayList<>();
+ final Collection<DataSchemaNode> children = ((DataNodeContainer) schemaNode).getChildNodes();
+ for (final DataSchemaNode child : children) {
+ if (child instanceof ChoiceSchemaNode) {
+ choiceSchemaNodes.add((ChoiceSchemaNode) child);
+ } else if (child.getQName().getLocalName().equalsIgnoreCase(elementName)) {
+ return child;
+ }
+ }
+
+ for (final ChoiceSchemaNode choiceNode : choiceSchemaNodes) {
+ for (final ChoiceCaseNode caseNode : choiceNode.getCases()) {
+ final DataSchemaNode resultFromRecursion = findSchemaNodeOrParentChoiceByName(caseNode, elementName);
+ if (resultFromRecursion != null) {
+ // this returns top choice node in which child element is found
+ return choiceNode;
+ }
+ }
+ }
+ return null;
+ }
+
+ private static AugmentationSchema findCorrespondingAugment(final DataSchemaNode parent, final DataSchemaNode child) {
+ if (parent instanceof AugmentationTarget && !((parent instanceof ChoiceCaseNode) || (parent instanceof ChoiceSchemaNode))) {
+ for (AugmentationSchema augmentation : ((AugmentationTarget) parent).getAvailableAugmentations()) {
+ DataSchemaNode childInAugmentation = augmentation.getDataChildByName(child.getQName());
+ if (childInAugmentation != null) {
+ return augmentation;
+ }
+ }
+ }
return null;
}
}
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
import javax.ws.rs.core.Response.Status;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// PUT configuration
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPut(
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
+ final SchemaContext globalSchema, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
checkPreconditions();
- final DataNormalizationOperation<?> rootOp = ControllerContext.getInstance().getRootOperation();
- return putDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, rootOp);
+ return putDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, globalSchema);
}
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPut(
final DOMMountPoint mountPoint, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
final Optional<DOMDataBroker> domDataBrokerService = mountPoint.getService(DOMDataBroker.class);
if (domDataBrokerService.isPresent()) {
- final DataNormalizationOperation<?> rootOp = new DataNormalizer(mountPoint.getSchemaContext()).getRootOperation();
return putDataViaTransaction(domDataBrokerService.get().newReadWriteTransaction(), CONFIGURATION, path,
- payload, rootOp);
+ payload, mountPoint.getSchemaContext());
}
throw new RestconfDocumentedException("DOM data broker service isn't available for mount point.");
}
// POST configuration
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPost(
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
+ final SchemaContext globalSchema, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
checkPreconditions();
- final DataNormalizationOperation<?> rootOp = ControllerContext.getInstance().getRootOperation();
- return postDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, rootOp);
+ return postDataViaTransaction(domDataBroker.newReadWriteTransaction(), CONFIGURATION, path, payload, globalSchema);
}
public CheckedFuture<Void, TransactionCommitFailedException> commitConfigurationDataPost(
final DOMMountPoint mountPoint, final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload) {
final Optional<DOMDataBroker> domDataBrokerService = mountPoint.getService(DOMDataBroker.class);
if (domDataBrokerService.isPresent()) {
- final DataNormalizationOperation<?> rootOp = new DataNormalizer(mountPoint.getSchemaContext()).getRootOperation();
return postDataViaTransaction(domDataBrokerService.get().newReadWriteTransaction(), CONFIGURATION, path,
- payload, rootOp);
+ payload, mountPoint.getSchemaContext());
}
throw new RestconfDocumentedException("DOM data broker service isn't available for mount point.");
}
private CheckedFuture<Void, TransactionCommitFailedException> postDataViaTransaction(
final DOMDataReadWriteTransaction rWTransaction, final LogicalDatastoreType datastore,
- final YangInstanceIdentifier parentPath, final NormalizedNode<?, ?> payload, final DataNormalizationOperation<?> root) {
+ final YangInstanceIdentifier parentPath, final NormalizedNode<?, ?> payload, final SchemaContext schemaContext) {
// FIXME: This is doing correct post for container and list children
// not sure if this will work for choice case
- final YangInstanceIdentifier path;
- if(payload instanceof MapEntryNode) {
- path = parentPath.node(payload.getNodeType()).node(payload.getIdentifier());
+ if(payload instanceof MapNode) {
+ final YangInstanceIdentifier mapPath = parentPath.node(payload.getIdentifier());
+ final NormalizedNode<?, ?> emptySubtree = ImmutableNodes.fromInstanceId(schemaContext, mapPath);
+ rWTransaction.merge(datastore, YangInstanceIdentifier.create(emptySubtree.getIdentifier()), emptySubtree);
+ ensureParentsByMerge(datastore, mapPath, rWTransaction, schemaContext);
+ for(final MapEntryNode child : ((MapNode) payload).getValue()) {
+ final YangInstanceIdentifier childPath = mapPath.node(child.getIdentifier());
+ checkItemDoesNotExists(rWTransaction, datastore, childPath);
+ rWTransaction.put(datastore, childPath, child);
+ }
} else {
- path = parentPath.node(payload.getIdentifier());
+ final YangInstanceIdentifier path;
+ if(payload instanceof MapEntryNode) {
+ path = parentPath.node(payload.getNodeType()).node(payload.getIdentifier());
+ } else {
+ path = parentPath.node(payload.getIdentifier());
+ }
+ checkItemDoesNotExists(rWTransaction,datastore, path);
+ ensureParentsByMerge(datastore, path, rWTransaction, schemaContext);
+ rWTransaction.put(datastore, path, payload);
}
+ return rWTransaction.submit();
+ }
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> futureDatastoreData = rWTransaction.read(datastore, path);
+ private void checkItemDoesNotExists(final DOMDataReadWriteTransaction rWTransaction,final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ final ListenableFuture<Boolean> futureDatastoreData = rWTransaction.exists(store, path);
try {
- final Optional<NormalizedNode<?, ?>> optionalDatastoreData = futureDatastoreData.get();
- if (optionalDatastoreData.isPresent() && payload.equals(optionalDatastoreData.get())) {
+ if (futureDatastoreData.get()) {
final String errMsg = "Post Configuration via Restconf was not executed because data already exists";
- LOG.trace(errMsg + ":{}", path);
+ LOG.debug(errMsg + ":{}", path);
rWTransaction.cancel();
throw new RestconfDocumentedException("Data already exists for path: " + path, ErrorType.PROTOCOL,
ErrorTag.DATA_EXISTS);
LOG.trace("It wasn't possible to get data loaded from datastore at path " + path);
}
- ensureParentsByMerge(datastore, path, rWTransaction, root);
- rWTransaction.merge(datastore, path, payload);
- LOG.trace("Post " + datastore.name() + " via Restconf: {}", path);
- return rWTransaction.submit();
}
private CheckedFuture<Void, TransactionCommitFailedException> putDataViaTransaction(
final DOMDataReadWriteTransaction writeTransaction, final LogicalDatastoreType datastore,
- final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload, final DataNormalizationOperation<?> root) {
+ final YangInstanceIdentifier path, final NormalizedNode<?, ?> payload, final SchemaContext schemaContext) {
LOG.trace("Put " + datastore.name() + " via Restconf: {}", path);
- ensureParentsByMerge(datastore, path, writeTransaction, root);
+ ensureParentsByMerge(datastore, path, writeTransaction, schemaContext);
writeTransaction.put(datastore, path, payload);
return writeTransaction.submit();
}
this.domDataBroker = domDataBroker;
}
- private final void ensureParentsByMerge(final LogicalDatastoreType store,
- final YangInstanceIdentifier normalizedPath, final DOMDataReadWriteTransaction rwTx,
- final DataNormalizationOperation<?> root) {
- final List<PathArgument> currentArguments = new ArrayList<>();
- final Iterator<PathArgument> iterator = normalizedPath.getPathArguments().iterator();
- DataNormalizationOperation<?> currentOp = root;
- while (iterator.hasNext()) {
- final PathArgument currentArg = iterator.next();
- try {
- currentOp = currentOp.getChild(currentArg);
- } catch (final DataNormalizationException e) {
- rwTx.cancel();
- throw new IllegalArgumentException(
- String.format("Invalid child encountered in path %s", normalizedPath), e);
- }
- currentArguments.add(currentArg);
- final YangInstanceIdentifier currentPath = YangInstanceIdentifier.create(currentArguments);
-
- final Boolean exists;
+ private void ensureParentsByMerge(final LogicalDatastoreType store,
+ final YangInstanceIdentifier normalizedPath, final DOMDataReadWriteTransaction rwTx, final SchemaContext schemaContext) {
+ final List<PathArgument> normalizedPathWithoutChildArgs = new ArrayList<>();
+ YangInstanceIdentifier rootNormalizedPath = null;
- try {
+ final Iterator<PathArgument> it = normalizedPath.getPathArguments().iterator();
- final CheckedFuture<Boolean, ReadFailedException> future = rwTx.exists(store, currentPath);
- exists = future.checkedGet();
- } catch (final ReadFailedException e) {
- LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
- rwTx.cancel();
- throw new IllegalStateException("Failed to read pre-existing data", e);
+ while(it.hasNext()) {
+ final PathArgument pathArgument = it.next();
+ if(rootNormalizedPath == null) {
+ rootNormalizedPath = YangInstanceIdentifier.create(pathArgument);
}
- if (!exists && iterator.hasNext()) {
- rwTx.merge(store, currentPath, currentOp.createDefault(currentArg));
+ // Skip last element, its not a parent
+ if(it.hasNext()) {
+ normalizedPathWithoutChildArgs.add(pathArgument);
}
}
+
+ // No parent structure involved, no need to ensure parents
+ if(normalizedPathWithoutChildArgs.isEmpty()) {
+ return;
+ }
+
+ Preconditions.checkArgument(rootNormalizedPath != null, "Empty path received");
+
+ final NormalizedNode<?, ?> parentStructure =
+ ImmutableNodes.fromInstanceId(schemaContext, YangInstanceIdentifier.create(normalizedPathWithoutChildArgs));
+ rwTx.merge(store, rootNormalizedPath, parentStructure);
}
}
onGlobalContextUpdated(schemas);
}
- public InstanceIdentifierContext toInstanceIdentifier(final String restconfInstance) {
+ public InstanceIdentifierContext<?> toInstanceIdentifier(final String restconfInstance) {
return toIdentifier(restconfInstance, false);
}
return globalSchema;
}
- public InstanceIdentifierContext toMountPointIdentifier(final String restconfInstance) {
+ public InstanceIdentifierContext<?> toMountPointIdentifier(final String restconfInstance) {
return toIdentifier(restconfInstance, true);
}
- private InstanceIdentifierContext toIdentifier(final String restconfInstance, final boolean toMountPointIdentifier) {
+ private InstanceIdentifierContext<?> toIdentifier(final String restconfInstance, final boolean toMountPointIdentifier) {
checkPreconditions();
if(restconfInstance == null) {
final InstanceIdentifierBuilder builder = YangInstanceIdentifier.builder();
final Module latestModule = globalSchema.findModuleByName(startModule, null);
- final InstanceIdentifierContext iiWithSchemaNode = collectPathArguments(builder, pathArgs, latestModule, null,
+
+ if (latestModule == null) {
+ throw new RestconfDocumentedException("The module named '" + startModule + "' does not exist.", ErrorType.PROTOCOL, ErrorTag.UNKNOWN_ELEMENT);
+ }
+
+ final InstanceIdentifierContext<?> iiWithSchemaNode = collectPathArguments(builder, pathArgs, latestModule, null,
toMountPointIdentifier);
if (iiWithSchemaNode == null) {
return object == null ? "" : URLEncoder.encode(codec.serialize(object).toString(), ControllerContext.URI_ENCODING_CHAR_SET);
}
- private InstanceIdentifierContext collectPathArguments(final InstanceIdentifierBuilder builder,
+ private InstanceIdentifierContext<?> collectPathArguments(final InstanceIdentifierBuilder builder,
final List<String> strings, final DataNodeContainer parentNode, final DOMMountPoint mountPoint,
final boolean returnJustMountPoint) {
Preconditions.<List<String>> checkNotNull(strings);
if (returnJustMountPoint || strings.size() == 1) {
final YangInstanceIdentifier instance = YangInstanceIdentifier.builder().toInstance();
- return new InstanceIdentifierContext(instance, mountPointSchema, mount,mountPointSchema);
+ return new InstanceIdentifierContext<>(instance, mountPointSchema, mount,mountPointSchema);
}
final String moduleNameBehindMountPoint = toModuleName(strings.get(1));
return createContext(builder.build(), targetNode, mountPoint,mountPoint != null ? mountPoint.getSchemaContext() : globalSchema);
}
- private InstanceIdentifierContext createContext(final YangInstanceIdentifier instance, final DataSchemaNode dataSchemaNode,
+ private InstanceIdentifierContext<?> createContext(final YangInstanceIdentifier instance, final DataSchemaNode dataSchemaNode,
final DOMMountPoint mountPoint, final SchemaContext schemaContext) {
final YangInstanceIdentifier instanceIdentifier = new DataNormalizer(schemaContext).toNormalized(instance);
- return new InstanceIdentifierContext(instanceIdentifier, dataSchemaNode, mountPoint,schemaContext);
+ return new InstanceIdentifierContext<>(instanceIdentifier, dataSchemaNode, mountPoint,schemaContext);
}
public static DataSchemaNode findInstanceDataChildByNameAndNamespace(final DataNodeContainer container, final String name,
import java.net.URISyntaxException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.util.ExtendedType;
import org.opendaylight.yangtools.yang.model.util.SchemaContextUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
}
- private static class TypeDef {
- public final TypeDefinition<? extends Object> typedef;
- public final QName qName;
-
- TypeDef(final TypeDefinition<? extends Object> typedef, final QName qName) {
- this.typedef = typedef;
- this.qName = qName;
- }
- }
-
- private final static RestconfImpl INSTANCE = new RestconfImpl();
+ private static final RestconfImpl INSTANCE = new RestconfImpl();
private static final int NOTIFICATION_PORT = 8181;
private static final int CHAR_NOT_FOUND = -1;
- private final static String MOUNT_POINT_MODULE_NAME = "ietf-netconf";
+ private static final String MOUNT_POINT_MODULE_NAME = "ietf-netconf";
- private final static SimpleDateFormat REVISION_FORMAT = new SimpleDateFormat("yyyy-MM-dd");
+ private static final SimpleDateFormat REVISION_FORMAT = new SimpleDateFormat("yyyy-MM-dd");
- private final static String SAL_REMOTE_NAMESPACE = "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote";
+ private static final String SAL_REMOTE_NAMESPACE = "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote";
- private final static String SAL_REMOTE_RPC_SUBSRCIBE = "create-data-change-event-subscription";
+ private static final String SAL_REMOTE_RPC_SUBSRCIBE = "create-data-change-event-subscription";
private BrokerFacade broker;
Builders.containerBuilder((ContainerSchemaNode) modulesSchemaNode);
moduleContainerBuilder.withChild(allModuleMap);
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, modulesSchemaNode,
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, modulesSchemaNode,
null, schemaContext), moduleContainerBuilder.build());
}
throw new RestconfDocumentedException(errMsg, ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
}
- final InstanceIdentifierContext mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext<?> mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
final DOMMountPoint mountPoint = mountPointIdentifier.getMountPoint();
final Set<Module> modules = controllerContext.getAllModules(mountPoint);
- final SchemaContext schemaContext = mountPoint.getSchemaContext();
final MapNode mountPointModulesMap = makeModuleMapNode(modules);
final Module restconfModule = getRestconfModule();
Builders.containerBuilder((ContainerSchemaNode) modulesSchemaNode);
moduleContainerBuilder.withChild(mountPointModulesMap);
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, modulesSchemaNode,
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, modulesSchemaNode,
mountPoint, controllerContext.getGlobalSchema()), moduleContainerBuilder.build());
}
DOMMountPoint mountPoint = null;
final SchemaContext schemaContext;
if (identifier.contains(ControllerContext.MOUNT)) {
- final InstanceIdentifierContext mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext<?> mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
mountPoint = mountPointIdentifier.getMountPoint();
module = controllerContext.findModuleByNameAndRevision(mountPoint, moduleNameAndRevision);
schemaContext = mountPoint.getSchemaContext();
restconfModule, Draft02.RestConfModule.MODULE_LIST_SCHEMA_NODE);
Preconditions.checkState(moduleSchemaNode instanceof ListSchemaNode);
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, moduleSchemaNode, mountPoint,
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, moduleSchemaNode, mountPoint,
schemaContext), moduleMap);
}
streamsContainerBuilder.withChild(listStreamsBuilder.build());
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, streamsContainerSchemaNode, null,
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, streamsContainerSchemaNode, null,
schemaContext), streamsContainerBuilder.build());
}
Set<Module> modules = null;
DOMMountPoint mountPoint = null;
if (identifier.contains(ControllerContext.MOUNT)) {
- final InstanceIdentifierContext mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext<?> mountPointIdentifier = controllerContext.toMountPointIdentifier(identifier);
mountPoint = mountPointIdentifier.getMountPoint();
modules = controllerContext.getAllModules(mountPoint);
return retValue;
}
throw new RestconfDocumentedException("RpcError message", null, retValue.getErrors());
- }
- catch (final InterruptedException e) {
+ } catch (final InterruptedException e) {
throw new RestconfDocumentedException(
"The operation was interrupted while executing and did not complete.", ErrorType.RPC,
ErrorTag.PARTIAL_OPERATION);
- }
- catch (final ExecutionException e) {
+ } catch (final ExecutionException e) {
Throwable cause = e.getCause();
- if (cause instanceof CancellationException) {
- throw new RestconfDocumentedException("The operation was cancelled while executing.", ErrorType.RPC,
- ErrorTag.PARTIAL_OPERATION);
- } else if (cause != null) {
+ if (cause != null) {
while (cause.getCause() != null) {
cause = cause.getCause();
}
throw new RestconfDocumentedException("The operation encountered an unexpected error while executing.",
e);
}
+ } catch (final CancellationException e) {
+ throw new RestconfDocumentedException("The operation was cancelled while executing.", ErrorType.RPC,
+ ErrorTag.PARTIAL_OPERATION);
}
}
final SchemaContext schemaContext;
if (identifier.contains(ControllerContext.MOUNT)) {
// mounted RPC call - look up mount instance.
- final InstanceIdentifierContext mountPointId = controllerContext.toMountPointIdentifier(identifier);
+ final InstanceIdentifierContext<?> mountPointId = controllerContext.toMountPointIdentifier(identifier);
mountPoint = mountPointId.getMountPoint();
schemaContext = mountPoint.getSchemaContext();
final int startOfRemoteRpcName = identifier.lastIndexOf(ControllerContext.MOUNT)
resultNodeSchema = rpcDataSchemaNode.getDataChildByName(result.getResult().getNodeType());
}
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, resultNodeSchema, mountPoint,
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, resultNodeSchema, mountPoint,
schemaContext), resultData);
}
@Override
public NormalizedNodeContext readConfigurationData(final String identifier, final UriInfo uriInfo) {
- final InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier(identifier);
+ final InstanceIdentifierContext<?> iiWithData = controllerContext.toInstanceIdentifier(identifier);
final DOMMountPoint mountPoint = iiWithData.getMountPoint();
NormalizedNode<?, ?> data = null;
final YangInstanceIdentifier normalizedII = iiWithData.getInstanceIdentifier();
return new NormalizedNodeContext(iiWithData, data);
}
+ // FIXME: Move this to proper place
+ @SuppressWarnings("unused")
private Integer parseDepthParameter(final UriInfo info) {
final String param = info.getQueryParameters(false).getFirst(UriParameters.DEPTH.toString());
if (Strings.isNullOrEmpty(param) || "unbounded".equals(param)) {
@Override
public NormalizedNodeContext readOperationalData(final String identifier, final UriInfo info) {
- final InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier(identifier);
+ final InstanceIdentifierContext<?> iiWithData = controllerContext.toInstanceIdentifier(identifier);
final DOMMountPoint mountPoint = iiWithData.getMountPoint();
NormalizedNode<?, ?> data = null;
final YangInstanceIdentifier normalizedII = iiWithData.getInstanceIdentifier();
return new NormalizedNodeContext(iiWithData, data);
}
- private boolean parsePrettyPrintParameter(final UriInfo info) {
- final String param = info.getQueryParameters(false).getFirst(UriParameters.PRETTY_PRINT.toString());
- return Boolean.parseBoolean(param);
- }
-
@Override
public Response updateConfigurationData(final String identifier, final NormalizedNodeContext payload) {
Preconditions.checkNotNull(identifier);
- final InstanceIdentifierContext<DataSchemaNode> iiWithData =
- (InstanceIdentifierContext<DataSchemaNode>) payload.getInstanceIdentifierContext();
+ final InstanceIdentifierContext<?> iiWithData = payload.getInstanceIdentifierContext();
validateInput(iiWithData.getSchemaNode(), payload);
validateTopLevelNodeName(payload, iiWithData.getInstanceIdentifier());
if (mountPoint != null) {
broker.commitConfigurationDataPut(mountPoint, normalizedII, payload.getData()).checkedGet();
} else {
- broker.commitConfigurationDataPut(normalizedII, payload.getData()).checkedGet();
+ broker.commitConfigurationDataPut(controllerContext.getGlobalSchema(), normalizedII, payload.getData()).checkedGet();
}
break;
* if key values or key count in payload and URI isn't equal
*
*/
- private void validateListKeysEqualityInPayloadAndUri(final InstanceIdentifierContext<DataSchemaNode> iiWithData,
+ private void validateListKeysEqualityInPayloadAndUri(final InstanceIdentifierContext<?> iiWithData,
final NormalizedNode<?, ?> payload) {
if (iiWithData.getSchemaNode() instanceof ListSchemaNode) {
final List<QName> keyDefinitions = ((ListSchemaNode) iiWithData.getSchemaNode()).getKeyDefinition();
if (payloadNodeQname.compareTo(yangIdent.getLastPathArgument().getNodeType()) > 0) {
return yangIdent;
}
- final InstanceIdentifierContext parentContext = payload.getInstanceIdentifierContext();
+ final InstanceIdentifierContext<?> parentContext = payload.getInstanceIdentifierContext();
final SchemaNode parentSchemaNode = parentContext.getSchemaNode();
if(parentSchemaNode instanceof DataNodeContainer) {
final DataNodeContainer cast = (DataNodeContainer) parentSchemaNode;
throw new RestconfDocumentedException("Input is required.", ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
}
- final URI payloadNS = payload.getData().getNodeType().getNamespace();
- if (payloadNS == null) {
- throw new RestconfDocumentedException(
- "Data has bad format. Root element node must have namespace (XML format) or module name(JSON format)",
- ErrorType.PROTOCOL, ErrorTag.UNKNOWN_NAMESPACE);
- }
+ // FIXME: move this to parsing stage (we can have augmentation nodes here which do not have namespace)
+// final URI payloadNS = payload.getData().getNodeType().getNamespace();
+// if (payloadNS == null) {
+// throw new RestconfDocumentedException(
+// "Data has bad format. Root element node must have namespace (XML format) or module name(JSON format)",
+// ErrorType.PROTOCOL, ErrorTag.UNKNOWN_NAMESPACE);
+// }
final DOMMountPoint mountPoint = payload.getInstanceIdentifierContext().getMountPoint();
- final InstanceIdentifierContext<DataSchemaNode> iiWithData = (InstanceIdentifierContext<DataSchemaNode>) payload.getInstanceIdentifierContext();
+ final InstanceIdentifierContext<?> iiWithData = payload.getInstanceIdentifierContext();
final YangInstanceIdentifier normalizedII = iiWithData.getInstanceIdentifier();
- final YangInstanceIdentifier resultII;
try {
if (mountPoint != null) {
broker.commitConfigurationDataPost(mountPoint, normalizedII, payload.getData()).checkedGet();
} else {
- broker.commitConfigurationDataPost(normalizedII, payload.getData()).checkedGet();
+ broker.commitConfigurationDataPost(controllerContext.getGlobalSchema(), normalizedII, payload.getData()).checkedGet();
}
} catch(final RestconfDocumentedException e) {
throw e;
@Override
public Response deleteConfigurationData(final String identifier) {
- final InstanceIdentifierContext<DataSchemaNode> iiWithData = controllerContext.toInstanceIdentifier(identifier);
+ final InstanceIdentifierContext<?> iiWithData = controllerContext.toInstanceIdentifier(identifier);
final DOMMountPoint mountPoint = iiWithData.getMountPoint();
final YangInstanceIdentifier normalizedII = iiWithData.getInstanceIdentifier();
return result;
}
- private boolean endsWithMountPoint(final String identifier) {
- return identifier.endsWith(ControllerContext.MOUNT) || identifier.endsWith(ControllerContext.MOUNT + "/");
- }
-
- private String addMountPointIdentifier(final String identifier) {
- final boolean endsWith = identifier.endsWith("/");
- if (endsWith) {
- return (identifier + ControllerContext.MOUNT);
- }
-
- return identifier + "/" + ControllerContext.MOUNT;
- }
-
- private TypeDef typeDefinition(final TypeDefinition<?> type, final QName nodeQName) {
- TypeDefinition<?> baseType = type;
- QName qName = nodeQName;
- while (baseType.getBaseType() != null) {
- if (baseType instanceof ExtendedType) {
- qName = baseType.getQName();
- }
- baseType = baseType.getBaseType();
- }
-
- return new TypeDef(baseType, qName);
-
- }
-
- private TypeDef typeDefinition(final DataSchemaNode node) {
- if (node instanceof LeafListSchemaNode) {
- return typeDefinition(((LeafListSchemaNode)node).getType(), node.getQName());
- } else if (node instanceof LeafSchemaNode) {
- return typeDefinition(((LeafSchemaNode)node).getType(), node.getQName());
- } else if (node instanceof AnyXmlSchemaNode) {
- return null;
- } else {
- throw new IllegalArgumentException("Unhandled parameter types: " + Arrays.<Object> asList(node).toString());
- }
- }
-
- private InstanceIdentifierContext normalizeInstanceIdentifierWithSchemaNode(
- final InstanceIdentifierContext iiWithSchemaNode) {
- return normalizeInstanceIdentifierWithSchemaNode(iiWithSchemaNode, false);
- }
-
- private InstanceIdentifierContext normalizeInstanceIdentifierWithSchemaNode(
- final InstanceIdentifierContext iiWithSchemaNode, final boolean unwrapLastListNode) {
- return new InstanceIdentifierContext(instanceIdentifierToReadableFormForNormalizeNode(
- iiWithSchemaNode.getInstanceIdentifier(), unwrapLastListNode), iiWithSchemaNode.getSchemaNode(),
- iiWithSchemaNode.getMountPoint(),iiWithSchemaNode.getSchemaContext());
- }
-
- private YangInstanceIdentifier instanceIdentifierToReadableFormForNormalizeNode(
- final YangInstanceIdentifier instIdentifier, final boolean unwrapLastListNode) {
- Preconditions.checkNotNull(instIdentifier, "Instance identifier can't be null");
- final List<PathArgument> result = new ArrayList<PathArgument>();
- final Iterator<PathArgument> iter = instIdentifier.getPathArguments().iterator();
- while (iter.hasNext()) {
- final PathArgument pathArgument = iter.next();
- if (pathArgument instanceof NodeIdentifierWithPredicates && (iter.hasNext() || unwrapLastListNode)) {
- result.add(new YangInstanceIdentifier.NodeIdentifier(pathArgument.getNodeType()));
- }
- result.add(pathArgument);
- }
- return YangInstanceIdentifier.create(result);
- }
-
- private boolean isDataContainerNode(final DataSchemaNode schemaNode) {
- if (schemaNode instanceof ContainerSchemaNode || schemaNode instanceof ListSchemaNode) {
- return true;
- }
- return false;
- }
-
public BigInteger getOperationalReceived() {
// TODO Auto-generated method stub
return null;
import java.util.Collection;
import java.util.Collections;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Config;
+import org.opendaylight.controller.config.yang.md.sal.rest.connector.Delete;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Get;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Operational;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Post;
@Override
public Config getConfig() {
final Config config = new Config();
+
final Get get = new Get();
get.setReceivedRequests(stats.getConfigGet());
+ get.setSuccessfulResponses(stats.getSuccessGetConfig());
+ get.setFailedResponses(stats.getFailureGetConfig());
config.setGet(get);
+
final Post post = new Post();
post.setReceivedRequests(stats.getConfigPost());
+ post.setSuccessfulResponses(stats.getSuccessPost());
+ post.setFailedResponses(stats.getFailurePost());
config.setPost(post);
+
final Put put = new Put();
put.setReceivedRequests(stats.getConfigPut());
+ put.setSuccessfulResponses(stats.getSuccessPut());
+ put.setFailedResponses(stats.getFailurePut());
config.setPut(put);
+
+ final Delete delete = new Delete();
+ delete.setReceivedRequests(stats.getConfigDelete());
+ delete.setSuccessfulResponses(stats.getSuccessDelete());
+ delete.setFailedResponses(stats.getFailureDelete());
+ config.setDelete(delete);
+
return config;
}
final Operational operational = new Operational();
final Get get = new Get();
get.setReceivedRequests(opGet);
+ get.setSuccessfulResponses(stats.getSuccessGetOperational());
+ get.setFailedResponses(stats.getFailureGetOperational());
operational.setGet(get);
return operational;
}
final BigInteger rpcInvoke = stats.getRpc();
final Rpcs rpcs = new Rpcs();
rpcs.setReceivedRequests(rpcInvoke);
- return rpcs ;
+ return rpcs;
}
-}
+}
\ No newline at end of file
import java.math.BigInteger;
import java.util.concurrent.atomic.AtomicLong;
import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.UriInfo;
import org.opendaylight.controller.sal.rest.api.RestconfService;
AtomicLong configPost = new AtomicLong();
AtomicLong configPut = new AtomicLong();
AtomicLong configDelete = new AtomicLong();
+ AtomicLong successGetConfig = new AtomicLong();
+ AtomicLong successGetOperational = new AtomicLong();
+ AtomicLong successPost = new AtomicLong();
+ AtomicLong successPut = new AtomicLong();
+ AtomicLong successDelete = new AtomicLong();
+ AtomicLong failureGetConfig = new AtomicLong();
+ AtomicLong failureGetOperational = new AtomicLong();
+ AtomicLong failurePost = new AtomicLong();
+ AtomicLong failurePut = new AtomicLong();
+ AtomicLong failureDelete = new AtomicLong();
private static final StatisticsRestconfServiceWrapper INSTANCE = new StatisticsRestconfServiceWrapper(RestconfImpl.getInstance());
@Override
public NormalizedNodeContext readConfigurationData(final String identifier, final UriInfo uriInfo) {
configGet.incrementAndGet();
- return delegate.readConfigurationData(identifier, uriInfo);
+ NormalizedNodeContext normalizedNodeContext = null;
+ try {
+ normalizedNodeContext = delegate.readConfigurationData(identifier, uriInfo);
+ if (normalizedNodeContext.getData() != null) {
+ successGetConfig.incrementAndGet();
+ }
+ else {
+ failureGetConfig.incrementAndGet();
+ }
+ } catch (Exception e) {
+ failureGetConfig.incrementAndGet();
+ throw e;
+ }
+ return normalizedNodeContext;
}
@Override
public NormalizedNodeContext readOperationalData(final String identifier, final UriInfo uriInfo) {
operationalGet.incrementAndGet();
- return delegate.readOperationalData(identifier, uriInfo);
+ NormalizedNodeContext normalizedNodeContext = null;
+ try {
+ normalizedNodeContext = delegate.readOperationalData(identifier, uriInfo);
+ if (normalizedNodeContext.getData() != null) {
+ successGetOperational.incrementAndGet();
+ }
+ else {
+ failureGetOperational.incrementAndGet();
+ }
+ } catch (Exception e) {
+ failureGetOperational.incrementAndGet();
+ throw e;
+ }
+ return normalizedNodeContext;
}
@Override
public Response updateConfigurationData(final String identifier, final NormalizedNodeContext payload) {
configPut.incrementAndGet();
- return delegate.updateConfigurationData(identifier, payload);
+ Response response = null;
+ try {
+ response = delegate.updateConfigurationData(identifier, payload);
+ if (response.getStatus() == Status.OK.getStatusCode()) {
+ successPut.incrementAndGet();
+ }
+ else {
+ failurePut.incrementAndGet();
+ }
+ } catch (Exception e) {
+ failurePut.incrementAndGet();
+ throw e;
+ }
+ return response;
}
@Override
public Response createConfigurationData(final String identifier, final NormalizedNodeContext payload, final UriInfo uriInfo) {
configPost.incrementAndGet();
- return delegate.createConfigurationData(identifier, payload, uriInfo);
+ Response response = null;
+ try {
+ response = delegate.createConfigurationData(identifier, payload, uriInfo);
+ if (response.getStatus() == Status.OK.getStatusCode()) {
+ successPost.incrementAndGet();
+ }
+ else {
+ failurePost.incrementAndGet();
+ }
+ } catch (Exception e) {
+ failurePost.incrementAndGet();
+ throw e;
+ }
+ return response;
}
@Override
public Response createConfigurationData(final NormalizedNodeContext payload, final UriInfo uriInfo) {
configPost.incrementAndGet();
- return delegate.createConfigurationData(payload, uriInfo);
+ Response response = null;
+ try {
+ response = delegate.createConfigurationData(payload, uriInfo);
+ if (response.getStatus() == Status.OK.getStatusCode()) {
+ successPost.incrementAndGet();
+ }
+ else {
+ failurePost.incrementAndGet();
+ }
+ }catch (Exception e) {
+ failurePost.incrementAndGet();
+ throw e;
+ }
+ return response;
}
@Override
public Response deleteConfigurationData(final String identifier) {
- return delegate.deleteConfigurationData(identifier);
+ configDelete.incrementAndGet();
+ Response response = null;
+ try {
+ response = delegate.deleteConfigurationData(identifier);
+ if (response.getStatus() == Status.OK.getStatusCode()) {
+ successDelete.incrementAndGet();
+ }
+ else {
+ failureDelete.incrementAndGet();
+ }
+ } catch (Exception e) {
+ failureDelete.incrementAndGet();
+ throw e;
+ }
+ return response;
}
@Override
public BigInteger getRpc() {
return BigInteger.valueOf(rpc.get());
}
-}
+
+ public BigInteger getSuccessGetConfig() {
+ return BigInteger.valueOf(successGetConfig.get());
+ }
+
+ public BigInteger getSuccessGetOperational() {
+ return BigInteger.valueOf(successGetOperational.get());
+ }
+
+ public BigInteger getSuccessPost() {
+ return BigInteger.valueOf(successPost.get());
+ }
+
+ public BigInteger getSuccessPut() {
+ return BigInteger.valueOf(successPut.get());
+ }
+
+ public BigInteger getSuccessDelete() {
+ return BigInteger.valueOf(successDelete.get());
+ }
+
+ public BigInteger getFailureGetConfig() {
+ return BigInteger.valueOf(failureGetConfig.get());
+ }
+
+ public BigInteger getFailureGetOperational() {
+ return BigInteger.valueOf(failureGetOperational.get());
+ }
+
+ public BigInteger getFailurePost() {
+ return BigInteger.valueOf(failurePost.get());
+ }
+
+ public BigInteger getFailurePut() {
+ return BigInteger.valueOf(failurePut.get());
+ }
+
+ public BigInteger getFailureDelete() {
+ return BigInteger.valueOf(failureDelete.get());
+ }
+}
\ No newline at end of file
leaf received-requests {
type uint64;
}
+
+ leaf successful-responses {
+ type uint64;
+ }
+
+ leaf failed-responses {
+ type uint64;
+ }
}
augment "/config:modules/config:module/config:configuration" {
container put {
uses statistics;
}
+
+ container delete {
+ uses statistics;
+ }
}
container operational {
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.cnsn.to.json.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import com.google.gson.stream.JsonReader;
-import com.google.gson.stream.JsonToken;
-import java.io.IOException;
-import java.io.StringReader;
-import java.util.Map;
-import java.util.Set;
-import org.junit.BeforeClass;
-import org.opendaylight.controller.sal.restconf.impl.test.YangAndXmlAndDataSchemaLoader;
-import org.opendaylight.controller.sal.restconf.impl.test.structures.Cont;
-import org.opendaylight.controller.sal.restconf.impl.test.structures.Lf;
-import org.opendaylight.controller.sal.restconf.impl.test.structures.LfLst;
-import org.opendaylight.controller.sal.restconf.impl.test.structures.Lst;
-import org.opendaylight.controller.sal.restconf.impl.test.structures.LstItem;
-
-public class CnSnJsonBasicYangTypesTest extends YangAndXmlAndDataSchemaLoader {
-
- @BeforeClass
- public static void initialize() {
- dataLoad("/cnsn-to-json/simple-yang-types", 1, "simple-yang-types", "cont1");
- }
-
- private void verifyJsonOutputForEmptyData(final String jsonOutput) {
- assertNotNull(jsonOutput);
- final StringReader strReader = new StringReader(jsonOutput);
- final JsonReader jReader = new JsonReader(strReader);
-
- String exception = null;
- Cont dataFromJson = null;
- try {
- dataFromJson = jsonReadCont1(jReader);
- } catch (final IOException e) {
- exception = e.getMessage();
- }
-
- assertNotNull("Data structures from json are missing.", dataFromJson);
- checkDataFromJsonEmpty(dataFromJson);
-
- assertNull("Error during reading Json output: " + exception, exception);
- }
-
- private void verifyJsonOutput(final String jsonOutput) {
- assertNotNull(jsonOutput);
- final StringReader strReader = new StringReader(jsonOutput);
- final JsonReader jReader = new JsonReader(strReader);
-
- String exception = null;
- Cont dataFromJson = null;
- try {
- dataFromJson = jsonReadCont1(jReader);
- } catch (final IOException e) {
- exception = e.getMessage();
- }
-
- assertNotNull("Data structures from json are missing.", dataFromJson);
- checkDataFromJson(dataFromJson);
-
- assertNull("Error during reading Json output: " + exception, exception);
- }
-
- private Cont jsonReadCont1(final JsonReader jReader) throws IOException {
- jReader.beginObject();
- assertNotNull("cont1 is missing.", jReader.hasNext());
-
- Cont dataFromJson = new Cont(jReader.nextName());
- dataFromJson = jsonReadCont1Elements(jReader, dataFromJson);
-
- assertFalse("cont shouldn't have other element.", jReader.hasNext());
- jReader.endObject();
- return dataFromJson;
-
- }
-
- private Cont jsonReadCont1Elements(final JsonReader jReader, final Cont redData) throws IOException {
- jReader.beginObject();
- while (jReader.hasNext()) {
- final String keyName = jReader.nextName();
- if (keyName.equals("lf11")) {
- redData.addLf(new Lf(keyName, nextValue(jReader)));
- } else if (keyName.equals("lflst11")) {
- LfLst lfLst = new LfLst(keyName);
- lfLst = jsonReadLflstValues(jReader, lfLst);
- redData.addLfLst(lfLst);
- } else if (keyName.equals("lflst12")) {
- final LfLst lfLst = new LfLst(keyName);
- jsonReadLflstValues(jReader, lfLst);
- redData.addLfLst(lfLst);
- } else if (keyName.equals("lst11")) {
- Lst lst = new Lst(keyName);
- lst = jsonReadLst11(jReader, lst);
- redData.addLst(lst);
- } else {
- assertTrue("Key " + keyName + " doesn't exists in yang file.", false);
- }
- }
- jReader.endObject();
- return redData;
-
- }
-
- private Lst jsonReadLst11(final JsonReader jReader, final Lst lst) throws IOException {
- jReader.beginArray();
-
- while (jReader.hasNext()) {
- final LstItem lstItem = jsonReadLst11Elements(jReader);
- lst.addLstItem(lstItem);
- }
- jReader.endArray();
- return lst;
- }
-
- private LstItem jsonReadLst11Elements(final JsonReader jReader) throws IOException {
- final LstItem lstItem = new LstItem();
- jReader.beginObject();
- while (jReader.hasNext()) {
- final String keyName = jReader.nextName();
- if (keyName.equals("lf111")) {
- lstItem.addLf(new Lf(keyName, nextValue(jReader)));
- } else if (keyName.equals("lf112")) {
- lstItem.addLf(new Lf(keyName, nextValue(jReader)));
- } else if (keyName.equals("cont111")) {
- Cont cont = new Cont(keyName);
- cont = jsonReadCont111(jReader, cont);
- lstItem.addCont(cont);
- } else if (keyName.equals("lst111")) {
- Lst lst = new Lst(keyName);
- lst = jsonReadLst111(jReader, lst);
- lstItem.addLst(lst);
- } else if (keyName.equals("lst112")) {
- Lst lst = new Lst(keyName);
- lst = jsonReadLst112(jReader, lst);
- lstItem.addLst(lst);
- } else {
- assertTrue("Key " + keyName + " doesn't exists in yang file.", false);
- }
- }
- jReader.endObject();
- return lstItem;
- }
-
- private Lst jsonReadLst112(final JsonReader jReader, final Lst lst) throws IOException {
- jReader.beginArray();
- while (jReader.hasNext()) {
- final LstItem lstItem = jsonReadLst112Elements(jReader);
- lst.addLstItem(lstItem);
- }
- jReader.endArray();
- return lst;
- }
-
- private LstItem jsonReadLst112Elements(final JsonReader jReader) throws IOException {
- final LstItem lstItem = new LstItem();
- jReader.beginObject();
- if (jReader.hasNext()) {
- final String keyName = jReader.nextName();
- if (keyName.equals("lf1121")) {
- lstItem.addLf(new Lf(keyName, nextValue(jReader)));
- }
- }
- jReader.endObject();
- return lstItem;
-
- }
-
- private Lst jsonReadLst111(final JsonReader jReader, final Lst lst) throws IOException {
- jReader.beginArray();
- while (jReader.hasNext()) {
- final LstItem lstItem = jsonReadLst111Elements(jReader);
- lst.addLstItem(lstItem);
- }
- jReader.endArray();
- return lst;
- }
-
- private LstItem jsonReadLst111Elements(final JsonReader jReader) throws IOException {
- final LstItem lstItem = new LstItem();
- jReader.beginObject();
- if (jReader.hasNext()) {
- final String keyName = jReader.nextName();
- if (keyName.equals("lf1111")) {
- lstItem.addLf(new Lf(keyName, nextValue(jReader)));
- }
- }
- jReader.endObject();
- return lstItem;
- }
-
- private Object nextValue(final JsonReader jReader) throws IOException {
- if (jReader.peek().equals(JsonToken.NULL)) {
- jReader.nextNull();
- return null;
- } else if (jReader.peek().equals(JsonToken.NUMBER)) {
- return jReader.nextInt();
- } else {
- return jReader.nextString();
- }
- }
-
- private Cont jsonReadCont111(final JsonReader jReader, Cont cont) throws IOException {
- jReader.beginObject();
- cont = jsonReadCont111Elements(jReader, cont);
- jReader.endObject();
- return cont;
- }
-
- private Cont jsonReadCont111Elements(final JsonReader jReader, final Cont cont) throws IOException {
- while (jReader.hasNext()) {
- final String keyName = jReader.nextName();
- if (keyName.equals("lf1111")) {
- cont.addLf(new Lf(keyName, nextValue(jReader)));
- } else if (keyName.equals("lflst1111")) {
- LfLst lfLst = new LfLst(keyName);
- lfLst = jsonReadLflstValues(jReader, lfLst);
- cont.addLfLst(lfLst);
- } else if (keyName.equals("lst1111")) {
- Lst lst = new Lst(keyName);
- lst = jsonReadLst1111(jReader, lst);
- cont.addLst(lst);
- } else {
- assertTrue("Key " + keyName + " doesn't exists in yang file.", false);
- }
- }
- return cont;
-
- }
-
- private Lst jsonReadLst1111(final JsonReader jReader, final Lst lst) throws IOException {
- jReader.beginArray();
- while (jReader.hasNext()) {
- final LstItem lstItem = jsonReadLst1111Elements(jReader);
- lst.addLstItem(lstItem);
- }
- jReader.endArray();
- return lst;
- }
-
- private LstItem jsonReadLst1111Elements(final JsonReader jReader) throws IOException {
- jReader.beginObject();
- final LstItem lstItem = new LstItem();
- while (jReader.hasNext()) {
- final String keyName = jReader.nextName();
- if (keyName.equals("lf1111A") || keyName.equals("lf1111B")) {
- lstItem.addLf(new Lf(keyName, nextValue(jReader)));
- }
- }
- jReader.endObject();
- return lstItem;
- }
-
- private LfLst jsonReadLflstValues(final JsonReader jReader, final LfLst lfLst) throws IOException {
- jReader.beginArray();
- while (jReader.hasNext()) {
- lfLst.addLf(new Lf(nextValue(jReader)));
- }
- jReader.endArray();
- return lfLst;
- }
-
- private void checkDataFromJsonEmpty(final Cont dataFromJson) {
- assertTrue(dataFromJson.getLfs().isEmpty());
- assertTrue(dataFromJson.getLfLsts().isEmpty());
- assertTrue(dataFromJson.getConts().isEmpty());
-
- final Map<String, Lst> lsts = dataFromJson.getLsts();
- assertEquals(1, lsts.size());
- final Lst lst11 = lsts.get("lst11");
- assertNotNull(lst11);
- final Set<LstItem> lstItems = lst11.getLstItems();
- assertNotNull(lstItems);
-
- LstItem lst11_1 = null;
- LstItem lst11_2 = null;
- LstItem lst11_3 = null;
- for (final LstItem lstItem : lstItems) {
- if (lstItem.getLfs().get("lf111").getValue().equals(1)) {
- lst11_1 = lstItem;
- } else if (lstItem.getLfs().get("lf111").getValue().equals(2)) {
- lst11_2 = lstItem;
- } else if (lstItem.getLfs().get("lf111").getValue().equals(3)) {
- lst11_3 = lstItem;
- }
- }
-
- assertNotNull(lst11_1);
- assertNotNull(lst11_2);
- assertNotNull(lst11_3);
-
- // lst11_1
- assertTrue(lst11_1.getLfLsts().isEmpty());
- assertEquals(1, lst11_1.getLfs().size());
- assertEquals(1, lst11_1.getConts().size());
- assertEquals(1, lst11_1.getLsts().size());
- assertEquals(lst11_1.getLsts().get("lst111"), new Lst("lst111").addLstItem(new LstItem().addLf("lf1111", 35))
- .addLstItem(new LstItem().addLf("lf1111", 34)).addLstItem(new LstItem()).addLstItem(new LstItem()));
- assertEquals(lst11_1.getConts().get("cont111"), new Cont("cont111"));
- // : lst11_1
-
- // lst11_2
- assertTrue(lst11_2.getLfLsts().isEmpty());
- assertEquals(1, lst11_2.getLfs().size());
- assertEquals(1, lst11_2.getConts().size());
- assertEquals(1, lst11_2.getLsts().size());
-
- final Cont lst11_2_cont111 = lst11_2.getConts().get("cont111");
-
- // -cont111
- assertNotNull(lst11_2_cont111);
- assertTrue(lst11_2_cont111.getLfs().isEmpty());
- assertEquals(1, lst11_2_cont111.getLfLsts().size());
- assertEquals(1, lst11_2_cont111.getLsts().size());
- assertTrue(lst11_2_cont111.getConts().isEmpty());
-
- assertEquals(new LfLst("lflst1111").addLf(1024).addLf(4096), lst11_2_cont111.getLfLsts().get("lflst1111"));
- assertEquals(
- new Lst("lst1111").addLstItem(new LstItem().addLf("lf1111B", 4)).addLstItem(
- new LstItem().addLf("lf1111A", "lf1111A str12")), lst11_2_cont111.getLsts().get("lst1111"));
- // :-cont111
- assertEquals(lst11_2.getLsts().get("lst112"), new Lst("lst112").addLstItem(new LstItem()));
- // : lst11_2
-
- // lst11_3
- assertEquals(1, lst11_3.getLfs().size());
- assertTrue(lst11_3.getLfLsts().isEmpty());
- assertTrue(lst11_3.getLsts().isEmpty());
- assertTrue(lst11_3.getLsts().isEmpty());
-
- // -cont111
- final Cont lst11_3_cont111 = lst11_3.getConts().get("cont111");
- assertEquals(0, lst11_3_cont111.getLfs().size());
- assertEquals(0, lst11_3_cont111.getLfLsts().size());
- assertEquals(1, lst11_3_cont111.getLsts().size());
- assertTrue(lst11_3_cont111.getConts().isEmpty());
-
- assertEquals(new Lst("lst1111").addLstItem(new LstItem()).addLstItem(new LstItem()), lst11_3_cont111.getLsts()
- .get("lst1111"));
- // :-cont111
- // : lst11_3
-
- }
-
- private void checkDataFromJson(final Cont dataFromJson) {
- assertNotNull(dataFromJson.getLfs().get("lf11"));
- assertEquals(dataFromJson.getLfs().get("lf11"), new Lf("lf11", "lf"));
-
- LfLst lflst11 = null;
- LfLst lflst12 = null;
-
- lflst11 = dataFromJson.getLfLsts().get("lflst11");
- lflst12 = dataFromJson.getLfLsts().get("lflst12");
-
- assertNotNull(lflst11);
- assertNotNull(lflst12);
-
- assertEquals(3, lflst11.getLfs().size());
- assertTrue(lflst11.getLfs().contains(new Lf(55)));
- assertTrue(lflst11.getLfs().contains(new Lf(56)));
- assertTrue(lflst11.getLfs().contains(new Lf(57)));
-
- assertEquals(3, lflst12.getLfs().size());
- assertTrue(lflst12.getLfs().contains(new Lf("lflst12 str1")));
- assertTrue(lflst12.getLfs().contains(new Lf("lflst12 str2")));
- assertTrue(lflst12.getLfs().contains(new Lf("lflst12 str3")));
-
- assertEquals(1, dataFromJson.getLsts().size());
- final Lst lst11 = dataFromJson.getLsts().get("lst11");
- assertNotNull(lst11);
- assertEquals(2, lst11.getLstItems().size());
-
- LstItem lst11_1 = null;
- LstItem lst11_2 = null;
- for (final LstItem lstItem : lst11.getLstItems()) {
- final Lf lf = lstItem.getLfs().get("lf111");
- if (lf != null && lf.getValue().equals(140)) {
- lst11_1 = lstItem;
- } else if (lf != null && lf.getValue().equals(141)) {
- lst11_2 = lstItem;
- }
- }
-
- checkLst11_1(lst11_1);
- checkLst11_2(lst11_2);
- }
-
- private void checkLst11_2(final LstItem lst11_2) {
- assertNotNull(lst11_2);
- assertEquals(2, lst11_2.getLfs().size());
- assertEquals(1, lst11_2.getConts().size());
- assertEquals(2, lst11_2.getLsts().size());
-
- assertEquals(lst11_2.getLfs().get("lf112"), new Lf("lf112", "lf112 str2"));
-
- final Cont lst11_2_cont = lst11_2.getConts().get("cont111");
- assertEquals(0, lst11_2_cont.getConts().size());
- assertEquals(1, lst11_2_cont.getLfLsts().size());
- assertEquals(1, lst11_2_cont.getLfs().size());
- assertEquals(1, lst11_2_cont.getLsts().size());
-
- // cont111 check
- assertEquals(new Lf("lf1111", "lf1111 str2"), lst11_2_cont.getLfs().get("lf1111"));
- assertEquals(new LfLst("lflst1111").addLf(new Lf(2049)).addLf(new Lf(1025)).addLf(new Lf(4097)), lst11_2_cont
- .getLfLsts().get("lflst1111"));
-
- assertNotNull(lst11_2_cont.getLsts().get("lst1111"));
- checkLst1111(lst11_2_cont.getLsts().get("lst1111").getLstItems(), new Lf("lf1111A", "lf1111A str21"), new Lf(
- "lf1111B", 5), new Lf("lf1111A", "lf1111A str22"), new Lf("lf1111B", 8));
-
- checkLst11x(lst11_2.getLsts().get("lst111"), new LstItem().addLf(new Lf("lf1111", 55)),
- new LstItem().addLf(new Lf("lf1111", 56)));
- checkLst11x(lst11_2.getLsts().get("lst112"), new LstItem().addLf(new Lf("lf1121", "lf1121 str22")),
- new LstItem().addLf(new Lf("lf1121", "lf1121 str21")));
- }
-
- private void checkLst11_1(final LstItem lst11_1) {
- assertNotNull(lst11_1);
-
- assertEquals(2, lst11_1.getLfs().size());
- assertEquals(1, lst11_1.getConts().size());
- assertEquals(2, lst11_1.getLsts().size());
-
- assertEquals(lst11_1.getLfs().get("lf112"), new Lf("lf112", "lf112 str"));
-
- final Cont lst11_1_cont = lst11_1.getConts().get("cont111");
- assertEquals(0, lst11_1_cont.getConts().size());
- assertEquals(1, lst11_1_cont.getLfLsts().size());
- assertEquals(1, lst11_1_cont.getLfs().size());
- assertEquals(1, lst11_1_cont.getLsts().size());
-
- // cont111 check
- assertEquals(new Lf("lf1111", "lf1111 str"), lst11_1_cont.getLfs().get("lf1111"));
- assertEquals(new LfLst("lflst1111").addLf(new Lf(2048)).addLf(new Lf(1024)).addLf(new Lf(4096)), lst11_1_cont
- .getLfLsts().get("lflst1111"));
-
- assertNotNull(lst11_1_cont.getLsts().get("lst1111"));
- checkLst1111(lst11_1_cont.getLsts().get("lst1111").getLstItems(), new Lf("lf1111A", "lf1111A str11"), new Lf(
- "lf1111B", 4), new Lf("lf1111A", "lf1111A str12"), new Lf("lf1111B", 7));
-
- checkLst11x(lst11_1.getLsts().get("lst111"), new LstItem().addLf(new Lf("lf1111", 65)));
- checkLst11x(lst11_1.getLsts().get("lst112"), new LstItem().addLf(new Lf("lf1121", "lf1121 str11")));
- }
-
- private void checkLst11x(final Lst lst, final LstItem... lstItems) {
- assertNotNull(lst);
-
- final Lst requiredLst = new Lst(lst.getName());
- for (final LstItem lstItem : lstItems) {
- requiredLst.addLstItem(lstItem);
- }
-
- assertEquals(requiredLst, lst);
-
- }
-
- private void checkLst1111(final Set<LstItem> lstItems, final Lf lf11, final Lf lf12, final Lf lf21, final Lf lf22) {
- LstItem lst11_1_cont_lst1111_1 = null;
- LstItem lst11_1_cont_lst1111_2 = null;
- for (final LstItem lstItem : lstItems) {
- if (new LstItem().addLf(lf11).addLf(lf12).equals(lstItem)) {
- lst11_1_cont_lst1111_1 = lstItem;
- } else if (new LstItem().addLf(lf21).addLf(lf22).equals(lstItem)) {
- lst11_1_cont_lst1111_2 = lstItem;
- }
- }
-
- assertNotNull(lst11_1_cont_lst1111_1);
- assertNotNull(lst11_1_cont_lst1111_2);
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.cnsn.to.json.test;
-
-import java.util.List;
-import java.util.Set;
-import org.junit.BeforeClass;
-import org.opendaylight.controller.sal.restconf.impl.test.YangAndXmlAndDataSchemaLoader;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.ConstraintDefinition;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.GroupingDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.opendaylight.yangtools.yang.model.api.Status;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.UsesNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class CnSnToJsonIncorrectTopLevelTest extends YangAndXmlAndDataSchemaLoader {
-
- private static final Logger LOG = LoggerFactory.getLogger(CnSnToJsonIncorrectTopLevelTest.class);
-
- @BeforeClass
- public static void initialize() {
- dataLoad("/cnsn-to-json/simple-data-types");
- }
-
- private class IncorrectDataSchema implements DataSchemaNode, DataNodeContainer {
-
- @Override
- public String getDescription() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public SchemaPath getPath() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public QName getQName() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public String getReference() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Status getStatus() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<UnknownSchemaNode> getUnknownSchemaNodes() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Set<DataSchemaNode> getChildNodes() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public DataSchemaNode getDataChildByName(final QName arg0) {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public DataSchemaNode getDataChildByName(final String arg0) {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Set<GroupingDefinition> getGroupings() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Set<TypeDefinition<?>> getTypeDefinitions() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Set<UsesNode> getUses() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public ConstraintDefinition getConstraints() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public boolean isAddedByUses() {
- // TODO Auto-generated method stub
- return false;
- }
-
- @Override
- public boolean isAugmenting() {
- // TODO Auto-generated method stub
- return false;
- }
-
- @Override
- public boolean isConfiguration() {
- // TODO Auto-generated method stub
- return false;
- }
-
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.cnsn.to.json.test;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import org.junit.BeforeClass;
-import org.opendaylight.controller.sal.restconf.impl.test.YangAndXmlAndDataSchemaLoader;
-
-/**
- *
- * All tests are commented now because leafref isn't supported now
- *
- */
-
-public class CnSnToJsonLeafrefType extends YangAndXmlAndDataSchemaLoader {
-
- @BeforeClass
- public static void initialization() {
- dataLoad("/cnsn-to-json/leafref", 2, "main-module", "cont");
- }
-
- private void validateJson(final String regex, final String value) {
- assertNotNull(value);
- final Pattern ptrn = Pattern.compile(regex, Pattern.DOTALL);
- final Matcher mtch = ptrn.matcher(value);
- assertTrue(mtch.matches());
- }
-
-}
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
import org.opendaylight.yangtools.yang.parser.builder.impl.ContainerSchemaNodeBuilder;
import org.opendaylight.yangtools.yang.parser.builder.impl.LeafSchemaNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class CnSnToJsonNotExistingLeafTypeTest extends YangAndXmlAndDataSchemaLoader {
- private static final Logger LOG = LoggerFactory.getLogger(CnSnToJsonNotExistingLeafTypeTest.class);
-
@BeforeClass
public static void initialize() {
dataLoad("/cnsn-to-json/simple-data-types");
}
+ @SuppressWarnings("unused")
private DataSchemaNode prepareDataSchemaNode() {
final ContainerSchemaNodeBuilder contBuild = new ContainerSchemaNodeBuilder("module", 1, TestUtils.buildQName("cont",
"simple:uri", "2012-12-17"), SchemaPath.create(true, QName.create("dummy")));
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.CheckedFuture;
import java.io.FileNotFoundException;
restconfImpl = RestconfImpl.getInstance();
restconfImpl.setBroker(brokerFacade);
restconfImpl.setControllerContext(controllerContext);
- when(brokerFacade.commitConfigurationDataPut(any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
+ when(brokerFacade.commitConfigurationDataPut(any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
.thenReturn(mock(CheckedFuture.class));
}
testNodeContainer.withChild(leafKey2.build());
}
- final NormalizedNodeContext testCompositeContext = new NormalizedNodeContext(new InstanceIdentifierContext(
+ final NormalizedNodeContext testCompositeContext = new NormalizedNodeContext(new InstanceIdentifierContext<>(
null, testNodeSchemaNode, null, schemaContextTestModule), testNodeContainer.build());
restconfImpl.updateConfigurationData(toUri(uriKey1, uriKey2), testCompositeContext);
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
/**
SchemaPath type = SchemaPath.create(true, qname);
- YangInstanceIdentifier instanceID = YangInstanceIdentifier.builder().node(qname).toInstance();
+ YangInstanceIdentifier instanceID = YangInstanceIdentifier.builder().node(qname).build();
@Mock
DOMDataReadOnlyTransaction rTransaction;
brokerFacade.readOperationalData(instanceID);
}
- @SuppressWarnings("unchecked")
@Test
public void testInvokeRpc() throws Exception {
final DOMRpcResult expResult = mock(DOMRpcResult.class);
@Ignore
@Test
public void testCommitConfigurationDataPut() {
+ @SuppressWarnings("unchecked")
final CheckedFuture<Void, TransactionCommitFailedException> expFuture = mock(CheckedFuture.class);
when(wTransaction.submit()).thenReturn(expFuture);
- final Future<Void> actualFuture = brokerFacade.commitConfigurationDataPut(instanceID, dummyNode);
+ final Future<Void> actualFuture = brokerFacade.commitConfigurationDataPut((SchemaContext)null, instanceID, dummyNode);
assertSame("commitConfigurationDataPut", expFuture, actualFuture);
@Test
public void testCommitConfigurationDataPost() {
+ @SuppressWarnings("unchecked")
final CheckedFuture<Void, TransactionCommitFailedException> expFuture = mock(CheckedFuture.class);
- final NormalizedNode<?, ?> dummyNode2 = createDummyNode("dummy:namespace2", "2014-07-01", "dummy local name2");
-
- when(rwTransaction.read(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
- wrapDummyNode(dummyNode2));
-
when(rwTransaction.exists(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
- wrapExistence(true));
+ wrapExistence(false));
when(rwTransaction.submit()).thenReturn(expFuture);
final CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade.commitConfigurationDataPost(
- YangInstanceIdentifier.builder().build(), dummyNode);
+ (SchemaContext)null, YangInstanceIdentifier.builder().build(), dummyNode);
assertSame("commitConfigurationDataPost", expFuture, actualFuture);
final InOrder inOrder = inOrder(domDataBroker, rwTransaction);
inOrder.verify(domDataBroker).newReadWriteTransaction();
- inOrder.verify(rwTransaction).merge(LogicalDatastoreType.CONFIGURATION, instanceID, dummyNode);
+ inOrder.verify(rwTransaction).exists(LogicalDatastoreType.CONFIGURATION, instanceID);
+ inOrder.verify(rwTransaction).put(LogicalDatastoreType.CONFIGURATION, instanceID, dummyNode);
inOrder.verify(rwTransaction).submit();
}
@Test(expected = RestconfDocumentedException.class)
public void testCommitConfigurationDataPostAlreadyExists() {
- when(rwTransaction.read(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
- dummyNodeInFuture);
+ final CheckedFuture<Boolean, ReadFailedException> successFuture = Futures.immediateCheckedFuture(Boolean.TRUE);
+ when(rwTransaction.exists(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
+ successFuture);
try {
- brokerFacade.commitConfigurationDataPost(instanceID, dummyNode);
+ // Schema context is only necessary for ensuring parent structure
+ brokerFacade.commitConfigurationDataPost((SchemaContext)null, instanceID, dummyNode);
} catch (final RestconfDocumentedException e) {
assertEquals("getErrorTag", RestconfError.ErrorTag.DATA_EXISTS, e.getErrors().get(0).getErrorTag());
throw e;
@Test
public void testCommitConfigurationDataDelete() {
+ @SuppressWarnings("unchecked")
final CheckedFuture<Void, TransactionCommitFailedException> expFuture = mock(CheckedFuture.class);
when(wTransaction.submit()).thenReturn(expFuture);
- final NormalizedNode<?, ?> dummyNode2 = createDummyNode("dummy:namespace2", "2014-07-01", "dummy local name2");
-
-
final CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade
.commitConfigurationDataDelete(instanceID);
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
+
import java.io.ByteArrayInputStream;
import java.net.URI;
import java.net.URISyntaxException;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
contNode.withChild(lfNode);
container.withChild(contNode.build());
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, rpcInputSchemaNode, null, schema), container.build());
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, rpcInputSchemaNode, null, schema), container.build());
}
@Test
final DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> containerBuilder =
Builders.containerBuilder(rpcInputSchemaNode);
- final NormalizedNodeContext payload = new NormalizedNodeContext(new InstanceIdentifierContext(null, rpcInputSchemaNode,
+ final NormalizedNodeContext payload = new NormalizedNodeContext(new InstanceIdentifierContext<>(null, rpcInputSchemaNode,
null, schemaContext), containerBuilder.build());
final BrokerFacade brokerFacade = mock(BrokerFacade.class);
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import com.google.common.util.concurrent.CheckedFuture;
import java.io.FileNotFoundException;
import java.io.UnsupportedEncodingException;
return resourceConfig;
}
+ @SuppressWarnings("unchecked")
@Test
public void deleteConfigStatusCodes() throws UnsupportedEncodingException {
final String uri = "/config/test-interface:interfaces";
@BeforeClass
public static void init() throws FileNotFoundException {
- SchemaContext schemaContextTestModule = TestUtils.loadSchemaContext("/common/augment/yang");
+ final SchemaContext schemaContextTestModule = TestUtils.loadSchemaContext("/common/augment/yang");
controllerContext.setSchemas(schemaContextTestModule);
}
@Test
public void augmentedNodesInUri() {
- InstanceIdentifierContext iiWithData = controllerContext.toInstanceIdentifier("main:cont/augment-main-a:cont1");
+ InstanceIdentifierContext<?> iiWithData = controllerContext.toInstanceIdentifier("main:cont/augment-main-a:cont1");
assertEquals("ns:augment:main:a", iiWithData.getSchemaNode().getQName().getNamespace().toString());
iiWithData = controllerContext.toInstanceIdentifier("main:cont/augment-main-b:cont1");
assertEquals("ns:augment:main:b", iiWithData.getSchemaNode().getQName().getNamespace().toString());
try {
controllerContext.toInstanceIdentifier("main:cont/cont1");
fail("Expected exception");
- } catch (RestconfDocumentedException e) {
+ } catch (final RestconfDocumentedException e) {
assertTrue(e.getErrors().get(0).getErrorMessage().contains("is added as augment from more than one module"));
}
}
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.FileNotFoundException;
import java.io.UnsupportedEncodingException;
private static RestconfImpl restconfImpl;
private static SchemaContext schemaContextYangsIetf;
private static SchemaContext schemaContextTestModule;
+ @SuppressWarnings("rawtypes")
private static NormalizedNode answerFromGet;
private static SchemaContext schemaContextModules;
/**
* MountPoint test. URI represents mount point.
*/
+ @SuppressWarnings("unchecked")
@Test
public void getDataWithUrlMountPoint() throws UnsupportedEncodingException, URISyntaxException, ParseException {
when(brokerFacade.readConfigurationData(any(DOMMountPoint.class), any(YangInstanceIdentifier.class))).thenReturn(
* Slashes in URI behind mount point. lst1 element with key GigabitEthernet0%2F0%2F0%2F0 (GigabitEthernet0/0/0/0) is
* requested via GET HTTP operation. It is tested whether %2F character is replaced with simple / in
* InstanceIdentifier parameter in method
- * {@link BrokerFacade#readConfigurationDataBehindMountPoint(MountInstance, YangInstanceIdentifier)} which is called in
+ * {@link BrokerFacade#readConfigurationData(DOMMountPoint, YangInstanceIdentifier)} which is called in
* method {@link RestconfImpl#readConfigurationData}
*
* @throws ParseException
}
- private void prepareMockForModulesTest(final ControllerContext mockedControllerContext)
- throws FileNotFoundException {
- final SchemaContext schemaContext = TestUtils.loadSchemaContext("/modules");
- mockedControllerContext.setGlobalSchema(schemaContext);
- // when(mockedControllerContext.getGlobalSchema()).thenReturn(schemaContext);
- }
-
private int get(final String uri, final String mediaType) {
return target(uri).request(mediaType).get().getStatus();
}
type string;
}
*/
+ @SuppressWarnings("rawtypes")
private NormalizedNode prepareCnDataForMountPointTest(final boolean wrapToCont) throws URISyntaxException, ParseException {
final String testModuleDate = "2014-01-09";
final ContainerNode contChild = Builders
}
+ @SuppressWarnings("unchecked")
private void mockReadOperationalDataMethod() {
when(brokerFacade.readOperationalData(any(YangInstanceIdentifier.class))).thenReturn(answerFromGet);
}
+ @SuppressWarnings("unchecked")
private void mockReadConfigurationDataMethod() {
when(brokerFacade.readConfigurationData(any(YangInstanceIdentifier.class))).thenReturn(answerFromGet);
}
+ @SuppressWarnings("rawtypes")
private NormalizedNode prepareCnDataForSlashesBehindMountPointTest() throws ParseException {
return ImmutableMapEntryNodeBuilder
.create()
getDataWithInvalidDepthParameterTest(mockInfo);
}
+ @SuppressWarnings({"rawtypes", "unchecked"})
private void getDataWithInvalidDepthParameterTest(final UriInfo uriInfo) {
try {
final QName qNameDepth1Cont = QName.create("urn:nested:module", "2014-06-3", "depth1-cont");
}
}
+ @SuppressWarnings("unused")
private void verifyXMLResponse(final Response response, final NodeData nodeData) {
final Document doc = response.readEntity(Document.class);
// Document doc = TestUtils.loadDocumentFrom((InputStream) response.getEntity());
}
}
- private NodeData expectContainer(final String name, final NodeData... childData) {
- return new NodeData(name, Lists.newArrayList(childData));
- }
-
- private NodeData expectEmptyContainer(final String name) {
- return new NodeData(name, null);
- }
-
- private NodeData expectLeaf(final String name, final Object value) {
- return new NodeData(name, value);
- }
-
- private QName toNestedQName(final String localName) {
- return QName.create("urn:nested:module", "2014-06-3", localName);
- }
-
- private NodeData toCompositeNodeData(final QName key, final NodeData... childData) {
- return new NodeData(key, Lists.newArrayList(childData));
- }
-
- private NodeData toSimpleNodeData(final QName key, final Object value) {
- return new NodeData(key, value);
- }
-
}
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.opendaylight.controller.sal.restconf.impl.test.RestOperationUtils.XML;
+
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.CheckedFuture;
restconfImpl.setControllerContext(context);
}
+ @SuppressWarnings("unchecked")
@Test
@Ignore /// xmlData* need netconf-yang
public void postDataViaUrlMountPoint() throws UnsupportedEncodingException {
assertEquals(400, post(uri, MediaType.APPLICATION_JSON, ""));
}
+ @SuppressWarnings("unchecked")
@Test
@Ignore //jenkins has problem with JerseyTest - we expecting problems with singletons ControllerContext as schemaContext holder
public void createConfigurationDataTest() throws UnsupportedEncodingException, ParseException {
final RpcResult<TransactionStatus> rpcResult = new DummyRpcResult.Builder<TransactionStatus>().result(
TransactionStatus.COMMITED).build();
- when(brokerFacade.commitConfigurationDataPost(any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
+ when(brokerFacade.commitConfigurationDataPost((SchemaContext)null, any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
.thenReturn(mock(CheckedFuture.class));
final ArgumentCaptor<YangInstanceIdentifier> instanceIdCaptor = ArgumentCaptor.forClass(YangInstanceIdentifier.class);
+ @SuppressWarnings("rawtypes")
final ArgumentCaptor<NormalizedNode> compNodeCaptor = ArgumentCaptor.forClass(NormalizedNode.class);
// FIXME : NEVER test a nr. of call some service in complex test suite
// verify(brokerFacade, times(2))
verify(brokerFacade, times(1))
- .commitConfigurationDataPost(instanceIdCaptor.capture(), compNodeCaptor.capture());
+ .commitConfigurationDataPost((SchemaContext)null, instanceIdCaptor.capture(), compNodeCaptor.capture());
// identifier = "[(urn:ietf:params:xml:ns:yang:test-interface?revision=2014-07-01)interfaces, (urn:ietf:params:xml:ns:yang:test-interface?revision=2014-07-01)block]";
assertEquals(identifier, ImmutableList.copyOf(instanceIdCaptor.getValue().getPathArguments()).toString());
}
public void createConfigurationDataNullTest() throws UnsupportedEncodingException {
initMocking();
- when(brokerFacade.commitConfigurationDataPost(any(YangInstanceIdentifier.class),any(NormalizedNode.class)))
+ when(brokerFacade.commitConfigurationDataPost(any(SchemaContext.class), any(YangInstanceIdentifier.class),any(NormalizedNode.class)))
.thenReturn(Futures.<Void, TransactionCommitFailedException>immediateCheckedFuture(null));
//FIXME : find who is set schemaContext
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import java.io.FileNotFoundException;
@Test
public void putConfigStatusCodesEmptyBody() throws UnsupportedEncodingException {
final String uri = "/config/ietf-interfaces:interfaces/interface/eth0";
+ @SuppressWarnings("unused")
final Response resp = target(uri).request(MediaType.APPLICATION_JSON).put(
Entity.entity("", MediaType.APPLICATION_JSON));
assertEquals(400, put(uri, MediaType.APPLICATION_JSON, ""));
public void testRpcResultCommitedToStatusCodesWithMountPoint() throws UnsupportedEncodingException,
FileNotFoundException, URISyntaxException {
+ @SuppressWarnings("unchecked")
final CheckedFuture<Void, TransactionCommitFailedException> dummyFuture = mock(CheckedFuture.class);
when(
@Test
public void putDataMountPointIntoHighestElement() throws UnsupportedEncodingException, URISyntaxException {
+ @SuppressWarnings("unchecked")
final CheckedFuture<Void, TransactionCommitFailedException> dummyFuture = mock(CheckedFuture.class);
when(
brokerFacade.commitConfigurationDataPut(any(DOMMountPoint.class), any(YangInstanceIdentifier.class),
doThrow(OptimisticLockFailedException.class).
when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
assertEquals(500, put(uri, MediaType.APPLICATION_XML, xmlData));
doThrow(OptimisticLockFailedException.class).doReturn(mock(CheckedFuture.class)).
when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
assertEquals(200, put(uri, MediaType.APPLICATION_XML, xmlData));
}
doThrow(TransactionCommitFailedException.class).
when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ (SchemaContext)null, any(YangInstanceIdentifier.class), any(NormalizedNode.class));
assertEquals(500, put(uri, MediaType.APPLICATION_XML, xmlData));
}
private void mockCommitConfigurationDataPutMethod(final boolean noErrors) {
if (noErrors) {
doReturn(mock(CheckedFuture.class)).when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
} else {
doThrow(RestconfDocumentedException.class).when(brokerFacade).commitConfigurationDataPut(
- any(YangInstanceIdentifier.class), any(NormalizedNode.class));
+ any(SchemaContext.class), any(YangInstanceIdentifier.class), any(NormalizedNode.class));
}
}
@BeforeClass
public static void init() throws FileNotFoundException {
- Set<Module> allModules = TestUtils.loadModulesFrom("/full-versions/yangs");
+ final Set<Module> allModules = TestUtils.loadModulesFrom("/full-versions/yangs");
assertNotNull(allModules);
- SchemaContext schemaContext = TestUtils.loadSchemaContext(allModules);
+ final SchemaContext schemaContext = TestUtils.loadSchemaContext(allModules);
controllerContext = spy(ControllerContext.getInstance());
controllerContext.setSchemas(schemaContext);
restconfImpl.setControllerContext(controllerContext);
}
+ @SuppressWarnings("unchecked")
@Test
public void testExample() throws FileNotFoundException, ParseException {
+ @SuppressWarnings("rawtypes")
+ final
NormalizedNode normalizedNodeData = TestUtils.prepareNormalizedNodeWithIetfInterfacesInterfacesData();
- BrokerFacade brokerFacade = mock(BrokerFacade.class);
+ final BrokerFacade brokerFacade = mock(BrokerFacade.class);
when(brokerFacade.readOperationalData(any(YangInstanceIdentifier.class))).thenReturn(normalizedNodeData);
assertEquals(normalizedNodeData,
brokerFacade.readOperationalData(null));
package org.opendaylight.controller.sal.restconf.impl.test;
import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.net.URISyntaxException;
import java.sql.Date;
import java.text.ParseException;
-import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
-import org.opendaylight.controller.sal.restconf.impl.BrokerFacade;
-import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
-import org.opendaylight.controller.sal.restconf.impl.RestconfImpl;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
private static final Logger LOG = LoggerFactory.getLogger(TestUtils.class);
- private final static YangContextParser parser = new YangParserImpl();
+ private final static YangContextParser PARSER = new YangParserImpl();
private static Set<Module> loadModules(final String resourceDirectory) throws FileNotFoundException {
final File testDir = new File(resourceDirectory);
testFiles.add(new File(testDir, fileName));
}
}
- return parser.parseYangModels(testFiles);
+ return PARSER.parseYangModels(testFiles);
}
public static Set<Module> loadModulesFrom(final String yangPath) {
}
public static SchemaContext loadSchemaContext(final Set<Module> modules) {
- return parser.resolveSchemaContext(modules);
+ return PARSER.resolveSchemaContext(modules);
}
public static SchemaContext loadSchemaContext(final String resourceDirectory) throws FileNotFoundException {
- return parser.resolveSchemaContext(loadModulesFrom(resourceDirectory));
+ return PARSER.resolveSchemaContext(loadModulesFrom(resourceDirectory));
}
public static Module findModule(final Set<Module> modules, final String moduleName) {
return buildQName(name, "", null);
}
- private static void prepareMocksForRestconf(final Set<Module> modules, final RestconfImpl restconf) {
- final ControllerContext controllerContext = ControllerContext.getInstance();
- final BrokerFacade mockedBrokerFacade = mock(BrokerFacade.class);
-
- controllerContext.setSchemas(TestUtils.loadSchemaContext(modules));
-
- when(mockedBrokerFacade.commitConfigurationDataPut(any(YangInstanceIdentifier.class), any(NormalizedNode.class)))
- .thenReturn(mock(CheckedFuture.class));
-
- restconf.setControllerContext(controllerContext);
- restconf.setBroker(mockedBrokerFacade);
- }
-
public static String loadTextFile(final String filePath) throws IOException {
final FileReader fileReader = new FileReader(filePath);
final BufferedReader bufReader = new BufferedReader(fileReader);
public static YangInstanceIdentifier.NodeIdentifierWithPredicates getNodeIdentifierPredicate(final String localName,
final String namespace, final String revision, final String... keysAndValues) throws ParseException {
- final java.util.Date date = new SimpleDateFormat("yyyy-MM-dd").parse(revision);
if (keysAndValues.length % 2 != 0) {
new IllegalArgumentException("number of keys argument have to be divisible by 2 (map)");
}
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil.getRevisionFormat;
+
import java.io.FileNotFoundException;
import java.text.ParseException;
import java.util.Date;
+ scopeValue);
final UriInfo mockedUriInfo = mock(UriInfo.class);
+ @SuppressWarnings("unchecked")
final MultivaluedMap<String, String> mockedMultivaluedMap = mock(MultivaluedMap.class);
when(mockedMultivaluedMap.getFirst(eq("datastore"))).thenReturn(datastoreValue);
when(mockedMultivaluedMap.getFirst(eq("scope"))).thenReturn(scopeValue);
.withValue(scope)).build();
container.withChild(scopeNode);
- return new NormalizedNodeContext(new InstanceIdentifierContext(null, rpcInputSchemaNode, null, schema), container.build());
+ return new NormalizedNodeContext(new InstanceIdentifierContext<>(null, rpcInputSchemaNode, null, schema), container.build());
}
}
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
@Test
public void testToInstanceIdentifierList() throws FileNotFoundException {
- InstanceIdentifierContext instanceIdentifier = controllerContext
+ InstanceIdentifierContext<?> instanceIdentifier = controllerContext
.toInstanceIdentifier("simple-nodes:userWithoutClass/foo");
assertEquals(instanceIdentifier.getSchemaNode().getQName().getLocalName(), "userWithoutClass");
@Test
public void testToInstanceIdentifierContainer() throws FileNotFoundException {
- final InstanceIdentifierContext instanceIdentifier = controllerContext.toInstanceIdentifier("simple-nodes:users");
+ final InstanceIdentifierContext<?> instanceIdentifier = controllerContext.toInstanceIdentifier("simple-nodes:users");
assertEquals(instanceIdentifier.getSchemaNode().getQName().getLocalName(), "users");
assertTrue(instanceIdentifier.getSchemaNode() instanceof ContainerSchemaNode);
assertEquals(2, ((ContainerSchemaNode) instanceIdentifier.getSchemaNode()).getChildNodes().size());
@Test
@Ignore //jenkins has problem with JerseyTest - we expecting problems with singletons ControllerContext as schemaContext holder
public void testToInstanceIdentifierChoice() throws FileNotFoundException {
- final InstanceIdentifierContext instanceIdentifier = controllerContext
+ final InstanceIdentifierContext<?> instanceIdentifier = controllerContext
.toInstanceIdentifier("simple-nodes:food/nonalcoholic");
assertEquals(instanceIdentifier.getSchemaNode().getQName().getLocalName(), "nonalcoholic");
}
@Test
public void testMountPointWithExternModul() throws FileNotFoundException {
initMountService(true);
- final InstanceIdentifierContext instanceIdentifier = controllerContext
+ final InstanceIdentifierContext<?> instanceIdentifier = controllerContext
.toInstanceIdentifier("simple-nodes:users/yang-ext:mount/test-interface2:class/student/name");
assertEquals(
"[(urn:ietf:params:xml:ns:yang:test-interface2?revision=2014-08-01)class, (urn:ietf:params:xml:ns:yang:test-interface2?revision=2014-08-01)student, (urn:ietf:params:xml:ns:yang:test-interface2?revision=2014-08-01)student[{(urn:ietf:params:xml:ns:yang:test-interface2?revision=2014-08-01)name=name}]]",
@Test
public void testMountPointWithoutExternModul() throws FileNotFoundException {
initMountService(true);
- final InstanceIdentifierContext instanceIdentifier = controllerContext
+ final InstanceIdentifierContext<?> instanceIdentifier = controllerContext
.toInstanceIdentifier("simple-nodes:users/yang-ext:mount/");
assertTrue(Iterables.isEmpty(instanceIdentifier.getInstanceIdentifier().getPathArguments()));
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import org.junit.Test;
-
-public class CompareLf {
-
- @Test
- public void test() {
- Lf lf1 = new Lf("name", "value");
- Lf lf2 = new Lf("name", "value");
- Lf lf3 = new Lf("name1", "value");
- Lf lf4 = new Lf("name", "value1");
-
- assertTrue(lf1.equals(lf2));
- assertFalse(lf1.equals(lf3));
- assertFalse(lf1.equals(lf4));
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-public class Cont extends LstItem {
- String name = null;
-
- public Cont(String name) {
- super();
- this.name = name;
- }
-
- public String getName() {
- return name;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!this.getClass().equals(obj.getClass())) {
- return false;
- }
- if (!super.equals(obj)) {
- return false;
- }
- Cont cont = (Cont) obj;
- if (this.name == null) {
- if (cont.name != null) {
- return false;
- }
- } else if (!this.name.equals(cont.name)) {
- return false;
- }
- return true;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + ((this.name == null) ? 0 : this.name.hashCode());
- return result;
- }
-
- @Override
- public String toString() {
- return name;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-public class Lf extends YangElement {
- private Object value;
- private int numOfEqualItems = 0;
-
- public Lf(String name, Object value) {
- super(name);
- this.value = value;
- }
-
- public Lf(Object value) {
- super("");
- this.value = value;
- }
-
- public Object getValue() {
- return value;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!this.getClass().equals(obj.getClass())) {
- return false;
- }
- if (!super.equals(obj)) {
- return false;
- }
- Lf lf = (Lf) obj;
- if (this.value == null) {
- if (lf.value != null) {
- return false;
- }
- } else if (!this.value.equals(lf.value)) {
- return false;
- }
- if (this.numOfEqualItems != lf.numOfEqualItems) {
- return false;
- }
- return true;
- }
-
- public void incNumOfEqualItems() {
- this.numOfEqualItems++;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + ((value == null) ? 0 : value.hashCode());
- result = prime * result + numOfEqualItems;
- return result;
- }
-
- @Override
- public String toString() {
- return super.toString() + ":" + value;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-import java.util.HashSet;
-import java.util.Set;
-
-public class LfLst extends YangElement {
- Set<Lf> lfs;
-
- public LfLst(String name) {
- super(name);
- lfs = new HashSet<>();
- }
-
- public LfLst addLf(Object value) {
- return addLf(new Lf(value));
- }
-
- public LfLst addLf(Lf lf) {
- while (this.lfs.contains(lf)) {
- lf.incNumOfEqualItems();
- }
- this.lfs.add(lf);
- return this;
- }
-
- public Set<Lf> getLfs() {
- return lfs;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!this.getClass().equals(obj.getClass())) {
- return false;
- }
- if (!super.equals(obj)) {
- return false;
- }
- LfLst lfLst = (LfLst) obj;
- if (this.lfs == null) {
- if (lfLst.lfs != null) {
- return false;
- }
- } else if (!this.lfs.equals(lfLst.lfs)) {
- return false;
- }
- return true;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + ((lfs == null) ? 0 : lfs.hashCode());
- return result;
- }
-
- @Override
- public String toString() {
-
- return super.toString();
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-import java.util.HashSet;
-import java.util.Set;
-
-public class Lst extends YangElement {
- private final Set<LstItem> lstItems;
-
- public Lst(final String name) {
- super(name);
- lstItems = new HashSet<>();
- }
-
- public Lst addLstItem(final LstItem lstItem) {
- lstItem.setLstName(name);
- while (this.lstItems.contains(lstItem)) {
- lstItem.incNumOfEqualItems();
- }
- this.lstItems.add(lstItem);
- return this;
- }
-
- public Set<LstItem> getLstItems() {
- return lstItems;
- }
-
- @Override
- public boolean equals(final Object obj) {
- if (this == obj) {
- return true;
- }
- if (!this.getClass().equals(obj.getClass())) {
- return false;
- }
- if (!super.equals(obj)) {
- return false;
- }
- Lst lst = (Lst) obj;
- if (this.lstItems == null) {
- if (lst.lstItems != null) {
- return false;
- }
- } else if (!this.lstItems.equals(lst.lstItems)) {
- return false;
- }
- return true;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + ((lstItems == null) ? 0 : lstItems.hashCode());
- return result;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-import static org.junit.Assert.assertFalse;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class LstItem {
- String lstName;
- Map<String, Lf> lfs;
- Map<String, LfLst> lfLsts;
- Map<String, Lst> lsts;
- Map<String, Cont> conts;
- private int numOfEqualItems = 0;
-
- public LstItem() {
- lfs = new HashMap<>();
- conts = new HashMap<>();
- lfLsts = new HashMap<>();
- lsts = new HashMap<>();
- }
-
- public Map<String, Lst> getLsts() {
- return lsts;
- }
-
- public Map<String, Cont> getConts() {
- return conts;
- }
-
- public Map<String, LfLst> getLfLsts() {
- return lfLsts;
- }
-
- public Map<String, Lf> getLfs() {
- return lfs;
- }
-
- public String getLstName() {
- return lstName;
- }
-
- public LstItem addLf(Lf lf) {
- lfs.put(lf.getName(), lf);
- return this;
- }
-
- public LstItem addLf(String name, Object value) {
- lfs.put(name, new Lf(name, value));
- return this;
- }
-
- public void addLfLst(LfLst lfLst) {
- assertFalse("Found multiple leaf list elements for " + lfLst.getName(), lfLsts.containsKey(lfLst.getName()));
- lfLsts.put(lfLst.getName(), lfLst);
- }
-
- public void addLst(Lst lst) {
- assertFalse("Found multiple list elements for " + lst.getName(), lsts.containsKey(lst.getName()));
- lsts.put(lst.getName(), lst);
- }
-
- public void addCont(Cont cont) {
- conts.put(cont.getName(), cont);
- }
-
- public void incNumOfEqualItems() {
- this.numOfEqualItems++;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!this.getClass().equals(obj.getClass())) {
- return false;
- }
- LstItem lstItem = (LstItem) obj;
- if (this.conts == null) {
- if (lstItem.conts != null) {
- return false;
- }
- } else if (!this.conts.equals(lstItem.conts)) {
- return false;
- }
- if (this.lfs == null) {
- if (lstItem.lfs != null) {
- return false;
- }
- } else if (!this.lfs.equals(lstItem.lfs)) {
- return false;
- }
- if (this.lfLsts == null) {
- if (lstItem.lfLsts != null) {
- return false;
- }
- } else if (!this.lfLsts.equals(lstItem.lfLsts)) {
- return false;
- }
- if (this.lsts == null) {
- if (lstItem.lsts != null) {
- return false;
- }
- } else if (!this.lsts.equals(lstItem.lsts)) {
- return false;
- }
- if (this.numOfEqualItems != lstItem.numOfEqualItems) {
- return false;
- }
- return true;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((lfs == null) ? 0 : lfs.hashCode());
- result = prime * result + ((lfLsts == null) ? 0 : lfLsts.hashCode());
- result = prime * result + ((lsts == null) ? 0 : lsts.hashCode());
- result = prime * result + ((conts == null) ? 0 : conts.hashCode());
- result = prime * result + numOfEqualItems;
- return result;
- }
-
- @Override
- public String toString() {
- return "lst item of " + lstName;
- }
-
- public void setLstName(String name) {
- this.lstName = name;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.test.structures;
-
-public class YangElement {
- protected String name;
-
- protected YangElement(String name) {
- this.name = name;
- }
-
- public String getName() {
- return name;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!this.getClass().equals(obj.getClass())) {
- return false;
- }
- YangElement yangElement = (YangElement) obj;
- if (this.name == null) {
- if (yangElement.name != null) {
- return false;
- }
- } else if (!this.name.equals(yangElement.name)) {
- return false;
- }
- return true;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((name == null) ? 0 : name.hashCode());
- return result;
- }
-
- @Override
- public String toString() {
- return name;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl.xml.to.cnsn.test;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class XmlLeafrefToCnSnTest {
- private static final Logger LOG = LoggerFactory.getLogger(XmlLeafrefToCnSnTest.class);
-
-}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-docgen-maven</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-docgen</artifactId>
<param-name>javax.ws.rs.Application</param-name>
<param-value>org.opendaylight.controller.sal.rest.doc.jaxrs.ApiDocApplication</param-value>
</init-param>
+ <!-- AAA Auth Filter -->
+ <init-param>
+ <param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
+ <param-value> org.opendaylight.aaa.sts.TokenAuthFilter</param-value>
+ </init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<security-constraint>
<web-resource-collection>
- <web-resource-name>free access</web-resource-name>
- <url-pattern>/explorer/css/*</url-pattern>
- <url-pattern>/explorer/images/*</url-pattern>
- <url-pattern>/explorer/lib/*</url-pattern>
- <url-pattern>/explorer/*</url-pattern>
+ <web-resource-name>API Doc</web-resource-name>
+ <url-pattern>/*</url-pattern>
</web-resource-collection>
</security-constraint>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sal-restconf-broker</artifactId>
<packaging>bundle</packaging>
<parent>
<artifactId>sal-parent</artifactId>
<groupId>org.opendaylight.controller</groupId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>clustering-it</artifactId>
<groupId>org.opendaylight.controller.samples</groupId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it-config</artifactId>
<packaging>jar</packaging>
<parent>
<artifactId>clustering-it</artifactId>
<groupId>org.opendaylight.controller.samples</groupId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it-model</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it</artifactId>
<packaging>pom</packaging>
<parent>
<artifactId>clustering-it</artifactId>
<groupId>org.opendaylight.controller.samples</groupId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it-provider</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.samples</groupId>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>toaster-config</artifactId>
<description>Configuration files for toaster</description>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster-consumer</artifactId>
<packaging>bundle</packaging>
<properties>
- <sal-binding-api.version>1.2.0-SNAPSHOT</sal-binding-api.version>
+ <sal-binding-api.version>1.3.0-SNAPSHOT</sal-binding-api.version>
</properties>
<dependencies>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster-it</artifactId>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster-provider</artifactId>
<packaging>bundle</packaging>
<properties>
- <sal-binding-api.version>1.2.0-SNAPSHOT</sal-binding-api.version>
+ <sal-binding-api.version>1.3.0-SNAPSHOT</sal-binding-api.version>
</properties>
<dependencies>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.3.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>config-netconf-connector</artifactId>
<packaging>bundle</packaging>
}
Date revision = module.getRevision();
- Preconditions.checkState(!revisionsByNamespace.containsKey(revision),
- "Duplicate revision %s for namespace %s", revision, namespace);
IdentityMapping identityMapping = revisionsByNamespace.get(revision);
if(identityMapping == null) {
}
public static Map<String/* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleConfig>> transformMbeToModuleConfigs
- (final BeanReader configRegistryClient, Map<String/* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> mBeanEntries) {
+ Map<String /* Name of module entry from yang file */, ModuleConfig>> transformMbeToModuleConfigs (
+ final BeanReader configRegistryClient, Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
}
public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
- if(ref.get() == null || ref.get().get() == null) {
- getYangStoreSnapshot();
+
+ YangStoreContext context = ref.get().get();
+
+ if(context == null) {
+ context = getYangStoreSnapshot();
}
this.listeners.add(listener);
- listener.onCapabilitiesAdded(NetconfOperationServiceFactoryImpl.setupCapabilities(ref.get().get()));
+ listener.onCapabilitiesAdded(NetconfOperationServiceFactoryImpl.setupCapabilities(context));
return new AutoCloseable() {
@Override
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.NoSuchElementException;
import java.util.Set;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleIdentifierImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private final Map<String /* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> moduleMXBeanEntryMap;
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> moduleMXBeanEntryMap;
private final Map<QName, Map<String, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries;
@Override
public Set<Module> getModules() {
- return schemaContext.getModules();
+ final Set<Module> modules = Sets.newHashSet(schemaContext.getModules());
+ for (final Module module : schemaContext.getModules()) {
+ modules.addAll(module.getSubmodules());
+ }
+ return modules;
}
@Override
public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
- return schemaContext.getModuleSource(moduleIdentifier).get();
+ final Optional<String> moduleSource = schemaContext.getModuleSource(moduleIdentifier);
+ if(moduleSource.isPresent()) {
+ return moduleSource.get();
+ } else {
+ try {
+ return Iterables.find(getModules(), new Predicate<Module>() {
+ @Override
+ public boolean apply(final Module input) {
+ final ModuleIdentifierImpl id = new ModuleIdentifierImpl(input.getName(), Optional.fromNullable(input.getNamespace()), Optional.fromNullable(input.getRevision()));
+ return id.equals(moduleIdentifier);
+ }
+ }).getSource();
+ } catch (final NoSuchElementException e) {
+ throw new IllegalArgumentException("Source for yang module " + moduleIdentifier + " not found", e);
+ }
+ }
}
@Override
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import io.netty.channel.Channel;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreService;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.NetconfServerSessionListener;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
private void closeSession() throws NetconfDocumentedException, ParserConfigurationException, SAXException,
IOException {
+ final Channel channel = mock(Channel.class);
+ doReturn("channel").when(channel).toString();
+ final NetconfServerSessionListener listener = mock(NetconfServerSessionListener.class);
+ final NetconfServerSession session =
+ new NetconfServerSession(listener, channel, 1L,
+ NetconfHelloMessageAdditionalHeader.fromString("[netconf;10.12.0.102:48528;ssh;;;;;;]"));
DefaultCloseSession closeOp = new DefaultCloseSession(NETCONF_SESSION_ID, sessionCloseable);
+ closeOp.setNetconfSession(session);
executeOp(closeOp, "netconfMessages/closeSession.xml");
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>config-persister-impl</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>ietf-netconf-monitoring-extension</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>ietf-netconf-monitoring</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>ietf-netconf-notifications</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>ietf-netconf</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>mdsal-netconf-connector</artifactId>
<packaging>bundle</packaging>
<artifactId>xmlunit</artifactId>
<scope>test</scope>
</dependency>
-
</dependencies>
<build>
import org.opendaylight.controller.netconf.mdsal.connector.MdsalNetconfOperationServiceFactory;
-public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule {
+public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule{
public NetconfMdsalMapperModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
@Override
public java.lang.AutoCloseable createInstance() {
- final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory = new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency(), getDomBrokerDependency()) {
- @Override
- public void close() throws Exception {
- super.close();
- getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
- }
- };
+ final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory =
+ new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency()) {
+ @Override
+ public void close() throws Exception {
+ super.close();
+ getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
+ }
+ };
+ getDomBrokerDependency().registerConsumer(mdsalNetconfOperationServiceFactory);
getMapperAggregatorDependency().onAddNetconfOperationServiceFactory(mdsalNetconfOperationServiceFactory);
return mdsalNetconfOperationServiceFactory;
}
import java.util.Set;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
private final OperationProvider operationProvider;
public MdsalNetconfOperationService(final CurrentSchemaContext schemaContext, final String netconfSessionIdForReporting,
- final DOMDataBroker dataBroker) {
- this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker);
+ final DOMDataBroker dataBroker, final DOMRpcService rpcService) {
+ this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker, rpcService);
}
@Override
package org.opendaylight.controller.netconf.mdsal.connector;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.util.capability.BasicCapability;
import org.opendaylight.controller.netconf.util.capability.YangModuleCapability;
+import org.opendaylight.controller.sal.core.api.Broker.ConsumerSession;
+import org.opendaylight.controller.sal.core.api.Consumer;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, AutoCloseable {
+public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, Consumer, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(MdsalNetconfOperationServiceFactory.class);
- private final DOMDataBroker dataBroker;
+ private ConsumerSession session = null;
+ private DOMDataBroker dataBroker = null;
+ private DOMRpcService rpcService = null;
private final CurrentSchemaContext currentSchemaContext;
- public MdsalNetconfOperationServiceFactory(final SchemaService schemaService, final DOMDataBroker domDataBroker) {
+ public MdsalNetconfOperationServiceFactory(final SchemaService schemaService) {
this.currentSchemaContext = new CurrentSchemaContext(Preconditions.checkNotNull(schemaService));
- this.dataBroker = Preconditions.checkNotNull(domDataBroker);
}
@Override
public MdsalNetconfOperationService createService(final String netconfSessionIdForReporting) {
- return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker);
+ Preconditions.checkState(dataBroker != null, "MD-SAL provider not yet initialized");
+ return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker, rpcService);
}
@Override
return transformCapabilities(currentSchemaContext.getCurrentContext());
}
- static Set<Capability> transformCapabilities(final SchemaContext currentContext1) {
+ static Set<Capability> transformCapabilities(final SchemaContext currentContext) {
final Set<Capability> capabilities = new HashSet<>();
// [RFC6241] 8.3. Candidate Configuration Capability
capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
- final SchemaContext currentContext = currentContext1;
final Set<Module> modules = currentContext.getModules();
for (final Module module : modules) {
- if(currentContext.getModuleSource(module).isPresent()) {
- capabilities.add(new YangModuleCapability(module, currentContext.getModuleSource(module).get()));
- } else {
- LOG.warn("Missing source for module {}. This module will not be available from netconf server",
- module);
+ Optional<YangModuleCapability> cap = moduleToCapability(module);
+ if(cap.isPresent()) {
+ capabilities.add(cap.get());
+ }
+ for (final Module submodule : module.getSubmodules()) {
+ cap = moduleToCapability(submodule);
+ if(cap.isPresent()) {
+ capabilities.add(cap.get());
+ }
}
}
return capabilities;
}
+ private static Optional<YangModuleCapability> moduleToCapability(final Module module) {
+ final String source = module.getSource();
+ if(source !=null) {
+ return Optional.of(new YangModuleCapability(module, source));
+ } else {
+ LOG.warn("Missing source for module {}. This module will not be available from netconf server",
+ module);
+ }
+ return Optional.absent();
+ }
+
@Override
public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
return currentSchemaContext.registerCapabilityListener(listener);
}
+ @Override
+ public void onSessionInitiated(ConsumerSession session) {
+ this.session = Preconditions.checkNotNull(session);
+ this.dataBroker = this.session.getService(DOMDataBroker.class);
+ this.rpcService = this.session.getService(DOMRpcService.class);
+ }
+
+ @Override
+ public Collection<ConsumerFunctionality> getConsumerFunctionality() {
+ return Collections.emptySet();
+ }
}
import com.google.common.collect.Sets;
import java.util.Set;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Commit;
import org.opendaylight.controller.netconf.mdsal.connector.ops.DiscardChanges;
import org.opendaylight.controller.netconf.mdsal.connector.ops.EditConfig;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Lock;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.RuntimeRpc;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Unlock;
import org.opendaylight.controller.netconf.mdsal.connector.ops.get.Get;
import org.opendaylight.controller.netconf.mdsal.connector.ops.get.GetConfig;
private final String netconfSessionIdForReporting;
private final CurrentSchemaContext schemaContext;
private final DOMDataBroker dataBroker;
+ private final DOMRpcService rpcService;
private final TransactionProvider transactionProvider;
- public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final DOMDataBroker dataBroker) {
+ public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext,
+ final DOMDataBroker dataBroker, final DOMRpcService rpcService) {
this.netconfSessionIdForReporting = netconfSessionIdForReporting;
this.schemaContext = schemaContext;
this.dataBroker = dataBroker;
- this.transactionProvider = new TransactionProvider(dataBroker, netconfSessionIdForReporting);
-
+ this.rpcService = rpcService;
+ this.transactionProvider = new TransactionProvider(this.dataBroker, netconfSessionIdForReporting);
}
Set<NetconfOperation> getOperations() {
new Get(netconfSessionIdForReporting, schemaContext, transactionProvider),
new GetConfig(netconfSessionIdForReporting, schemaContext, transactionProvider),
new Lock(netconfSessionIdForReporting),
- new Unlock(netconfSessionIdForReporting)
+ new Unlock(netconfSessionIdForReporting),
+ new RuntimeRpc(netconfSessionIdForReporting, schemaContext, rpcService)
);
}
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class Commit extends AbstractLastNetconfOperation{
+public class Commit extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(Commit.class);
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class DiscardChanges extends AbstractLastNetconfOperation{
+public class DiscardChanges extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(DiscardChanges.class);
protected String getOperationName() {
return OPERATION_NAME;
}
+
}
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
-import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.ModifyAction;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
-public class EditConfig extends AbstractLastNetconfOperation {
+public class EditConfig extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(EditConfig.class);
LOG.debug("DataNode from module is not ContainerSchemaNode nor ListSchemaNode, aborting..");
return Optional.absent();
}
-
}
private Datastore extractTargetParameter(final XmlElement operationElement) throws NetconfDocumentedException {
- final XmlElement targetChildNode;
- try {
- final XmlElement targetElement = operationElement.getOnlyChildElementWithSameNamespace(TARGET_KEY);
- targetChildNode = targetElement.getOnlyChildElementWithSameNamespace();
- } catch (final MissingNameSpaceException | UnexpectedNamespaceException e) {
- LOG.trace("Can't get only child element with same namespace", e);
- throw NetconfDocumentedException.wrap(e);
+ final NodeList elementsByTagName = operationElement.getDomElement().getElementsByTagName(TARGET_KEY);
+ // Direct lookup instead of using XmlElement class due to performance
+ if (elementsByTagName.getLength() == 0) {
+ throw new NetconfDocumentedException("Missing target element", ErrorType.rpc, ErrorTag.missing_attribute, ErrorSeverity.error);
+ } else if (elementsByTagName.getLength() > 1) {
+ throw new NetconfDocumentedException("Multiple target elements", ErrorType.rpc, ErrorTag.unknown_attribute, ErrorSeverity.error);
+ } else {
+ final XmlElement targetChildNode = XmlElement.fromDomElement((Element) elementsByTagName.item(0)).getOnlyChildElement();
+ return Datastore.valueOf(targetChildNode.getName());
}
-
- return Datastore.valueOf(targetChildNode.getName());
}
- private ModifyAction getDefaultOperation(final XmlElement operationElement) throws NetconfDocumentedException{
- try {
- return ModifyAction.fromXmlValue(getElement(operationElement, DEFAULT_OPERATION_KEY).getTextContent());
- } catch (NetconfDocumentedException e) {
- if (e.getErrorType() == ErrorType.protocol
- && e.getErrorSeverity() == ErrorSeverity.error
- && e.getErrorTag() == ErrorTag.missing_element) {
- return ModifyAction.MERGE;
- }
- else {
- throw e;
- }
+ private ModifyAction getDefaultOperation(final XmlElement operationElement) throws NetconfDocumentedException {
+ final NodeList elementsByTagName = operationElement.getDomElement().getElementsByTagName(DEFAULT_OPERATION_KEY);
+ if(elementsByTagName.getLength() == 0) {
+ return ModifyAction.MERGE;
+ } else if(elementsByTagName.getLength() > 1) {
+ throw new NetconfDocumentedException("Multiple " + DEFAULT_OPERATION_KEY + " elements",
+ ErrorType.rpc, ErrorTag.unknown_attribute, ErrorSeverity.error);
+ } else {
+ return ModifyAction.fromXmlValue(elementsByTagName.item(0).getTextContent());
}
+
}
private XmlElement getElement(final XmlElement operationElement, String elementName) throws NetconfDocumentedException {
protected String getOperationName() {
return OPERATION_NAME;
}
+
}
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class Lock extends AbstractLastNetconfOperation{
+public class Lock extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(Lock.class);
protected String getOperationName() {
return OPERATION_NAME;
}
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import javax.annotation.Nullable;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.OrderedNormalizedNodeWriter;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Attr;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+public class RuntimeRpc extends AbstractSingletonNetconfOperation {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpc.class);
+
+ private final CurrentSchemaContext schemaContext;
+ private static final XMLOutputFactory XML_OUTPUT_FACTORY;
+
+ static {
+ XML_OUTPUT_FACTORY = XMLOutputFactory.newFactory();
+ XML_OUTPUT_FACTORY.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
+ }
+
+ private final DOMRpcService rpcService;
+
+ public RuntimeRpc(final String netconfSessionIdForReporting, CurrentSchemaContext schemaContext, DOMRpcService rpcService) {
+ super(netconfSessionIdForReporting);
+ this.schemaContext = schemaContext;
+ this.rpcService = rpcService;
+ }
+
+ @Override
+ protected HandlingPriority canHandle(final String netconfOperationName, final String namespace) {
+ final URI namespaceURI = createNsUri(namespace);
+ final Optional<Module> module = getModule(namespaceURI);
+
+ if (!module.isPresent()) {
+ LOG.debug("Cannot handle rpc: {}, {}", netconfOperationName, namespace);
+ return HandlingPriority.CANNOT_HANDLE;
+ }
+
+ getRpcDefinitionFromModule(module.get(), namespaceURI, netconfOperationName);
+ return HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY;
+
+ }
+
+ @Override
+ protected String getOperationName() {
+ throw new UnsupportedOperationException("Runtime rpc does not have a stable name");
+ }
+
+ private URI createNsUri(final String namespace) {
+ final URI namespaceURI;
+ try {
+ namespaceURI = new URI(namespace);
+ } catch (URISyntaxException e) {
+ // Cannot occur, namespace in parsed XML cannot be invalid URI
+ throw new IllegalStateException("Unable to parse URI " + namespace, e);
+ }
+ return namespaceURI;
+ }
+
+ //this returns module with the newest revision if more then 1 module with same namespace is found
+ private Optional<Module> getModule(final URI namespaceURI) {
+ return Optional.fromNullable(schemaContext.getCurrentContext().findModuleByNamespaceAndRevision(namespaceURI, null));
+ }
+
+ private Optional<RpcDefinition> getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
+ for (RpcDefinition rpcDef : module.getRpcs()) {
+ if (rpcDef.getQName().getNamespace().equals(namespaceURI)
+ && rpcDef.getQName().getLocalName().equals(name)) {
+ return Optional.of(rpcDef);
+ }
+ }
+ return Optional.absent();
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+
+ final String netconfOperationName = operationElement.getName();
+ final String netconfOperationNamespace;
+ try {
+ netconfOperationNamespace = operationElement.getNamespace();
+ } catch (MissingNameSpaceException e) {
+ LOG.debug("Cannot retrieve netconf operation namespace from message due to ", e);
+ throw new NetconfDocumentedException("Cannot retrieve netconf operation namespace from message",
+ ErrorType.protocol, ErrorTag.unknown_namespace, ErrorSeverity.error);
+ }
+
+ final URI namespaceURI = createNsUri(netconfOperationNamespace);
+ final Optional<Module> moduleOptional = getModule(namespaceURI);
+
+ if (!moduleOptional.isPresent()) {
+ throw new NetconfDocumentedException("Unable to find module in Schema Context with namespace and name : " +
+ namespaceURI + " " + netconfOperationName + schemaContext.getCurrentContext(),
+ ErrorType.application, ErrorTag.bad_element, ErrorSeverity.error);
+ }
+
+ final Optional<RpcDefinition> rpcDefinitionOptional = getRpcDefinitionFromModule(moduleOptional.get(), namespaceURI, netconfOperationName);
+
+ if (!rpcDefinitionOptional.isPresent()) {
+ throw new NetconfDocumentedException("Unable to find RpcDefinition with namespace and name : " + namespaceURI + " " + netconfOperationName,
+ ErrorType.application, ErrorTag.bad_element, ErrorSeverity.error);
+ }
+
+ final RpcDefinition rpcDefinition = rpcDefinitionOptional.get();
+ final SchemaPath schemaPath = SchemaPath.create(Collections.singletonList(rpcDefinition.getQName()), true);
+ final NormalizedNode<?, ?> inputNode = rpcToNNode(operationElement, rpcDefinition.getInput());
+
+ final CheckedFuture<DOMRpcResult, DOMRpcException> rpcFuture = rpcService.invokeRpc(schemaPath, inputNode);
+ try {
+ final DOMRpcResult result = rpcFuture.checkedGet();
+ if (result.getResult() == null) {
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
+ }
+ return (Element) transformNormalizedNode(document, result.getResult(), rpcDefinition.getOutput().getPath());
+ } catch (DOMRpcException e) {
+ throw NetconfDocumentedException.wrap(e);
+ }
+ }
+
+ @Override
+ public Document handle(final Document requestMessage,
+ final NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+
+ final XmlElement requestElement = getRequestElementWithCheck(requestMessage);
+
+ final Document document = XmlUtil.newDocument();
+
+ final XmlElement operationElement = requestElement.getOnlyChildElement();
+ final Map<String, Attr> attributes = requestElement.getAttributes();
+
+ final Element response = handle(document, operationElement, subsequentOperation);
+ final Element rpcReply = XmlUtil.createElement(document, XmlNetconfConstants.RPC_REPLY_KEY, Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
+
+ if(XmlElement.fromDomElement(response).hasNamespace()) {
+ rpcReply.appendChild(response);
+ } else {
+ final NodeList list = response.getChildNodes();
+ if (list.getLength() == 0) {
+ rpcReply.appendChild(response);
+ } else {
+ while (list.getLength() != 0) {
+ rpcReply.appendChild(list.item(0));
+ }
+ }
+ }
+
+ for (Attr attribute : attributes.values()) {
+ rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
+ }
+ document.appendChild(rpcReply);
+ return document;
+ }
+
+ //TODO move all occurences of this method in mdsal netconf(and xml factories) to a utility class
+ private Node transformNormalizedNode(final Document document, final NormalizedNode<?, ?> data, final SchemaPath rpcOutputPath) {
+ final DOMResult result = new DOMResult(document.createElement(XmlNetconfConstants.RPC_REPLY_KEY));
+
+ final XMLStreamWriter xmlWriter = getXmlStreamWriter(result);
+
+ final NormalizedNodeStreamWriter nnStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(xmlWriter,
+ schemaContext.getCurrentContext(), rpcOutputPath);
+
+ final OrderedNormalizedNodeWriter nnWriter = new OrderedNormalizedNodeWriter(nnStreamWriter, schemaContext.getCurrentContext(), rpcOutputPath);
+
+ writeRootElement(xmlWriter, nnWriter, (ContainerNode) data);
+ try {
+ nnStreamWriter.close();
+ xmlWriter.close();
+ } catch (IOException | XMLStreamException e) {
+ LOG.warn("Error while closing streams", e);
+ }
+
+ return result.getNode();
+ }
+
+ private XMLStreamWriter getXmlStreamWriter(final DOMResult result) {
+ try {
+ return XML_OUTPUT_FACTORY.createXMLStreamWriter(result);
+ } catch (final XMLStreamException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void writeRootElement(final XMLStreamWriter xmlWriter, final OrderedNormalizedNodeWriter nnWriter, final ContainerNode data) {
+ try {
+ Collection<DataContainerChild<?, ?>> value = (Collection) data.getValue();
+ nnWriter.write(value);
+ nnWriter.flush();
+ xmlWriter.flush();
+ } catch (XMLStreamException | IOException e) {
+ Throwables.propagate(e);
+ }
+ }
+
+ /**
+ * Parses xml element rpc input into normalized node or null if rpc does not take any input
+ * @param oElement rpc xml element
+ * @param input input container schema node, or null if rpc does not take any input
+ * @return parsed rpc into normalized node, or null if input schema is null
+ */
+ @Nullable
+ private NormalizedNode<?, ?> rpcToNNode(final XmlElement oElement, @Nullable final ContainerSchemaNode input) {
+ return input == null ? null : DomToNormalizedNodeParserFactory
+ .getInstance(DomUtils.defaultValueCodecProvider(), schemaContext.getCurrentContext())
+ .getContainerNodeParser()
+ .parse(Collections.singletonList(oElement.getDomElement()), input);
+ }
+
+}
import com.google.common.base.Optional;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class Unlock extends AbstractLastNetconfOperation{
+public class Unlock extends AbstractSingletonNetconfOperation {
private static final Logger LOG = LoggerFactory.getLogger(Unlock.class);
package org.opendaylight.controller.netconf.mdsal.connector.ops.get;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
import javax.xml.transform.dom.DOMResult;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
+import org.w3c.dom.Element;
import org.w3c.dom.Node;
-public abstract class AbstractGet extends AbstractLastNetconfOperation {
+public abstract class AbstractGet extends AbstractSingletonNetconfOperation {
- protected static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractGet.class);
+ protected static final String FILTER = "filter";
+ static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
protected final CurrentSchemaContext schemaContext;
-
public AbstractGet(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext) {
super(netconfSessionIdForReporting);
this.schemaContext = schemaContext;
return result.getNode();
}
+
private XMLStreamWriter getXmlStreamWriter(final DOMResult result) {
try {
return XML_OUTPUT_FACTORY.createXMLStreamWriter(result);
// TODO this code is located in Restconf already
private void writeRootElement(final XMLStreamWriter xmlWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) {
try {
- for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
- nnWriter.write(child);
+ if (data.getNodeType().equals(SchemaContext.NAME)) {
+ for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
+ nnWriter.write(child);
+ }
+ } else {
+ nnWriter.write(data);
}
nnWriter.flush();
xmlWriter.flush();
}
}
+ private DataSchemaNode getSchemaNodeFromNamespace(final XmlElement element) throws NetconfDocumentedException {
+
+ try {
+ final Module module = schemaContext.getCurrentContext().findModuleByNamespaceAndRevision(new URI(element.getNamespace()), null);
+ DataSchemaNode dataSchemaNode = module.getDataChildByName(element.getName());
+ if (dataSchemaNode != null) {
+ return dataSchemaNode;
+ }
+ } catch (URISyntaxException e) {
+ LOG.debug("Error during parsing of element namespace, this should not happen since namespace of an xml " +
+ "element is valid and if the xml was parsed then the URI should be as well");
+ throw new IllegalArgumentException("Unable to parse element namespace, this should not happen since " +
+ "namespace of an xml element is valid and if the xml was parsed then the URI should be as well");
+ }
+ throw new NetconfDocumentedException("Unable to find node with namespace: " + element.getNamespace() + "in schema context: " + schemaContext.getCurrentContext().toString(),
+ ErrorType.application,
+ ErrorTag.unknown_namespace,
+ ErrorSeverity.error);
+ }
+
+ protected Element serializeNodeWithParentStructure(Document document, YangInstanceIdentifier dataRoot, NormalizedNode node) {
+ if (!dataRoot.equals(ROOT)) {
+ return (Element) transformNormalizedNode(document,
+ ImmutableNodes.fromInstanceId(schemaContext.getCurrentContext(), dataRoot, node),
+ ROOT);
+ }
+ return (Element) transformNormalizedNode(document, node, ROOT);
+ }
+
+ /**
+ *
+ * @param operationElement operation element
+ * @return if Filter is present and not empty returns Optional of the InstanceIdentifier to the read location in datastore.
+ * empty filter returns Optional.absent() which should equal an empty <data/> container in the response.
+ * if filter is not present we want to read the entire datastore - return ROOT.
+ * @throws NetconfDocumentedException
+ */
+ protected Optional<YangInstanceIdentifier> getDataRootFromFilter(XmlElement operationElement) throws NetconfDocumentedException {
+ Optional<XmlElement> filterElement = operationElement.getOnlyChildElementOptionally(FILTER);
+ if (filterElement.isPresent()) {
+ if (filterElement.get().getChildElements().size() == 0) {
+ return Optional.absent();
+ }
+ return Optional.of(getInstanceIdentifierFromFilter(filterElement.get()));
+ } else {
+ return Optional.of(ROOT);
+ }
+ }
+
+ @VisibleForTesting
+ protected YangInstanceIdentifier getInstanceIdentifierFromFilter(XmlElement filterElement) throws NetconfDocumentedException {
+
+ if (filterElement.getChildElements().size() != 1) {
+ throw new NetconfDocumentedException("Multiple filter roots not supported yet",
+ ErrorType.application, ErrorTag.operation_not_supported, ErrorSeverity.error);
+ }
+
+ XmlElement element = filterElement.getOnlyChildElement();
+ DataSchemaNode schemaNode = getSchemaNodeFromNamespace(element);
+
+ return getReadPointFromNode(YangInstanceIdentifier.builder().build(), filterToNormalizedNode(element, schemaNode));
+ }
+
+ private YangInstanceIdentifier getReadPointFromNode(final YangInstanceIdentifier pathArg, final NormalizedNode nNode) {
+ final YangInstanceIdentifier path = pathArg.node(nNode.getIdentifier());
+ if (nNode instanceof DataContainerNode) {
+ DataContainerNode node = (DataContainerNode) nNode;
+ if (node.getValue().size() == 1) {
+ return getReadPointFromNode(path, (NormalizedNode) Lists.newArrayList(node.getValue()).get(0));
+ }
+ }
+ return path;
+ }
+
+ private NormalizedNode filterToNormalizedNode(XmlElement element, DataSchemaNode schemaNode) throws NetconfDocumentedException {
+ DomToNormalizedNodeParserFactory parserFactory = DomToNormalizedNodeParserFactory
+ .getInstance(DomUtils.defaultValueCodecProvider(), schemaContext.getCurrentContext());
+
+ final NormalizedNode parsedNode;
+
+ if (schemaNode instanceof ContainerSchemaNode) {
+ parsedNode = parserFactory.getContainerNodeParser().parse(Collections.singletonList(element.getDomElement()), (ContainerSchemaNode) schemaNode);
+ } else if (schemaNode instanceof ListSchemaNode) {
+ parsedNode = parserFactory.getMapNodeParser().parse(Collections.singletonList(element.getDomElement()), (ListSchemaNode) schemaNode);
+ } else {
+ throw new NetconfDocumentedException("Schema node of the top level element is not an instance of container or list",
+ ErrorType.application, ErrorTag.unknown_element, ErrorSeverity.error);
+ }
+ return parsedNode;
+ }
+
protected static final class GetConfigExecution {
- private final Optional<Datastore> datastore;
+ private final Optional<Datastore> datastore;
public GetConfigExecution(final Optional<Datastore> datastore) {
this.datastore = datastore;
}
throw new NetconfDocumentedException("Get-config source attribute error: " + e.getMessage(), e.getErrorType(), e.getErrorTag(), e.getErrorSeverity(), e.getErrorInfo());
}
- // Add filter
-
return new GetConfigExecution(sourceDatastore);
}
xml.checkName(operationName);
xml.checkNamespace(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
}
+
}
}
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
private static final Logger LOG = LoggerFactory.getLogger(Get.class);
private static final String OPERATION_NAME = "get";
-
private final TransactionProvider transactionProvider;
public Get(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final TransactionProvider transactionProvider) {
@Override
protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
- final YangInstanceIdentifier dataRoot = ROOT;
+ final Optional<YangInstanceIdentifier> dataRootOptional = getDataRootFromFilter(operationElement);
+ if (!dataRootOptional.isPresent()) {
+ return XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ }
+
+ final YangInstanceIdentifier dataRoot = dataRootOptional.get();
+
DOMDataReadWriteTransaction rwTx = getTransaction(Datastore.running);
try {
final Optional<NormalizedNode<?, ?>> normalizedNodeOptional = rwTx.read(LogicalDatastoreType.OPERATIONAL, dataRoot).checkedGet();
transactionProvider.abortRunningTransaction(rwTx);
- return (Element) transformNormalizedNode(document, normalizedNodeOptional.get(), dataRoot);
+
+ if (!normalizedNodeOptional.isPresent()) {
+ return XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ }
+
+ return serializeNodeWithParentStructure(document, dataRoot, normalizedNodeOptional.get());
} catch (ReadFailedException e) {
LOG.warn("Unable to read data: {}", dataRoot, e);
throw new IllegalStateException("Unable to read data " + dataRoot, e);
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
private static final Logger LOG = LoggerFactory.getLogger(GetConfig.class);
private static final String OPERATION_NAME = "get-config";
-
private final TransactionProvider transactionProvider;
public GetConfig(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final TransactionProvider transactionProvider) {
throw e;
}
- final YangInstanceIdentifier dataRoot = ROOT;
+ final Optional<YangInstanceIdentifier> dataRootOptional = getDataRootFromFilter(operationElement);
+ if (!dataRootOptional.isPresent()) {
+ return XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ }
+
+ final YangInstanceIdentifier dataRoot = dataRootOptional.get();
+
// Proper exception should be thrown
Preconditions.checkState(getConfigExecution.getDatastore().isPresent(), "Source element missing from request");
if (getConfigExecution.getDatastore().get() == Datastore.running) {
transactionProvider.abortRunningTransaction(rwTx);
}
- return (Element) transformNormalizedNode(document, normalizedNodeOptional.get(), dataRoot);
+
+ if (!normalizedNodeOptional.isPresent()) {
+ return XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ }
+
+ return serializeNodeWithParentStructure(document, dataRoot, normalizedNodeOptional.get());
} catch (ReadFailedException e) {
LOG.warn("Unable to read data: {}", dataRoot, e);
throw new IllegalStateException("Unable to read data " + dataRoot, e);
container dom-broker {
uses config:service-ref {
refine type {
- mandatory false;
- config:required-identity md-sal-dom:dom-async-data-broker;
+ mandatory true;
+ config:required-identity md-sal-dom:dom-broker-osgi-registry;
}
}
}
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.EnumMap;
import java.util.List;
import java.util.concurrent.ExecutorService;
import org.opendaylight.controller.netconf.mdsal.connector.ops.get.Get;
import org.opendaylight.controller.netconf.mdsal.connector.ops.get.GetConfig;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
private static final String RPC_REPLY_ELEMENT = "rpc-reply";
private static final String DATA_ELEMENT = "data";
+ private static final String FILTER_NODE = "filter";
+ private static final String GET_CONFIG = "get-config";
+ private static final QName TOP = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "top");
+ private static final QName USERS = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "users");
+ private static final QName USER = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "user");
+ private static final QName MODULES = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "modules");
+ private static final QName AUGMENTED_CONTAINER = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "augmented-container");
+ private static final QName AUGMENTED_STRING_IN_CONT = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "identifier");
+ private static final QName CHOICE_NODE = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "choice-node");
+ private static final QName AUGMENTED_CASE = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "augmented-case");
+ private static final QName CHOICE_WRAPPER = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "choice-wrapper");
+ private static final QName INNER_CHOICE = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "inner-choice");
+ private static final QName INNER_CHOICE_TEXT = QName.create("urn:opendaylight:mdsal:mapping:test", "2015-02-26", "text");
+
+ private static final YangInstanceIdentifier AUGMENTED_CONTAINER_IN_MODULES =
+ YangInstanceIdentifier.builder().node(TOP).node(MODULES).build().node(new AugmentationIdentifier(Collections.singleton(AUGMENTED_CONTAINER)));
private static Document RPC_REPLY_OK = null;
private TransactionProvider transactionProvider = null;
-
@Before
public void setUp() throws Exception {
this.schemaContext = parseSchemas(getYangSchemas());
schemaContext.getModules();
- SchemaService schemaService = createSchemaService();
+ final SchemaService schemaService = createSchemaService();
final DOMStore operStore = InMemoryDOMDataStoreFactory.create("DOM-OPER", schemaService);
final DOMStore configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", schemaService);
ExecutorService listenableFutureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(
16, 16, "CommitFutures");
- ConcurrentDOMDataBroker cdb = new ConcurrentDOMDataBroker(datastores, listenableFutureExecutor);
+ final ConcurrentDOMDataBroker cdb = new ConcurrentDOMDataBroker(datastores, listenableFutureExecutor);
this.transactionProvider = new TransactionProvider(cdb, sessionIdForReporting);
this.currentSchemaContext = new CurrentSchemaContext(schemaService);
@Test
public void testEmptyDatastore() throws Exception {
-
- Document response = getConfigRunning();
- assertEmptyDatastore(response);
-
- response = getConfigCandidate();
- assertEmptyDatastore(response);
-
- response = get();
- assertEmptyDatastore(response);
-
+ assertEmptyDatastore(get());
+ assertEmptyDatastore(getConfigCandidate());
+ assertEmptyDatastore(getConfigRunning());
}
@Test
public void testEditRunning() throws Exception {
try {
- edit("messages/mapping/editConfig_running.xml");
+ edit("messages/mapping/editConfigs/editConfig_running.xml");
fail("Should have failed - edit config on running datastore is not supported");
} catch (NetconfDocumentedException e) {
assertTrue(e.getErrorSeverity() == ErrorSeverity.error);
@Test
public void testCandidateTransaction() throws Exception {
- verifyResponse(edit("messages/mapping/editConfig_merge_n1.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_n1_control.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_n1.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_n1_control.xml"));
assertEmptyDatastore(getConfigRunning());
verifyResponse(discardChanges(), RPC_REPLY_OK);
@Test
public void testEditWithCommit() throws Exception {
- verifyResponse(edit("messages/mapping/editConfig_merge_n1.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_n1_control.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_n1.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_n1_control.xml"));
verifyResponse(commit(), RPC_REPLY_OK);
- verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_n1_control.xml"));
+ verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_n1_control.xml"));
deleteDatastore();
@Test
public void testMultipleEditsWithMerge() throws Exception {
- verifyResponse(edit("messages/mapping/editConfig_merge_multiple_1.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_control_1.xml"));
- verifyResponse(edit("messages/mapping/editConfig_merge_single_1.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_control_2.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_multiple_1.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_control_1.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_single_1.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_control_2.xml"));
assertEmptyDatastore(getConfigRunning());
verifyResponse(commit(), RPC_REPLY_OK);
- verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_control_2.xml"));
+ verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_control_2.xml"));
deleteDatastore();
@Test
public void testMoreComplexEditConfigs() throws Exception {
- verifyResponse(edit("messages/mapping/editConfig_merge_multiple_1.xml"), RPC_REPLY_OK);
- verifyResponse(edit("messages/mapping/editConfig_merge_single_1.xml"), RPC_REPLY_OK);
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_multiple_1.xml"), RPC_REPLY_OK);
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_single_1.xml"), RPC_REPLY_OK);
- verifyResponse(edit("messages/mapping/editConfig_merge_multiple_2.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_after_more_complex_merge.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_multiple_2.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_after_more_complex_merge.xml"));
- verifyResponse(edit("messages/mapping/editConfig_merge_multiple_3.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_after_more_complex_merge_2.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_multiple_3.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_after_more_complex_merge_2.xml"));
- verifyResponse(edit("messages/mapping/editConfig_merge_multiple_4_replace.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_after_replace.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_multiple_4_replace.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_after_replace.xml"));
verifyResponse(commit(), RPC_REPLY_OK);
- verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_multiple_after_replace.xml"));
+ verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_multiple_after_replace.xml"));
- verifyResponse(edit("messages/mapping/editConfig_replace_default.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_replace_default_control.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_replace_default.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_replace_default_control.xml"));
verifyResponse(commit(), RPC_REPLY_OK);
- verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_replace_default_control.xml"));
+ verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_replace_default_control.xml"));
deleteDatastore();
@Test
public void testEditWithCreate() throws Exception {
- verifyResponse(edit("messages/mapping/editConfig_create.xml"), RPC_REPLY_OK);
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_create.xml"), RPC_REPLY_OK);
verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_create_n1_control.xml"));
try {
- edit("messages/mapping/editConfig_create.xml");
+ edit("messages/mapping/editConfigs/editConfig_create.xml");
fail("Create should have failed - data already exists");
} catch (NetconfDocumentedException e) {
assertTrue(e.getErrorSeverity() == ErrorSeverity.error);
assertEmptyDatastore(getConfigRunning());
try {
- edit("messages/mapping/editConfig_delete-root.xml");
+ edit("messages/mapping/editConfigs/editConfig_delete-root.xml");
fail("Delete should have failed - data is missing");
} catch (NetconfDocumentedException e) {
assertTrue(e.getErrorSeverity() == ErrorSeverity.error);
@Test
public void testEditMissingDefaultOperation() throws Exception {
- verifyResponse(edit("messages/mapping/editConfig_merge_missing_default-operation_1.xml"), RPC_REPLY_OK);
- verifyResponse(edit("messages/mapping/editConfig_merge_missing_default-operation_2.xml"), RPC_REPLY_OK);
- verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_missing_default-operation_control.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_missing_default-operation_1.xml"), RPC_REPLY_OK);
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_merge_missing_default-operation_2.xml"), RPC_REPLY_OK);
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_missing_default-operation_control.xml"));
verifyResponse(commit(), RPC_REPLY_OK);
- verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfig_merge_missing_default-operation_control.xml"));
+ verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/editConfigs/editConfig_merge_missing_default-operation_control.xml"));
deleteDatastore();
}
+ @Test
+ public void testFiltering() throws Exception {
+
+ assertEmptyDatastore(getConfigCandidate());
+ assertEmptyDatastore(getConfigRunning());
+
+ verifyResponse(getConfigCandidate(), XmlFileLoader.xmlFileToDocument("messages/mapping/get-empty-response.xml"));
+ verifyResponse(getConfigRunning(), XmlFileLoader.xmlFileToDocument("messages/mapping/get-empty-response.xml"));
+ verifyResponse(getConfigWithFilter("messages/mapping/filters/get-filter-users.xml"),
+ XmlFileLoader.xmlFileToDocument("messages/mapping/get-empty-response.xml"));
+
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig-filtering-setup.xml"), RPC_REPLY_OK);
+ verifyResponse(commit(), RPC_REPLY_OK);
+
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-alluser.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(USERS).node(USER).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-company-info.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(USERS).node(USER).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-modules-and-admin.xml",
+ YangInstanceIdentifier.builder().node(TOP).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-only-names-types.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(USERS).node(USER).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-specific-module-type-and-user.xml",
+ YangInstanceIdentifier.builder().node(TOP).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-superuser.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(USERS).node(USER).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-users.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(USERS).build());
+
+ YangInstanceIdentifier ident = YangInstanceIdentifier.
+ builder(AUGMENTED_CONTAINER_IN_MODULES).
+ node(AUGMENTED_CONTAINER).
+ node(AUGMENTED_STRING_IN_CONT).build();
+
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-augmented-string.xml", ident);
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-augmented-case.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(CHOICE_NODE).node(AUGMENTED_CASE).build());
+
+ verifyResponse(getConfigWithFilter("messages/mapping/filters/get-filter-augmented-case.xml"),
+ XmlFileLoader.xmlFileToDocument("messages/mapping/filters/response-augmented-case.xml"));
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig-filtering-setup2.xml"), RPC_REPLY_OK);
+ verifyResponse(commit(), RPC_REPLY_OK);
+
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-augmented-case-inner-choice.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(CHOICE_NODE).node(CHOICE_WRAPPER).build());
+ verifyFilterIdentifier("messages/mapping/filters/get-filter-augmented-case-inner-case.xml",
+ YangInstanceIdentifier.builder().node(TOP).node(CHOICE_NODE).node(CHOICE_WRAPPER).node(INNER_CHOICE).node(INNER_CHOICE_TEXT).build());
+
+ verifyResponse(getConfigWithFilter("messages/mapping/filters/get-filter-augmented-string.xml"),
+ XmlFileLoader.xmlFileToDocument("messages/mapping/filters/response-augmented-string.xml"));
+ verifyResponse(getConfigWithFilter("messages/mapping/filters/get-filter-augmented-case-inner-choice.xml"),
+ XmlFileLoader.xmlFileToDocument("messages/mapping/filters/response-augmented-case-inner-choice.xml"));
+ verifyResponse(getConfigWithFilter("messages/mapping/filters/get-filter-augmented-case-inner-case.xml"),
+ XmlFileLoader.xmlFileToDocument("messages/mapping/filters/response-augmented-case-inner-choice.xml"));
+
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_delete-top.xml"), RPC_REPLY_OK);
+ verifyResponse(commit(), RPC_REPLY_OK);
+
+ }
+
+ private void verifyFilterIdentifier(String resource, YangInstanceIdentifier identifier) throws Exception{
+ TestingGetConfig getConfig = new TestingGetConfig(sessionIdForReporting, currentSchemaContext, transactionProvider);
+ Document request = XmlFileLoader.xmlFileToDocument(resource);
+ YangInstanceIdentifier iid = getConfig.getInstanceIdentifierFromDocument(request);
+ assertTrue(iid.equals(identifier));
+ }
+
+ private class TestingGetConfig extends GetConfig{
+ public TestingGetConfig(String sessionId, CurrentSchemaContext schemaContext, TransactionProvider transactionProvider) {
+ super(sessionId, schemaContext, transactionProvider);
+ }
+
+ public YangInstanceIdentifier getInstanceIdentifierFromDocument(Document request) throws NetconfDocumentedException {
+ XmlElement filterElement = XmlElement.fromDomDocument(request).getOnlyChildElement(GET_CONFIG).getOnlyChildElement(FILTER_NODE);
+ return getInstanceIdentifierFromFilter(filterElement);
+ }
+ }
+
private void deleteDatastore() throws Exception{
- verifyResponse(edit("messages/mapping/editConfig_delete-root.xml"), RPC_REPLY_OK);
+ verifyResponse(edit("messages/mapping/editConfigs/editConfig_delete-root.xml"), RPC_REPLY_OK);
assertEmptyDatastore(getConfigCandidate());
verifyResponse(commit(), RPC_REPLY_OK);
return executeOperation(getConfig, "messages/mapping/getConfig_candidate.xml");
}
+ private Document getConfigWithFilter(String resource) throws NetconfDocumentedException, ParserConfigurationException, SAXException, IOException {
+ GetConfig getConfig = new GetConfig(sessionIdForReporting, currentSchemaContext, transactionProvider);
+ return executeOperation(getConfig, resource);
+ }
+
private Document lock() throws NetconfDocumentedException, ParserConfigurationException, SAXException, IOException {
Lock lock = new Lock(sessionIdForReporting);
return executeOperation(lock, "messages/mapping/lock.xml");
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteSource;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import javax.xml.transform.TransformerException;
+import org.custommonkey.xmlunit.DetailedDiff;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.custommonkey.xmlunit.examples.RecursiveElementNameAndTextQualifier;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.parser.api.YangSyntaxErrorException;
+import org.opendaylight.yangtools.yang.parser.builder.impl.BuilderUtils;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+
+public class RuntimeRpcTest {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpcTest.class);
+
+ private String sessionIdForReporting = "netconf-test-session1";
+
+ private static Document RPC_REPLY_OK = null;
+
+ static {
+ try {
+ RPC_REPLY_OK = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/runtimerpc-ok-reply.xml");
+ } catch (Exception e) {
+ LOG.debug("unable to load rpc reply ok.", e);
+ RPC_REPLY_OK = XmlUtil.newDocument();
+ }
+ }
+
+ private DOMRpcService rpcServiceVoidInvoke = new DOMRpcService() {
+ @Nonnull
+ @Override
+ public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+ return Futures.immediateCheckedFuture((DOMRpcResult) new DefaultDOMRpcResult(null, Collections.<RpcError>emptyList()));
+ }
+
+ @Nonnull
+ @Override
+ public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+ return null;
+ }
+ };
+
+ private DOMRpcService rpcServiceFailedInvocation = new DOMRpcService() {
+ @Nonnull
+ @Override
+ public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+ return Futures.immediateFailedCheckedFuture((DOMRpcException) new DOMRpcException("rpc invocation not implemented yet") {
+ });
+ }
+
+ @Nonnull
+ @Override
+ public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+ return null;
+ }
+ };
+
+ private DOMRpcService rpcServiceSuccesfullInvocation = new DOMRpcService() {
+ @Nonnull
+ @Override
+ public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+ Collection<DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>> children = (Collection) input.getValue();
+ Module module = schemaContext.findModuleByNamespaceAndRevision(type.getLastComponent().getNamespace(), null);
+ RpcDefinition rpcDefinition = getRpcDefinitionFromModule(module, module.getNamespace(), type.getLastComponent().getLocalName());
+ ContainerSchemaNode outputSchemaNode = rpcDefinition.getOutput();
+ ContainerNode node = ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(outputSchemaNode.getQName()))
+ .withValue(children).build();
+
+ return Futures.immediateCheckedFuture((DOMRpcResult) new DefaultDOMRpcResult(node));
+ }
+
+ @Nonnull
+ @Override
+ public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+ return null;
+ }
+ };
+
+ private SchemaContext schemaContext = null;
+ private CurrentSchemaContext currentSchemaContext = null;
+ @Mock
+ private SchemaService schemaService;
+ @Mock
+ private SchemaContextListener listener;
+ @Mock
+ private ListenerRegistration registration;
+
+ @Before
+ public void setUp() throws Exception {
+
+ initMocks(this);
+ doNothing().when(registration).close();
+ doReturn(listener).when(registration).getInstance();
+ doNothing().when(schemaService).addModule(any(Module.class));
+ doNothing().when(schemaService).removeModule(any(Module.class));
+ doReturn(schemaContext).when(schemaService).getGlobalContext();
+ doReturn(schemaContext).when(schemaService).getSessionContext();
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+ ((SchemaContextListener) invocationOnMock.getArguments()[0]).onGlobalContextUpdated(schemaContext);
+ return registration;
+ }
+ }).when(schemaService).registerSchemaContextListener(any(SchemaContextListener.class));
+
+ XMLUnit.setIgnoreWhitespace(true);
+ XMLUnit.setIgnoreAttributeOrder(true);
+
+ this.schemaContext = parseSchemas(getYangSchemas());
+ this.currentSchemaContext = new CurrentSchemaContext(schemaService);
+ }
+
+ @Test
+ public void testVoidOutputRpc() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceVoidInvoke);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-void-output.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+
+ verifyResponse(response, RPC_REPLY_OK);
+ }
+
+ @Test
+ public void testSuccesfullNonVoidInvocation() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceSuccesfullInvocation);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+ verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid-control.xml"));
+ }
+
+ @Test
+ public void testSuccesfullContainerInvocation() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceSuccesfullInvocation);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-container.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+ verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-container-control.xml"));
+ }
+
+ @Test
+ public void testFailedInvocation() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceFailedInvocation);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ try {
+ rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+ fail("should have failed with rpc invocation not implemented yet");
+ } catch (NetconfDocumentedException e) {
+ assertTrue(e.getErrorType() == ErrorType.application);
+ assertTrue(e.getErrorSeverity() == ErrorSeverity.error);
+ assertTrue(e.getErrorTag() == ErrorTag.operation_failed);
+ }
+ }
+
+ @Test
+ public void testVoidInputOutputRpc() throws Exception {
+ RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceVoidInvoke);
+
+ Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-void-input-output.xml");
+ HandlingPriority priority = rpc.canHandle(rpcDocument);
+ Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+ Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+
+ verifyResponse(response, RPC_REPLY_OK);
+ }
+
+ private void verifyResponse(Document response, Document template) throws IOException, TransformerException {
+ DetailedDiff dd = new DetailedDiff(new Diff(response, template));
+ dd.overrideElementQualifier(new RecursiveElementNameAndTextQualifier());
+ //we care about order so response has to be identical
+ assertTrue(dd.identical());
+ }
+
+ private RpcDefinition getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
+ for (RpcDefinition rpcDef : module.getRpcs()) {
+ if (rpcDef.getQName().getNamespace().equals(namespaceURI)
+ && rpcDef.getQName().getLocalName().equals(name)) {
+ return rpcDef;
+ }
+ }
+
+ return null;
+
+ }
+
+ private Collection<InputStream> getYangSchemas() {
+ final List<String> schemaPaths = Arrays.asList("/yang/mdsal-netconf-rpc-test.yang");
+ final List<InputStream> schemas = new ArrayList<>();
+
+ for (String schemaPath : schemaPaths) {
+ InputStream resourceAsStream = getClass().getResourceAsStream(schemaPath);
+ schemas.add(resourceAsStream);
+ }
+
+ return schemas;
+ }
+
+ private SchemaContext parseSchemas(Collection<InputStream> schemas) throws IOException, YangSyntaxErrorException {
+ final YangParserImpl parser = new YangParserImpl();
+ Collection<ByteSource> sources = BuilderUtils.streamsToByteSources(schemas);
+ return parser.parseSources(sources);
+ }
+}
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <edit-config>
+ <target>
+ <candidate/>
+ </target>
+ <test-option>
+ set
+ </test-option>
+ <default-operation>merge</default-operation>
+ <config>
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user>
+ <name>root</name>
+ <type>superuser</type>
+ <full-name>rooty root</full-name>
+ <company-info>
+ <dept>1</dept>
+ <id>1</id>
+ </company-info>
+ </user>
+ <user>
+ <name>admin</name>
+ <type>superuser</type>
+ <full-name>johny admin</full-name>
+ <company-info>
+ <dept>2</dept>
+ <id>2</id>
+ </company-info>
+ </user>
+ <user>
+ <name>regular</name>
+ <type>user</type>
+ <full-name>burt regular</full-name>
+ <company-info>
+ <dept>3</dept>
+ <id>3</id>
+ </company-info>
+ </user>
+ </users>
+ <modules>
+ <augmented-container>
+ <identifier>augmented container</identifier>
+ </augmented-container>
+ <module>
+ <id>module1</id>
+ <type>type1</type>
+ <desc>module1-desc</desc>
+ </module>
+ <module>
+ <id>module2</id>
+ <type>type1</type>
+ <desc>module2-desc</desc>
+ </module>
+ <module>
+ <id>module3</id>
+ <type>unknown</type>
+ <desc>module3-desc</desc>
+ </module>
+ </modules>
+ <augmented-case>augmented case</augmented-case>
+ </top>
+ </config>
+ </edit-config>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <edit-config>
+ <target>
+ <candidate/>
+ </target>
+ <test-option>
+ set
+ </test-option>
+ <default-operation>merge</default-operation>
+ <config>
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <choice-wrapper>
+ <text>augmented nested choice text1</text>
+ </choice-wrapper>
+ </top>
+ </config>
+ </edit-config>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+<edit-config>
+ <target>
+ <candidate/>
+ </target>
+ <test-option>
+ set
+ </test-option>
+ <default-operation>none</default-operation>
+ <config>
+ <top xmlns="urn:opendaylight:mdsal:mapping:test" xmlns:a="urn:ietf:params:xml:ns:netconf:base:1.0" a:operation="delete">
+ </top>
+ </config>
+</edit-config>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user/>
+ </users>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <choice-wrapper>
+ <text/>
+ </choice-wrapper>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
+
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <choice-wrapper/>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <augmented-case/>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <modules>
+ <augmented-container>
+ <identifier/>
+ </augmented-container>
+ </modules>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user>
+ <name>root</name>
+ <company-info/>
+ </user>
+ <user>
+ <name>admin</name>
+ <company-info>
+ <id/>
+ </company-info>
+ </user>
+ <user>
+ <name>regular</name>
+ <company-info>
+ <dept/>
+ </company-info>
+ </user>
+ </users>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user>
+ <name>admin</name>
+ </user>
+ </users>
+ <modules/>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user>
+ <name/>
+ <type/>
+ </user>
+ </users>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user>
+ <type>superuser</type>
+ </user>
+ </users>
+ <modules>
+ <module>
+ <type>type1</type>
+ </module>
+ </modules>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users>
+ <user>
+ <type>superuser</type>
+ </user>
+ </users>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc id="a" a="64" xmlnx="a:b:c:d" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="101">
+ <get-config>
+ <filter type="subtree">
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <users/>
+ </top>
+ </filter>
+ <source>
+ <running/>
+ </source>
+ </get-config>
+</rpc>
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" a="64" id="a" message-id="101" xmlnx="a:b:c:d">
+ <data>
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <choice-wrapper>
+ <text>augmented nested choice text1</text>
+ </choice-wrapper>
+ </top>
+ </data>
+</rpc-reply>
+
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" a="64" id="a" message-id="101" xmlnx="a:b:c:d">
+ <data>
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <augmented-case>augmented case</augmented-case>
+ </top>
+ </data>
+</rpc-reply>
+
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" a="64" id="a" message-id="101" xmlnx="a:b:c:d">
+ <data>
+ <top xmlns="urn:opendaylight:mdsal:mapping:test">
+ <modules>
+ <augmented-container>
+ <identifier>augmented container</identifier>
+ </augmented-container>
+ </modules>
+ </top>
+ </data>
+</rpc-reply>
+
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" a="64" id="a" message-id="101" xmlnx="a:b:c:d">
+ <data/>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <cont1 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ cont1 input string 1
+ </test-string>
+ <test-string2>
+ cont1 input string 2
+ </test-string2>
+ </cont1>
+ <cont2 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ cont2 input string 1
+ </test-string>
+ <test-string2>
+ cont2 input string 2
+ </test-string2>
+ </cont2>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <container-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <cont1>
+ <test-string>
+ cont1 input string 1
+ </test-string>
+ <test-string2>
+ cont1 input string 2
+ </test-string2>
+ </cont1>
+ <cont2>
+ <test-string>
+ cont2 input string 1
+ </test-string>
+ <test-string2>
+ cont2 input string 2
+ </test-string2>
+ </cont2>
+ </container-rpc>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <test-string xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ test rpc input string 1
+ </test-string>
+ <test-string2 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ test rpc input string 2
+ </test-string2>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <nonvoid-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ test rpc input string 1
+ </test-string>
+ <test-string2>
+ test rpc input string 2
+ </test-string2>
+ </nonvoid-rpc>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <void-input-output-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test"/>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="2"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <void-output-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+ <test-string>
+ test rpc input string 1
+ </test-string>
+ <test-string2>
+ test rpc input string 2
+ </test-string2>
+ </void-output-rpc>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="2">
+ <ok/>
+</rpc-reply>
\ No newline at end of file
}
}
}
+
+ container top {
+
+ container users {
+
+ list user {
+
+ leaf name {
+ type string;
+ }
+
+ leaf type {
+ type string;
+ }
+
+ leaf full-name {
+ type string;
+ }
+
+ container company-info {
+
+ leaf dept {
+ type string;
+ }
+
+ leaf id {
+ type string;
+ }
+ }
+ }
+ }
+
+ container modules {
+
+ list module {
+
+ leaf id {
+ type string;
+ }
+
+ leaf type {
+ type string;
+ }
+
+ leaf desc {
+ type string;
+ }
+ }
+ }
+
+ choice choice-node {
+ case a {
+ leaf text {
+ type string;
+ }
+ }
+
+ case b {
+ container text-cont {
+ leaf text {
+ type string;
+ }
+ }
+ }
+ }
+
+ } //top
+
+ augment "/map:top/map:choice-node" {
+ case c {
+ leaf augmented-case {
+ type string;
+ }
+ }
+
+ case d {
+ container choice-wrapper {
+ choice inner-choice {
+ case ia {
+ leaf text {
+ type string;
+ }
+ }
+
+ case ib {
+ leaf text2 {
+ type string;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ augment "/map:top/map:modules/" {
+ container augmented-container{
+ leaf identifier {
+ type string;
+ }
+ }
+ }
}
\ No newline at end of file
--- /dev/null
+module rpc-test {
+ yang-version 1;
+ namespace "urn:opendaylight:mdsal:mapping:rpc:test";
+ prefix "rpc";
+
+ rpc void-input-output-rpc {
+
+ }
+
+ rpc void-output-rpc {
+ input {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+
+ rpc nonvoid-rpc {
+ input {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ output {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+
+ rpc container-rpc {
+ input {
+ container cont1 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ container cont2 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+
+ output {
+ container cont1 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+
+ container cont2 {
+ leaf test-string {
+ type string;
+ }
+
+ leaf test-string2 {
+ type string;
+ }
+ }
+ }
+ }
+}
+
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>mdsal-netconf-monitoring</artifactId>
<packaging>bundle</packaging>
tx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(NetconfState.class), state);
// FIXME first attempt (right after we register to binding broker) always fails
// Is it due to the fact that we are writing from the onSessionInitiated callback ?
- final CheckedFuture<Void, TransactionCommitFailedException> submit = tx.submit();
-
- Futures.addCallback(submit, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void aVoid) {
- LOG.debug("Netconf state updated successfully");
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.warn("Unable to update netconf state", throwable);
- }
- });
+ try {
+ tx.submit().checkedGet();
+ LOG.debug("Netconf state updated successfully");
+ } catch (TransactionCommitFailedException e) {
+ LOG.warn("Unable to update netconf state", e);
+ }
}
@Override
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-api</artifactId>
<packaging>bundle</packaging>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-artifacts</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-auth</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-cli</artifactId>
<packaging>jar</packaging>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-impl</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-composite-node</artifactId>
- </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-model-api</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-parser-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-client</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-api</artifactId>
+ </dependency>
</dependencies>
<build>
import org.opendaylight.controller.netconf.cli.writer.OutFormatter;
import org.opendaylight.controller.netconf.cli.writer.WriteException;
import org.opendaylight.controller.netconf.cli.writer.Writer;
-import org.opendaylight.controller.netconf.cli.writer.impl.CompositeNodeWriter;
+import org.opendaylight.controller.netconf.cli.writer.impl.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
private void handleRegularOutput(final Output response, final OutputDefinition outputDefinition,
final Writer<DataSchemaNode> outHandler) {
- final Map<DataSchemaNode, List<Node<?>>> unwrap = response.unwrap(outputDefinition);
+ final Map<DataSchemaNode, List<NormalizedNode<?, ?>>> unwrap = response.unwrap(outputDefinition);
for (final DataSchemaNode schemaNode : unwrap.keySet()) {
Preconditions.checkNotNull(schemaNode);
private void handleEmptyOutput(final Command command, final Output response) {
try {
- new CompositeNodeWriter(consoleIO, new OutFormatter()).write(null,
- Collections.<Node<?>> singletonList(response.getOutput()));
+ new NormalizedNodeWriter(consoleIO, new OutFormatter()).write(null,
+ Collections.<NormalizedNode<?, ?>>singletonList(response.getOutput()));
} catch (final WriteException e) {
throw new IllegalStateException("Unable to write value for: " + response.getOutput().getNodeType()
+ " from: " + command.getCommandId(), e);
}
private Input handleInput(final InputDefinition inputDefinition) {
- List<Node<?>> allArgs = Collections.emptyList();
+ List<NormalizedNode<?, ?>> allArgs = Collections.emptyList();
try {
if (!inputDefinition.isEmpty()) {
allArgs = argumentHandlerRegistry.getGenericReader(schemaContextRegistry.getLocalSchemaContext()).read(
import com.google.common.base.Optional;
import jline.console.completer.Completer;
import jline.console.completer.NullCompleter;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.cli.commands.CommandDispatcher;
import org.opendaylight.controller.netconf.cli.io.ConsoleContext;
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
@Override
public synchronized void onDeviceConnected(final SchemaContext context,
- final NetconfSessionPreferences preferences, final RpcImplementation rpcImplementation) {
+ final NetconfSessionPreferences preferences, final DOMRpcService rpcService) {
console.enterRootContext(new ConsoleContext() {
@Override
// possible
// TODO detect netconf base version
// TODO detect inet types version
- commandDispatcher.addRemoteCommands(rpcImplementation, context);
+ commandDispatcher.addRemoteCommands(rpcService, context);
schemaContextRegistry.setRemoteSchemaContext(context);
up = true;
this.notify();
}
@Override
- public void onNotification(final CompositeNode compositeNode) {
- // FIXME
+ public void onNotification(ContainerNode domNotification) {
+
}
@Override
import org.opendaylight.controller.sal.connect.netconf.NetconfDevice.SchemaResourcesDTO;
import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas.NetconfStateSchemasResolverImpl;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
-import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceFilter;
repository.registerSchemaSourceListener(TextToASTTransformer.create(repository, repository));
device = new NetconfDevice(new SchemaResourcesDTO(repository, schemaContextFactory, new NetconfStateSchemasResolverImpl()),
- deviceId, handler, executor, new NetconfMessageTransformer());
+ deviceId, handler, executor, true);
listener = new NetconfDeviceCommunicator(deviceId, device);
configBuilder.withSessionListener(listener);
listener.initializeRemoteConnection(netconfClientDispatcher, configBuilder.build());
import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.cli.NetconfDeviceConnectionHandler;
import org.opendaylight.controller.netconf.cli.NetconfDeviceConnectionManager;
import org.opendaylight.controller.netconf.cli.commands.local.Close;
import org.opendaylight.controller.netconf.cli.commands.local.Help;
import org.opendaylight.controller.netconf.cli.commands.remote.RemoteCommand;
import org.opendaylight.controller.netconf.cli.io.IOUtil;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
public static final Collection<String> BASE_NETCONF_SCHEMA_PATHS = Lists.newArrayList("/schema/remote/ietf-netconf.yang",
"/schema/common/netconf-cli-ext.yang", "/schema/common/ietf-inet-types.yang");
- public synchronized void addRemoteCommands(final RpcImplementation rpcInvoker, final SchemaContext remoteSchema) {
- this.addRemoteCommands(rpcInvoker, remoteSchema, parseSchema(BASE_NETCONF_SCHEMA_PATHS));
+ public synchronized void addRemoteCommands(final DOMRpcService rpcService, final SchemaContext remoteSchema) {
+ this.addRemoteCommands(rpcService, remoteSchema, parseSchema(BASE_NETCONF_SCHEMA_PATHS));
}
- public synchronized void addRemoteCommands(final RpcImplementation rpcInvoker, final SchemaContext remoteSchema, final SchemaContext baseNetconfSchema) {
+ public synchronized void addRemoteCommands(final DOMRpcService rpcService, final SchemaContext remoteSchema, final SchemaContext baseNetconfSchema) {
for (final SchemaContext context : Lists.newArrayList(remoteSchema, baseNetconfSchema)) {
for (final Module module : context.getModules()) {
for (final RpcDefinition rpcDefinition : module.getRpcs()) {
- final Command command = RemoteCommand.fromRpc(rpcDefinition, rpcInvoker);
+ final Command command = RemoteCommand.fromRpc(rpcDefinition, rpcService);
remoteCommands.put(rpcDefinition.getQName(), command);
nameToQNameRemote.put(getCommandName(rpcDefinition, module), rpcDefinition.getQName());
}
package org.opendaylight.controller.netconf.cli.commands.input;
import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
/**
* Input arguments for and rpc/command execution
*/
public class Input {
- private final List<Node<?>> args;
+ private final List<NormalizedNode<?, ?>> args;
- private final Map<String, Node<?>> nameToArg = new HashMap<String, Node<?>>();
+ private final Map<String, NormalizedNode<?, ?>> nameToArg = new HashMap<>();
- public Input(final List<Node<?>> args) {
+ public Input(final List<NormalizedNode<?, ?>> args) {
// FIXME empty Input should be constructed from static factory method
if(args.isEmpty()) {
this.args = Collections.emptyList();
return;
}
- final Node<?> input = args.iterator().next();
+ final NormalizedNode<?, ?> input = args.iterator().next();
Preconditions
- .checkArgument(input instanceof CompositeNode, "Input container has to be of type composite node.");
- this.args = ((CompositeNode) input).getValue();
+ .checkArgument(input instanceof DataContainerChild<?, ?>, "Input container has to be of type Data Container Child.");
+ this.args = new ArrayList<>((Collection) input.getValue());
- for (final Node<?> arg : this.args) {
+ for (final NormalizedNode<?, ?> arg : this.args) {
nameToArg.put(arg.getNodeType().getLocalName(), arg);
}
}
- public Node<?> getArg(final String name) {
+ public NormalizedNode<?, ?> getArg(final String name) {
return nameToArg.get(name);
}
- public CompositeNode wrap(final QName rpcQName) {
- return new CompositeNodeTOImpl(rpcQName, null, args);
+ public NormalizedNode<?, ?> wrap(final QName rpcQName) {
+ //TODO just add the list as children to the node
+ return ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(rpcQName))
+ .withValue((Collection) args).build();
}
}
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
import org.opendaylight.protocol.framework.ReconnectStrategy;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
-import org.opendaylight.yangtools.yang.data.impl.SimpleNodeTOImpl;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
/**
private Output invoke(final NetconfClientConfigurationBuilder config, final String addressName, final Input inputArgs) {
final Set<String> remoteCmds = connectManager.connectBlocking(addressName, getAdress(inputArgs), config);
- final ArrayList<Node<?>> output = Lists.newArrayList();
- output.add(new SimpleNodeTOImpl<>(QName.create(getCommandId(), "status"), null, "Connection initiated"));
+ final ArrayList<DataContainerChild<?, ?>> output = Lists.newArrayList();
+ output.add(ImmutableLeafNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(getCommandId(), "status")))
+ .withValue("Connection initiated").build());
+ final ArrayList<LeafSetEntryNode<Object>> leafListChildren = Lists.newArrayList();
for (final String cmdId : remoteCmds) {
- output.add(new SimpleNodeTOImpl<>(QName.create(getCommandId(), "remote-commands"), null, cmdId));
+ leafListChildren.add(ImmutableLeafSetEntryNodeBuilder.create()
+ .withNodeIdentifier(new NodeWithValue(QName.create(getCommandId(), "remote-commands"), cmdId))
+ .withValue(cmdId).build());
}
- return new Output(new CompositeNodeTOImpl(getCommandId(), null, output));
+ return new Output(ImmutableLeafSetNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(getCommandId(), "remote-commands")))
+ .withValue(leafListChildren).build());
}
private NetconfClientConfigurationBuilder getConfig(final Input inputArgs) {
private <T> Optional<T> getArgumentOpt(final Input inputArgs, final String argName, final Class<T> type) {
final QName argQName = QName.create(getCommandId(), argName);
- final Node<?> argumentNode = inputArgs.getArg(argName);
+ final NormalizedNode<?, ?> argumentNode = inputArgs.getArg(argName);
if (argumentNode == null) {
return Optional.absent();
}
- Preconditions.checkArgument(argumentNode instanceof SimpleNode, "Only simple type argument supported, %s",
+ Preconditions.checkArgument(argumentNode instanceof LeafNode, "Only simple type argument supported, %s",
argQName);
final Object value = argumentNode.getValue();
*/
package org.opendaylight.controller.netconf.cli.commands.local;
-import com.google.common.collect.Lists;
import org.opendaylight.controller.netconf.cli.NetconfDeviceConnectionManager;
import org.opendaylight.controller.netconf.cli.commands.AbstractCommand;
import org.opendaylight.controller.netconf.cli.commands.Command;
import org.opendaylight.controller.netconf.cli.commands.output.Output;
import org.opendaylight.controller.netconf.cli.commands.output.OutputDefinition;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
-import org.opendaylight.yangtools.yang.data.impl.SimpleNodeTOImpl;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
/**
public Output invoke(final Input inputArgs) {
connectionManager.disconnect();
- return new Output(new CompositeNodeTOImpl(getCommandId(), null,
- Lists.<Node<?>> newArrayList(new SimpleNodeTOImpl<>(QName.create(getCommandId(), "status"), null,
- "Connection disconnected"))));
+ return new Output(
+ ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(getCommandId()))
+ .withChild(ImmutableLeafNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(getCommandId(), "status")))
+ .withValue("Connection disconnected").build()).build());
}
public static Command create(final RpcDefinition rpcDefinition,
import org.opendaylight.controller.netconf.cli.commands.output.Output;
import org.opendaylight.controller.netconf.cli.commands.output.OutputDefinition;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
/**
@Override
public Output invoke(final Input inputArgs) {
- final ArrayList<Node<?>> value = Lists.newArrayList();
+ final ArrayList<MapEntryNode> value = Lists.newArrayList();
for (final String id : commandDispatcher.getCommandIds()) {
final Optional<Command> cmd = commandDispatcher.getCommand(id);
Preconditions.checkState(cmd.isPresent(), "Command %s has to be present in command dispatcher", id);
final Optional<String> description = cmd.get().getCommandDescription();
- final List<Node<?>> nameAndDescription = Lists.newArrayList();
- nameAndDescription.add(NodeFactory.createImmutableSimpleNode(QName.create(getCommandId(), "id"), null, id));
+ final List<DataContainerChild<?, ?>> nameAndDescription = Lists.newArrayList();
+ nameAndDescription.add(
+ ImmutableLeafNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(getCommandId(), "id")))
+ .withValue(id).build());
if(description.isPresent()) {
- nameAndDescription.add(NodeFactory.createImmutableSimpleNode(QName.create(getCommandId(), "description"), null, description.get()));
+ nameAndDescription.add(
+ ImmutableLeafNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(getCommandId(), "description")))
+ .withValue(description.get()).build());
}
- value.add(ImmutableCompositeNode.create(QName.create(getCommandId(), "commands"), nameAndDescription));
+ value.add(ImmutableMapEntryNodeBuilder.create()
+ .withValue(nameAndDescription)
+ .withNodeIdentifier(
+ new NodeIdentifierWithPredicates(QName.create(getCommandId(), "commands"),
+ QName.create(getCommandId(), "id"), id)).build());
}
+ MapNode mappedHelp = ImmutableMapNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(QName.create(getCommandId(), "commands")))
+ .withValue(value).build();
- return new Output(new CompositeNodeTOImpl(getCommandId(), null, value));
+ return new Output(mappedHelp);
}
public static Command create(final RpcDefinition rpcDefinition, final CommandDispatcher commandDispatcher) {
import java.util.List;
import java.util.Map;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
/**
*/
public class Output {
- private final CompositeNode output;
+ private final NormalizedNode<?, ?> output;
- public Output(final CompositeNode output) {
- this.output = output;
+ public Output(final NormalizedNode<?, ?> output) {
+ if (output instanceof ContainerNode && output.getNodeType().getLocalName() == "rpc-reply") {
+ this.output = ((ContainerNode) output).getValue().iterator().next();
+ } else {
+ this.output = output;
+ }
}
- public Map<DataSchemaNode, List<Node<?>>> unwrap(final OutputDefinition outputDefinition) {
+ public Map<DataSchemaNode, List<NormalizedNode<?, ?>>> unwrap(final OutputDefinition outputDefinition) {
Preconditions.checkArgument(outputDefinition.isEmpty() == false);
final Map<QName, DataSchemaNode> mappedSchemaNodes = mapOutput(outputDefinition);
- final Map<DataSchemaNode, List<Node<?>>> mappedNodesToSchema = Maps.newHashMap();
-
- for (final Node<?> node : output.getValue()) {
- final DataSchemaNode schemaNode = mappedSchemaNodes.get(node.getKey().withoutRevision());
- final List<Node<?>> list = mappedNodesToSchema.get(schemaNode) == null ? Lists.<Node<?>> newArrayList()
- : mappedNodesToSchema.get(schemaNode);
- list.add(node);
- mappedNodesToSchema.put(schemaNode, list);
- }
+ final Map<DataSchemaNode, List<NormalizedNode<?, ?>>> mappedNodesToSchema = Maps.newHashMap();
+
+ final DataSchemaNode schemaNode = mappedSchemaNodes.get(output.getNodeType().withoutRevision());
+ final List<NormalizedNode<?, ?>> list = mappedNodesToSchema.get(schemaNode) == null ? Lists.<NormalizedNode<?, ?>>newArrayList()
+ : mappedNodesToSchema.get(schemaNode);
+ list.add(output);
+ mappedNodesToSchema.put(schemaNode, list);
return mappedNodesToSchema;
}
- public CompositeNode getOutput() {
+ public NormalizedNode<?, ?> getOutput() {
return output;
}
*/
package org.opendaylight.controller.netconf.cli.commands.remote;
-import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.netconf.cli.commands.AbstractCommand;
import org.opendaylight.controller.netconf.cli.commands.Command;
import org.opendaylight.controller.netconf.cli.commands.CommandInvocationException;
import org.opendaylight.controller.netconf.cli.commands.input.InputDefinition;
import org.opendaylight.controller.netconf.cli.commands.output.Output;
import org.opendaylight.controller.netconf.cli.commands.output.OutputDefinition;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
/**
* Generic remote command implementation that sends the rpc xml to the remote device and waits for response
// TODO make this configurable
private static final long DEFAULT_TIMEOUT = 10000;
private static final TimeUnit DEFAULT_TIMEOUT_UNIT = TimeUnit.MILLISECONDS;
- private final RpcImplementation rpc;
+ private final DOMRpcService rpcService;
- public RemoteCommand(final QName qName, final InputDefinition args, final OutputDefinition output, final String description, final RpcImplementation rpc) {
+ public RemoteCommand(final QName qName, final InputDefinition args, final OutputDefinition output, final String description, final DOMRpcService rpcService) {
super(qName, args, output, description);
- this.rpc = rpc;
+ this.rpcService = rpcService;
}
@Override
public Output invoke(final Input inputArgs) throws CommandInvocationException {
- final ListenableFuture<RpcResult<CompositeNode>> invokeRpc = rpc.invokeRpc(getCommandId(), inputArgs.wrap(getCommandId()));
+ final CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc =
+ rpcService.invokeRpc(SchemaPath.create(Collections.singletonList(getCommandId()), true), inputArgs.wrap(getCommandId()));
+
try {
return new Output(invokeRpc.get(DEFAULT_TIMEOUT, DEFAULT_TIMEOUT_UNIT).getResult());
} catch (final ExecutionException e) {
}
}
- public static Command fromRpc(final RpcDefinition rpcDefinition, final RpcImplementation rpcInvoker) {
+ public static Command fromRpc(final RpcDefinition rpcDefinition, final DOMRpcService rpcService) {
final InputDefinition args = getInputDefinition(rpcDefinition);
final OutputDefinition retVal = getOutputDefinition(rpcDefinition);
- return new RemoteCommand(rpcDefinition.getQName(), args, retVal, rpcDefinition.getDescription(), rpcInvoker);
+ return new RemoteCommand(rpcDefinition.getQName(), args, retVal, rpcDefinition.getDescription(), rpcService);
}
}
import jline.console.completer.NullCompleter;
import org.opendaylight.controller.netconf.cli.io.ConsoleContext;
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
}
@Override
- public List<Node<?>> read(final T schemaNode) throws ReadingException {
+ public List<NormalizedNode<?, ?>> read(final T schemaNode) throws ReadingException {
if (isReadingWanted(schemaNode)) {
final ConsoleContext ctx = getContext(schemaNode);
console.enterContext(ctx);
// TODO javadoc
- protected abstract List<Node<?>> readWithContext(T schemaNode) throws IOException, ReadingException;
+ protected abstract List<NormalizedNode<?, ?>> readWithContext(T schemaNode) throws IOException, ReadingException;
protected abstract ConsoleContext getContext(T schemaNode);
package org.opendaylight.controller.netconf.cli.reader;
import java.util.List;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
/**
*/
public interface Reader<T extends DataSchemaNode> {
- List<Node<?>> read(T schemaNode) throws ReadingException;
+ List<NormalizedNode<?, ?>> read(T schemaNode) throws ReadingException;
}
package org.opendaylight.controller.netconf.cli.reader.custom;
import static org.opendaylight.controller.netconf.cli.io.IOUtil.isSkipInput;
-
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
// FIXME refactor + unite common code with FilterReader
@Override
- protected List<Node<?>> readWithContext(final DataSchemaNode schemaNode) throws IOException, ReadingException {
+ protected List<NormalizedNode<?, ?>> readWithContext(final DataSchemaNode schemaNode) throws IOException, ReadingException {
console.writeLn("Config " + schemaNode.getQName().getLocalName());
console.writeLn("Submit path of the data to edit. Use TAB for autocomplete");
filterPartsQNames.add(qName);
}
- List<Node<?>> previous = readInnerNode(rawValue);
+ List<? extends NormalizedNode<?, ?>> previous = readInnerNode(rawValue);
for (final QName qName : Lists.reverse(filterPartsQNames).subList(1, filterPartsQNames.size())) {
- previous = Collections.<Node<?>> singletonList(new CompositeNodeTOImpl(qName, null,
- previous == null ? Collections.<Node<?>> emptyList() : previous));
+ previous = Collections.<NormalizedNode<?, ?>>singletonList(
+ ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(qName))
+ .withValue(previous == null ? Collections.<DataContainerChild<?, ?>>emptyList() : (Collection) previous).build()
+ );
+ }
+
+ if (previous == null) {
+ return Collections.singletonList(null);
}
- final Node<?> newNode = previous == null ? null
- : new CompositeNodeTOImpl(schemaNode.getQName(), null, previous);
+ final DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> builder = ImmutableContainerNodeBuilder.create();
+ builder.withNodeIdentifier(new NodeIdentifier(schemaNode.getQName()));
+ builder.withValue((Collection<DataContainerChild<?, ?>>) previous);
- return Collections.<Node<?>> singletonList(newNode);
+ return Collections.<NormalizedNode<?, ?>> singletonList(builder.build());
}
- private List<Node<?>> readInnerNode(final String pathString) throws ReadingException {
+ private List<NormalizedNode<?, ?>> readInnerNode(final String pathString) throws ReadingException {
final Optional<DataSchemaNode> schema = getCurrentNode(getSchemaContext(), pathString);
Preconditions.checkState(schema.isPresent(), "Unable to find schema for %s", pathString);
return commandArgHandlerRegistry.getGenericReader(getSchemaContext(), true).read(schema.get());
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.controller.netconf.cli.reader.impl.ChoiceReader;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
}
@Override
- public List<Node<?>> readWithContext(final ChoiceSchemaNode choiceNode) throws IOException, ReadingException {
+ public List<NormalizedNode<?, ?>> readWithContext(final ChoiceSchemaNode choiceNode) throws IOException, ReadingException {
Preconditions.checkState(choiceNode.getQName().equals(EDIT_CONTENT_QNAME), "Unexpected choice %s, expected %s", choiceNode, EDIT_CONTENT_QNAME);
final ChoiceCaseNode selectedCase = choiceNode.getCaseNodeByName(CONFIG_QNAME);
Preconditions.checkNotNull(selectedCase, "Unexpected choice %s, expected %s that contains %s", choiceNode, EDIT_CONTENT_QNAME, CONFIG_QNAME);
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
public static final String FILTER_TYPE_VALUE_DEFAULT = "subtree";
@Override
- protected List<Node<?>> readWithContext(final DataSchemaNode schemaNode) throws IOException, ReadingException {
+ protected List<NormalizedNode<?, ?>> readWithContext(final DataSchemaNode schemaNode) throws IOException, ReadingException {
boolean redSuccessfuly = false;
- Node<?> newNode = null;
+ DataContainerChild<?, ?> newNode = null;
do {
console.writeLn("Filter " + schemaNode.getQName().getLocalName());
console.writeLn("Submit path of the data to retrieve. Use TAB for autocomplete");
filterPartsQNames.add(qName);
}
- Node<?> previous = null;
+ DataContainerChild<?, ?> previous = null;
for (final QName qName : Lists.reverse(filterPartsQNames)) {
- previous = new CompositeNodeTOImpl(qName, null,
- previous == null ? Collections.<Node<?>> emptyList()
- : Collections.<Node<?>> singletonList(previous));
+ previous = ImmutableContainerNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(qName))
+ .withValue(previous == null ? Collections.<DataContainerChild<?, ?>>emptyList()
+ : Collections.<DataContainerChild<?, ?>>singletonList(previous)).build();
}
final Map<QName, String> attributes = Collections.singletonMap(FILTER_TYPE_QNAME,
FILTER_TYPE_VALUE_DEFAULT);
- newNode = previous == null ? null : ImmutableCompositeNode.create(schemaNode.getQName(), attributes,
- Collections.<Node<?>> singletonList(previous));
+ newNode = previous == null ? null : ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(schemaNode.getQName())).withChild(previous).build();
redSuccessfuly = true;
} catch (final ReadingException e) {
final String message = "Specified filter path isn't correct.";
console.writeLn(message);
}
} while (!redSuccessfuly);
- return Collections.<Node<?>> singletonList(newNode);
+ return Collections.<NormalizedNode<?, ?>> singletonList(newNode);
}
@Override
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
-import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlDocumentUtils;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.w3c.dom.Document;
}
@Override
- protected List<Node<?>> readWithContext(final AnyXmlSchemaNode schemaNode) throws IOException, ReadingException {
+ protected List<NormalizedNode<?, ?>> readWithContext(final AnyXmlSchemaNode schemaNode) throws IOException, ReadingException {
console.writeLn(listType(schemaNode) + " " + schemaNode.getQName().getLocalName());
final String rawValue = console.read();
- Node<?> newNode = null;
+ DataContainerChild<?, ?> newNode = null;
if (!isSkipInput(rawValue)) {
- final Optional<Node<?>> value = tryParse(rawValue);
+ final Optional<DataContainerChild<?, ?>> value = tryParse(rawValue, schemaNode);
if (value.isPresent()) {
- newNode = NodeFactory.createImmutableCompositeNode(schemaNode.getQName(), null,
- Collections.<Node<?>> singletonList(value.get()));
+ newNode = ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(schemaNode.getQName()))
+ .withChild(value.get()).build();
} else {
- newNode = NodeFactory.createImmutableSimpleNode(schemaNode.getQName(), null, rawValue);
+ newNode = ImmutableLeafNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(schemaNode.getQName())).withValue(rawValue).build();
}
}
- final List<Node<?>> newNodes = new ArrayList<>();
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
newNodes.add(newNode);
return newNodes;
}
- private Optional<Node<?>> tryParse(final String rawValue) {
+ private Optional<DataContainerChild<?, ?>> tryParse(final String rawValue, final AnyXmlSchemaNode schemaNode) {
try {
final Document dom = XmlUtil.readXmlToDocument(rawValue);
- return Optional.<Node<?>> of(XmlDocumentUtils.toDomNode(dom));
+ return Optional.<DataContainerChild<?, ?>> of(
+ DomToNormalizedNodeParserFactory.
+ getInstance(DomUtils.defaultValueCodecProvider(), getSchemaContext()).
+ getAnyXmlNodeParser().
+ parse(Collections.singletonList(dom.getDocumentElement()), schemaNode)
+ );
} catch (SAXException | IOException e) {
// TODO log
return Optional.absent();
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.codec.TypeDefinitionAwareCodec;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
}
@Override
- public List<Node<?>> readWithContext(final T schemaNode) throws IOException, ReadingException {
+ public List<NormalizedNode<?, ?>> readWithContext(final T schemaNode) throws IOException, ReadingException {
TypeDefinition<?> type = getType(schemaNode);
console.formatLn("Submit %s %s(%s)", listType(schemaNode), schemaNode.getQName().getLocalName(), type.getQName().getLocalName());
return wrapValue(schemaNode, resolvedValue);
}
- private List<Node<?>> postSkipOperations(final DataSchemaNode schemaNode) throws IOException {
+ private List<NormalizedNode<?, ?>> postSkipOperations(final DataSchemaNode schemaNode) throws IOException {
console.formatLn("Skipping %s", schemaNode.getQName());
return Collections.emptyList();
}
return console.read();
}
- private List<Node<?>> wrapValue(final T schemaNode, final Object value) {
- final Node<?> newNode = NodeFactory.createImmutableSimpleNode(schemaNode.getQName(), null, value);
- return Collections.<Node<?>> singletonList(newNode);
+ private List<NormalizedNode<?, ?>> wrapValue(final T schemaNode, final Object value) {
+ final NormalizedNode<?, ?> newNode = ImmutableLeafNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(schemaNode.getQName()))
+ .withValue(value).build();
+ return Collections.<NormalizedNode<?, ?>>singletonList(newNode);
}
protected abstract TypeDefinition<?> getType(final T schemaNode);
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableChoiceNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
}
@Override
- public List<Node<?>> readWithContext(final ChoiceSchemaNode choiceNode) throws IOException, ReadingException {
+ public List<NormalizedNode<?, ?>> readWithContext(final ChoiceSchemaNode choiceNode) throws IOException, ReadingException {
final Map<String, ChoiceCaseNode> availableCases = collectAllCases(choiceNode);
console.formatLn("Select case for choice %s from: %s", choiceNode.getQName().getLocalName(),
formatSet(availableCases.keySet()));
throw new ReadingException(message);
}
- return readSelectedCase(selectedCase);
+ return Collections.<NormalizedNode<?, ?>>singletonList(
+ ImmutableChoiceNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(choiceNode.getQName()))
+ .withValue(((Collection) readSelectedCase(selectedCase))).build());
}
- protected List<Node<?>> readSelectedCase(final ChoiceCaseNode selectedCase) throws ReadingException {
+ protected List<NormalizedNode<?, ?>> readSelectedCase(final ChoiceCaseNode selectedCase) throws ReadingException {
// IF there is a case that contains only one Empty type leaf, create the
// leaf without question, since the case was selected
if (containsOnlyOneEmptyLeaf(selectedCase)) {
- final Node<?> newNode = NodeFactory.createImmutableSimpleNode(selectedCase.getChildNodes().iterator()
- .next().getQName(), null, null);
- return Collections.<Node<?>> singletonList(newNode);
+ final NormalizedNode<?, ?> newNode = ImmutableLeafNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(selectedCase.getChildNodes().iterator().next().getQName())).build();
+ return Collections.<NormalizedNode<?, ?>>singletonList(newNode);
}
- final List<Node<?>> newNodes = new ArrayList<>();
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
for (final DataSchemaNode schemaNode : selectedCase.getChildNodes()) {
newNodes.addAll(argumentHandlerRegistry.getGenericReader(getSchemaContext(), getReadConfigNode()).read(
schemaNode));
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
}
@Override
- public List<Node<?>> readWithContext(final ContainerSchemaNode containerNode) throws IOException, ReadingException {
+ public List<NormalizedNode<?, ?>> readWithContext(final ContainerSchemaNode containerNode) throws IOException, ReadingException {
console.formatLn("Submit child nodes for container: %s, %s", containerNode.getQName().getLocalName(),
Collections2.transform(containerNode.getChildNodes(), new Function<DataSchemaNode, String>() {
@Override
return input.getQName().getLocalName();
}
}));
+ final DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> builder = ImmutableContainerNodeBuilder.create();
+ builder.withNodeIdentifier(new NodeIdentifier(containerNode.getQName()));
- final CompositeNodeBuilder<ImmutableCompositeNode> compositeNodeBuilder = ImmutableCompositeNode.builder();
- compositeNodeBuilder.setQName(containerNode.getQName());
+ final ArrayList<NormalizedNode<?, ?>> nodesToAdd = new ArrayList<>();
final SeparatedNodes separatedNodes = SeparatedNodes.separateNodes(containerNode, getReadConfigNode());
for (final DataSchemaNode childNode : sortChildren(separatedNodes.getMandatoryNotKey())) {
- final List<Node<?>> redNodes = argumentHandlerRegistry.getGenericReader(getSchemaContext(),
+ final List<NormalizedNode<?, ?>> redNodes = argumentHandlerRegistry.getGenericReader(getSchemaContext(),
getReadConfigNode()).read(childNode);
if (redNodes.isEmpty()) {
console.formatLn("No data specified for mandatory element %s.", childNode.getQName().getLocalName());
return Collections.emptyList();
} else {
- compositeNodeBuilder.addAll(redNodes);
+ nodesToAdd.addAll(redNodes);
}
}
for (final DataSchemaNode childNode : sortChildren(separatedNodes.getOthers())) {
- compositeNodeBuilder.addAll(argumentHandlerRegistry.getGenericReader(getSchemaContext(),
+ nodesToAdd.addAll(argumentHandlerRegistry.getGenericReader(getSchemaContext(),
getReadConfigNode()).read(childNode));
}
- return Collections.<Node<?>> singletonList(compositeNodeBuilder.toInstance());
+ return Collections.<NormalizedNode<?, ?>> singletonList(builder.withValue((ArrayList) nodesToAdd).build());
}
private List<DataSchemaNode> sortChildren(final Set<DataSchemaNode> unsortedNodes) {
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.GenericListEntryReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
}
@Override
- public List<Node<?>> readWithContext(final T schemaNode) throws IOException, ReadingException {
- final List<Node<?>> newNodes = new ArrayList<>();
+ public List<NormalizedNode<?, ?>> readWithContext(final T schemaNode) throws IOException, ReadingException {
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
Optional<Boolean> readNextListEntry = Optional.of(Boolean.TRUE);
console.formatLn("Reading collection type argument: %s", schemaNode.getQName().getLocalName());
while (readNextListEntry.isPresent() && readNextListEntry.get()) {
import org.opendaylight.controller.netconf.cli.reader.Reader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
}
@Override
- protected List<Node<?>> readWithContext(final DataSchemaNode schemaNode) throws IOException, ReadingException {
+ protected List<NormalizedNode<?, ?>> readWithContext(final DataSchemaNode schemaNode) throws IOException, ReadingException {
final Optional<Class<? extends Reader<DataSchemaNode>>> customReaderClassOpt = tryGetCustomHandler(schemaNode);
if (customReaderClassOpt.isPresent()) {
// TODO reuse instances
}
- private List<Node<?>> readGeneric(final DataSchemaNode schemaNode) throws ReadingException, IOException {
- final List<Node<?>> newNodes = new ArrayList<>();
+ private List<NormalizedNode<?, ?>> readGeneric(final DataSchemaNode schemaNode) throws ReadingException, IOException {
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
boolean isRedCorrectly = false;
do {
try {
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.opendaylight.controller.netconf.cli.CommandArgHandlerRegistry;
import org.opendaylight.controller.netconf.cli.io.BaseConsoleContext;
import org.opendaylight.controller.netconf.cli.reader.AbstractReader;
import org.opendaylight.controller.netconf.cli.reader.GenericListEntryReader;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
}
@Override
- public List<Node<?>> readWithContext(final ListSchemaNode listNode) throws IOException, ReadingException {
+ public List<NormalizedNode<?, ?>> readWithContext(final ListSchemaNode listNode) throws IOException, ReadingException {
console.formatLn("Submit child nodes for list entry: %s, %s", listNode.getQName().getLocalName(),
Collections2.transform(listNode.getChildNodes(), new Function<DataSchemaNode, String>() {
@Override
}));
final String listName = listNode.getQName().getLocalName();
- final CompositeNodeBuilder<ImmutableCompositeNode> compositeNodeBuilder = ImmutableCompositeNode.builder();
- compositeNodeBuilder.setQName(listNode.getQName());
+
+ final DataContainerNodeAttrBuilder<NodeIdentifierWithPredicates, MapEntryNode> builder =
+ ImmutableMapEntryNodeBuilder.create();
+// final CompositeNodeBuilder<ImmutableCompositeNode> compositeNodeBuilder = ImmutableCompositeNode.builder();
+// compositeNodeBuilder.setQName(listNode.getQName());
final SeparatedNodes separatedChildNodes = SeparatedNodes.separateNodes(listNode, getReadConfigNode());
- final List<Node<?>> nodes = readKeys(separatedChildNodes.getKeyNodes());
+ final List<NormalizedNode<?, ?>> nodes = readKeys(separatedChildNodes.getKeyNodes());
+ final Map<QName, Object> qnameToValues = new HashMap<>();
+ for (NormalizedNode node : nodes) {
+ qnameToValues.put(node.getNodeType(), node.getValue());
+ }
+ builder.withNodeIdentifier(new NodeIdentifierWithPredicates(listNode.getQName(), qnameToValues));
+
nodes.addAll(readMandatoryNotKeys(separatedChildNodes.getMandatoryNotKey()));
if (!separatedChildNodes.getOthers().isEmpty()) {
final Optional<Boolean> readNodesWhichAreNotKey = new DecisionReader().read(console,
}
if (!nodes.isEmpty()) {
- compositeNodeBuilder.addAll(nodes);
- return Collections.<Node<?>> singletonList(compositeNodeBuilder.toInstance());
+// compositeNodeBuilder.addAll(nodes);
+ builder.withValue((List) nodes);
+ return Collections.<NormalizedNode<?, ?>>singletonList(
+ ImmutableMapNodeBuilder.create()
+ .withNodeIdentifier(new NodeIdentifier(listNode.getQName()))
+ .withChild(builder.build()).build());
+// return Collections.<DataContainerChild<?, ?>> singletonList(compositeNodeBuilder.toInstance());
} else {
return Collections.emptyList();
}
}
- private List<Node<?>> readKeys(final Set<DataSchemaNode> keys) throws ReadingException, IOException {
- final List<Node<?>> newNodes = new ArrayList<>();
+ private List<NormalizedNode<?, ?>> readKeys(final Set<DataSchemaNode> keys) throws ReadingException, IOException {
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
console.writeLn("Reading keys:");
for (final DataSchemaNode key : keys) {
- final List<Node<?>> readKey = new LeafReader(console, getSchemaContext(), getReadConfigNode())
+ final List<NormalizedNode<?, ?>> readKey = new LeafReader(console, getSchemaContext(), getReadConfigNode())
.read((LeafSchemaNode) key);
if (readKey.size() != 1) {
final String message = String.format(
}
newNodes.addAll(readKey);
}
+
return newNodes;
}
- private List<Node<?>> readMandatoryNotKeys(final Set<DataSchemaNode> mandatoryNotKeys) throws ReadingException,
+ private List<NormalizedNode<?, ?>> readMandatoryNotKeys(final Set<DataSchemaNode> mandatoryNotKeys) throws ReadingException,
IOException {
- final List<Node<?>> newNodes = new ArrayList<>();
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
console.writeLn("Reading mandatory not keys nodes:");
for (final DataSchemaNode mandatoryNode : mandatoryNotKeys) {
- final List<Node<?>> redValue = argumentHandlerRegistry.getGenericReader(getSchemaContext(),
+ final List<NormalizedNode<?, ?>> redValue = argumentHandlerRegistry.getGenericReader(getSchemaContext(),
getReadConfigNode()).read(mandatoryNode);
if (redValue.isEmpty()) {
final String message = String.format(
return newNodes;
}
- private List<Node<?>> readNotKeys(final Set<DataSchemaNode> notKeys) throws ReadingException {
- final List<Node<?>> newNodes = new ArrayList<>();
+ private List<NormalizedNode<?, ?>> readNotKeys(final Set<DataSchemaNode> notKeys) throws ReadingException {
+ final List<NormalizedNode<?, ?>> newNodes = new ArrayList<>();
for (final DataSchemaNode notKey : notKeys) {
newNodes.addAll(argumentHandlerRegistry.getGenericReader(getSchemaContext(), getReadConfigNode()).read(
notKey));
package org.opendaylight.controller.netconf.cli.writer;
import java.util.List;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
/**
*/
public interface Writer<T extends DataSchemaNode> {
- void write(T dataSchemaNode, List<Node<?>> dataNodes) throws WriteException;
+ void write(T dataSchemaNode, List<NormalizedNode<?, ?>> dataNodes) throws WriteException;
}
import org.opendaylight.controller.netconf.cli.writer.WriteException;
import org.opendaylight.controller.netconf.cli.writer.impl.AbstractWriter;
import org.opendaylight.controller.netconf.cli.writer.impl.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlDocumentUtils;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
}
@Override
- protected void writeInner(final DataSchemaNode dataSchemaNode, final List<Node<?>> dataNodes) throws IOException, WriteException {
+ protected void writeInner(final DataSchemaNode dataSchemaNode, final List<NormalizedNode<?, ?>> dataNodes) throws IOException, WriteException {
Preconditions.checkArgument(dataNodes.size() == 1, "Expected only 1 element for data node");
- final Node<?> dataNode = dataNodes.get(0);
- Preconditions.checkArgument(dataNode instanceof CompositeNode, "Unexpected node type: %s, should be %s", dataNode, CompositeNode.class);
+ final NormalizedNode<?, ?> dataNode = dataNodes.get(0);
+ Preconditions.checkArgument(dataNode instanceof ContainerNode, "Unexpected node type: %s, should be %s", dataNode, ContainerNode.class);
StringBuilder output = new StringBuilder();
out.increaseIndent().addStringWithIndent(output, dataSchemaNode.getQName().getLocalName()).openComposite(output);
console.writeLn(output.toString());
- for (final Node<?> childNode : ((CompositeNode) dataNode).getValue()) {
+ for (final Object oChildNode : ((DataContainerNode) dataNode).getValue()) {
+ final NormalizedNode<?, ?> childNode = (NormalizedNode<?, ?>) oChildNode;
final Optional<DataSchemaNode> schemaNode = XmlDocumentUtils.findFirstSchema(childNode.getNodeType(), remoteSchemaContext.getDataDefinitions());
Preconditions.checkState(schemaNode.isPresent(), "Unknown data node %s, not defined in schema", childNode.getNodeType());
- new NormalizedNodeWriter(console, out).write(schemaNode.get(), Collections.<Node<?>>singletonList(childNode));
+ new NormalizedNodeWriter(console, out).write(schemaNode.get(), Collections.<NormalizedNode<?, ?>>singletonList(childNode));
}
output = new StringBuilder();
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.netconf.cli.writer.WriteException;
import org.opendaylight.controller.netconf.cli.writer.Writer;
-import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
public abstract class AbstractWriter<T extends DataSchemaNode> implements Writer<T> {
}
@Override
- public void write(final T dataSchemaNode, final List<Node<?>> dataNodes) throws WriteException {
+ public void write(final T dataSchemaNode, final List<NormalizedNode<?, ?>> dataNodes) throws WriteException {
try {
writeInner(dataSchemaNode, dataNodes);
} catch (final IOException e) {
}
}
- protected abstract void writeInner(final T dataSchemaNode, final List<Node<?>> dataNodes) throws IOException,
+ protected abstract void writeInner(final T dataSchemaNode, final List<NormalizedNode<?, ?>> dataNodes) throws IOException,
WriteException;
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.cli.writer.impl;
-
-import java.io.IOException;
-import java.util.List;
-import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
-import org.opendaylight.controller.netconf.cli.writer.OutFormatter;
-import org.opendaylight.controller.netconf.cli.writer.WriteException;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-
-public class CompositeNodeWriter extends AbstractWriter<DataSchemaNode> {
-
- private final OutFormatter outFormatter;
-
- public CompositeNodeWriter(final ConsoleIO console, final OutFormatter outFormatter) {
- super(console);
- this.outFormatter = outFormatter;
- }
-
- @Override
- protected void writeInner(final DataSchemaNode dataSchemaNode, final List<Node<?>> dataNodes) throws IOException, WriteException {
- final StringBuilder output = new StringBuilder();
- writeNode(dataNodes, output);
- console.writeLn(output);
- }
-
- private void writeNode(final List<Node<?>> dataNodes, final StringBuilder output) throws IOException, WriteException {
- for (final Node<?> dataNode : dataNodes) {
- outFormatter.increaseIndent();
- outFormatter.addStringWithIndent(output, dataNode.getNodeType().getLocalName());
- if (dataNode instanceof CompositeNode) {
- outFormatter.openComposite(output);
- outFormatter.newLine(output);
- writeNode(((CompositeNode) dataNode).getValue(), output);
- outFormatter.closeCompositeWithIndent(output);
- outFormatter.newLine(output);
- } else if (dataNode instanceof SimpleNode<?>) {
- final SimpleNode<?> simpleNode = (SimpleNode<?>) dataNode;
- output.append(" ");
- output.append(simpleNode.getValue());
- outFormatter.newLine(output);
- }
- outFormatter.decreaseIndent();
- }
- }
-}
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.netconf.cli.writer.OutFormatter;
import org.opendaylight.controller.netconf.cli.writer.WriteException;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.composite.node.schema.cnsn.parser.CnSnToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.base.serializer.NodeSerializerDispatcher;
import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
-import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
-import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
this.out = out;
}
- public void writeInner(final DataSchemaNode dataSchemaNode, final List<Node<?>> dataNodes) throws WriteException,
+ public void writeInner(final DataSchemaNode dataSchemaNode, final List<NormalizedNode<?, ?>> dataNodes) throws WriteException,
IOException {
-
+ //Preconditions.checkState(dataNodes.size() == 1);
// TODO - add getDispatcher method to CnSnToNormalizedNodeParserFactory
// to be able call dispatchChildElement
- final DataContainerChild<? extends PathArgument, ?> dataContainerChild = parseToNormalizedNode(dataNodes,
- dataSchemaNode);
+ final NormalizedNode<?, ?> dataContainerChild = dataNodes.get(0);
if (dataContainerChild != null) {
console.writeLn(serializeToCliOutput(dataContainerChild, dataSchemaNode));
}
- private String serializeToCliOutput(final DataContainerChild<? extends PathArgument, ?> dataContainerChild,
+ private String serializeToCliOutput(final NormalizedNode<?, ?> dataContainerChild,
final DataSchemaNode childSchema) {
final CliOutputFromNormalizedNodeSerializerFactory factorySerialization = CliOutputFromNormalizedNodeSerializerFactory
.getInstance(out, DomUtils.defaultValueCodecProvider());
final NodeSerializerDispatcher<String> dispatcher = factorySerialization.getDispatcher();
- final Iterable<String> result = dispatcher.dispatchChildElement(childSchema, dataContainerChild);
+ final Iterable<String> result = dispatcher.dispatchChildElement(childSchema, (DataContainerChild<?, ?>) dataContainerChild);
if (result == null) {
return "";
return output.next();
}
- private DataContainerChild<? extends PathArgument, ?> parseToNormalizedNode(final List<Node<?>> dataNodes,
- final DataSchemaNode dataSchemaNode) {
- final CnSnToNormalizedNodeParserFactory factoryParsing = CnSnToNormalizedNodeParserFactory.getInstance();
- if (dataSchemaNode instanceof ContainerSchemaNode) {
- return factoryParsing.getContainerNodeParser().parse(dataNodes, (ContainerSchemaNode) dataSchemaNode);
- } else if (dataSchemaNode instanceof LeafSchemaNode) {
- return factoryParsing.getLeafNodeParser().parse(dataNodes, (LeafSchemaNode) dataSchemaNode);
- } else if (dataSchemaNode instanceof LeafListSchemaNode) {
- return factoryParsing.getLeafSetNodeParser().parse(dataNodes, (LeafListSchemaNode) dataSchemaNode);
- } else if (dataSchemaNode instanceof ListSchemaNode) {
- return factoryParsing.getMapNodeParser().parse(dataNodes, (ListSchemaNode) dataSchemaNode);
- } else if (dataSchemaNode instanceof ChoiceSchemaNode) {
- return factoryParsing.getChoiceNodeParser().parse(dataNodes, (ChoiceSchemaNode) dataSchemaNode);
- } else if (dataSchemaNode instanceof AugmentationSchema) {
- return factoryParsing.getAugmentationNodeParser().parse(dataNodes, (AugmentationSchema) dataSchemaNode);
- }
- return null;
- }
-
}
*/
package org.opendaylight.controller.netconf.cli;
-import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import static org.opendaylight.controller.netconf.cli.io.IOUtil.PROMPT_SUFIX;
import java.io.File;
import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.controller.netconf.cli.reader.ReadingException;
-import org.opendaylight.controller.netconf.cli.reader.impl.GenericReader;
-import org.opendaylight.controller.netconf.cli.writer.OutFormatter;
import org.opendaylight.controller.netconf.cli.writer.WriteException;
-import org.opendaylight.controller.netconf.cli.writer.impl.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.parser.api.YangContextParser;
final ConsoleIOTestImpl console = new ConsoleIOTestImpl(values, valuesForMessages);
- final List<Node<?>> redData = new GenericReader(console, new CommandArgHandlerRegistry(console,
- new SchemaContextRegistry(schemaContext)), schemaContext).read(cont1);
- assertNotNull(redData);
- assertEquals(1, redData.size());
+// final List<Node<?>> redData = new GenericReader(console, new CommandArgHandlerRegistry(console,
+// new SchemaContextRegistry(schemaContext)), schemaContext).read(cont1);
+// assertNotNull(redData);
+// assertEquals(1, redData.size());
+//
+// assertTrue(redData.get(0) instanceof CompositeNode);
+// final CompositeNode redTopLevelNode = (CompositeNode) redData.get(0);
- assertTrue(redData.get(0) instanceof CompositeNode);
- final CompositeNode redTopLevelNode = (CompositeNode) redData.get(0);
-
- new NormalizedNodeWriter(console, new OutFormatter()).write(cont1, redData);
+ //new NormalizedNodeWriter(console, new OutFormatter()).write(cont1, redData);
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-client</artifactId>
<packaging>bundle</packaging>
this.timer = timer;
}
+ protected Timer getTimer() {
+ return timer;
+ }
+
@Override
public Future<NetconfClientSession> createClient(final NetconfClientConfiguration clientConfiguration) {
switch (clientConfiguration.getProtocol()) {
package org.opendaylight.controller.netconf.client;
-import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
private static final XPathExpression sessionIdXPath = XMLNetconfUtil
.compileXPath("/netconf:hello/netconf:session-id");
+ private static final XPathExpression sessionIdXPathNoNamespace = XMLNetconfUtil
+ .compileXPath("/hello/session-id");
+
private static final String EXI_1_0_CAPABILITY_MARKER = "exi:1.0";
protected NetconfClientSessionNegotiator(final NetconfClientSessionPreferences sessionPreferences,
}
private long extractSessionId(final Document doc) {
- final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
- Preconditions.checkState(sessionIdNode != null, "");
- String textContent = sessionIdNode.getTextContent();
- if (textContent == null || textContent.equals("")) {
- throw new IllegalStateException("Session id not received from server");
+ String textContent = getSessionIdWithXPath(doc, sessionIdXPath);
+ if (Strings.isNullOrEmpty(textContent)) {
+ textContent = getSessionIdWithXPath(doc, sessionIdXPathNoNamespace);
+ if (Strings.isNullOrEmpty(textContent)) {
+ throw new IllegalStateException("Session id not received from server, hello message: " + XmlUtil.toString(doc));
+ }
}
return Long.valueOf(textContent);
}
+ private String getSessionIdWithXPath(final Document doc, final XPathExpression sessionIdXPath) {
+ final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+ return sessionIdNode != null ? sessionIdNode.getTextContent() : null;
+ }
+
@Override
protected NetconfClientSession getSession(final NetconfClientSessionListener sessionListener, final Channel channel,
final NetconfHelloMessage message) throws NetconfDocumentedException {
public class NetconfClientSessionNegotiatorFactory implements SessionNegotiatorFactory<NetconfMessage, NetconfClientSession, NetconfClientSessionListener> {
- public static final Set<String> CLIENT_CAPABILITIES = ImmutableSet.of(
+ public static final Set<String> EXI_CLIENT_CAPABILITIES = ImmutableSet.of(
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1,
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_CAPABILITY_EXI_1_0);
+ public static final Set<String> LEGACY_EXI_CLIENT_CAPABILITIES = ImmutableSet.of(
+ XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_CAPABILITY_EXI_1_0);
+
+ public static final Set<String> DEFAULT_CLIENT_CAPABILITIES = ImmutableSet.of(
+ XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
+
+ public static final Set<String> LEGACY_FRAMING_CLIENT_CAPABILITIES = ImmutableSet.of(
+ XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0);
+
private static final Logger LOG = LoggerFactory.getLogger(NetconfClientSessionNegotiatorFactory.class);
private static final String START_EXI_MESSAGE_ID = "default-start-exi";
private static final EXIOptions DEFAULT_OPTIONS;
DEFAULT_OPTIONS = opts;
}
+ private final Set<String> clientCapabilities;
+
public NetconfClientSessionNegotiatorFactory(final Timer timer,
final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
final long connectionTimeoutMillis) {
this(timer, additionalHeader, connectionTimeoutMillis, DEFAULT_OPTIONS);
}
+ public NetconfClientSessionNegotiatorFactory(final Timer timer,
+ final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
+ final long connectionTimeoutMillis, final Set<String> capabilities) {
+ this(timer, additionalHeader, connectionTimeoutMillis, DEFAULT_OPTIONS, capabilities);
+
+ }
+
public NetconfClientSessionNegotiatorFactory(final Timer timer,
final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
final long connectionTimeoutMillis, final EXIOptions exiOptions) {
+ this(timer, additionalHeader, connectionTimeoutMillis, exiOptions, EXI_CLIENT_CAPABILITIES);
+ }
+
+ public NetconfClientSessionNegotiatorFactory(final Timer timer,
+ final Optional<NetconfHelloMessageAdditionalHeader> additionalHeader,
+ final long connectionTimeoutMillis, final EXIOptions exiOptions, final Set<String> capabilities) {
this.timer = Preconditions.checkNotNull(timer);
this.additionalHeader = additionalHeader;
this.connectionTimeoutMillis = connectionTimeoutMillis;
this.options = exiOptions;
+ this.clientCapabilities = capabilities;
}
@Override
NetconfMessage startExiMessage = NetconfStartExiMessage.create(options, START_EXI_MESSAGE_ID);
NetconfHelloMessage helloMessage = null;
try {
- helloMessage = NetconfHelloMessage.createClientHello(CLIENT_CAPABILITIES, additionalHeader);
+ helloMessage = NetconfHelloMessage.createClientHello(clientCapabilities, additionalHeader);
} catch (NetconfDocumentedException e) {
- LOG.error("Unable to create client hello message with capabilities {} and additional handler {}",CLIENT_CAPABILITIES,additionalHeader);
+ LOG.error("Unable to create client hello message with capabilities {} and additional handler {}", clientCapabilities,additionalHeader);
throw new IllegalStateException(e);
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-config</artifactId>
<description>Configuration files for netconf</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-connector-config</artifactId>
<description>Configuration files for netconf-connector</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-impl</artifactId>
<packaging>bundle</packaging>
import com.google.common.base.Preconditions;
import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelFutureListener;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.MessageToByteEncoder;
import java.text.SimpleDateFormat;
private Date loginTime;
private long inRpcSuccess, inRpcFail, outRpcError;
+ private volatile boolean delayedClose;
public NetconfServerSession(final NetconfServerSessionListener sessionListener, final Channel channel, final long sessionId,
final NetconfHelloMessageAdditionalHeader header) {
super.sessionUp();
}
+ /**
+ * Close this session after next message is sent.
+ * Suitable for close rpc that needs to send ok response before the session is closed.
+ */
+ public void delayedClose() {
+ this.delayedClose = true;
+ }
+
+ @Override
+ public ChannelFuture sendMessage(final NetconfMessage netconfMessage) {
+ final ChannelFuture channelFuture = super.sendMessage(netconfMessage);
+ // delayed close was set, close after the message was sent
+ if(delayedClose) {
+ channelFuture.addListener(new ChannelFutureListener() {
+ @Override
+ public void operationComplete(final ChannelFuture future) throws Exception {
+ close();
+ }
+ });
+ }
+ return channelFuture;
+ }
+
public void onIncommingRpcSuccess() {
inRpcSuccess++;
}
import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.controller.netconf.util.messages.SendErrorExceptionUtil;
-import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
session);
LOG.debug("Responding with message {}", message);
session.sendMessage(message);
-
- if (isCloseSession(netconfMessage)) {
- closeNetconfSession(session);
- }
-
} catch (final RuntimeException e) {
// TODO: should send generic error or close session?
LOG.error("Unexpected exception", e);
}
}
- private void closeNetconfSession(final NetconfServerSession session) {
- // destroy NetconfOperationService
- session.close();
- LOG.info("Session {} closed successfully", session.getSessionId());
- }
-
-
-
private NetconfMessage processDocument(final NetconfMessage netconfMessage, final NetconfServerSession session)
throws NetconfDocumentedException {
ImmutableMap.of(NetconfDocumentedException.ErrorTag.missing_attribute.toString(),
XmlNetconfConstants.MESSAGE_ID));
}
-
- private static boolean isCloseSession(final NetconfMessage incomingDocument) {
- final Document document = incomingDocument.getDocument();
- XmlElement rpcElement = XmlElement.fromDomDocument(document);
- if (rpcElement.getOnlyChildElementOptionally(DefaultCloseSession.CLOSE_SESSION).isPresent()) {
- return true;
- }
-
- return false;
- }
}
private static final Logger LOG = LoggerFactory.getLogger(SubtreeFilter.class);
static Document applySubtreeFilter(Document requestDocument, Document rpcReply) throws NetconfDocumentedException {
- // FIXME: rpcReply document must be reread otherwise some nodes do not inherit namespaces. (services/service)
- try {
- rpcReply = XmlUtil.readXmlToDocument(XmlUtil.toString(rpcReply, true));
- } catch (SAXException | IOException e) {
- LOG.error("Cannot transform document", e);
- throw new NetconfDocumentedException("Cannot transform document");
- }
-
OperationNameAndNamespace operationNameAndNamespace = new OperationNameAndNamespace(requestDocument);
if (XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0.equals(operationNameAndNamespace.getNamespace()) &&
XmlNetconfConstants.GET.equals(operationNameAndNamespace.getOperationName()) ||
// not implement filtering.
Optional<XmlElement> maybeFilter = operationNameAndNamespace.getOperationElement().getOnlyChildElementOptionally(
XmlNetconfConstants.FILTER, XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
- if (maybeFilter.isPresent() && (
- "subtree".equals(maybeFilter.get().getAttribute("type"))||
- "subtree".equals(maybeFilter.get().getAttribute("type", XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0)))
- ) {
+ if (!maybeFilter.isPresent()) {
+ return rpcReply;
+ }
+ // FIXME: rpcReply document must be reread otherwise some nodes do not inherit namespaces. (services/service)
+ try {
+ rpcReply = XmlUtil.readXmlToDocument(XmlUtil.toString(rpcReply, true));
+ } catch (SAXException | IOException e) {
+ LOG.error("Cannot transform document", e);
+ throw new NetconfDocumentedException("Cannot transform document" + e);
+ }
+ XmlElement filter = maybeFilter.get();
+ if ("subtree".equals(filter.getAttribute("type"))||
+ "subtree".equals(filter.getAttribute("type", XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0))) {
// do
return filtered(maybeFilter.get(), rpcReply);
}
}
+
return rpcReply; // return identical document
}
package org.opendaylight.controller.netconf.impl.mapping.operations;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import java.util.Collections;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class DefaultCloseSession extends AbstractSingletonNetconfOperation {
+public class DefaultCloseSession extends AbstractSingletonNetconfOperation implements DefaultNetconfOperation {
+ private static final Logger LOG = LoggerFactory.getLogger(DefaultCloseSession.class);
+
public static final String CLOSE_SESSION = "close-session";
+
private final AutoCloseable sessionResources;
+ private NetconfServerSession session;
public DefaultCloseSession(String netconfSessionIdForReporting, AutoCloseable sessionResources) {
super(netconfSessionIdForReporting);
throws NetconfDocumentedException {
try {
sessionResources.close();
+ Preconditions.checkNotNull(session, "Session was not set").delayedClose();
+ LOG.info("Session {} closing", session.getSessionId());
} catch (Exception e) {
throw new NetconfDocumentedException("Unable to properly close session "
+ getNetconfSessionIdForReporting(), NetconfDocumentedException.ErrorType.application,
}
return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
}
+
+ @Override
+ public void setNetconfSession(final NetconfServerSession s) {
+ this.session = s;
+ }
}
package org.opendaylight.controller.netconf.impl.mapping.operations;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.NetconfServerSessionListener;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
public class DefaultCloseSessionTest {
+
@Test
public void testDefaultCloseSession() throws Exception {
AutoCloseable res = mock(AutoCloseable.class);
doNothing().when(res).close();
- DefaultCloseSession session = new DefaultCloseSession("", res);
+ DefaultCloseSession close = new DefaultCloseSession("", res);
Document doc = XmlUtil.newDocument();
XmlElement elem = XmlElement.fromDomElement(XmlUtil.readXmlToElement("<elem/>"));
- session.handleWithNoSubsequentOperations(doc, elem);
+ final Channel channel = mock(Channel.class);
+ doReturn("channel").when(channel).toString();
+ doReturn(mock(ChannelFuture.class)).when(channel).close();
+
+ final ChannelFuture sendFuture = mock(ChannelFuture.class);
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(final InvocationOnMock invocation) throws Throwable {
+ ((GenericFutureListener) invocation.getArguments()[0]).operationComplete(sendFuture);
+ return null;
+ }
+ }).when(sendFuture).addListener(any(GenericFutureListener.class));
+ doReturn(sendFuture).when(channel).writeAndFlush(anyObject());
+ final NetconfServerSessionListener listener = mock(NetconfServerSessionListener.class);
+ doNothing().when(listener).onSessionTerminated(any(NetconfServerSession.class), any(NetconfTerminationReason.class));
+ final NetconfServerSession session =
+ new NetconfServerSession(listener, channel, 1L,
+ NetconfHelloMessageAdditionalHeader.fromString("[netconf;10.12.0.102:48528;ssh;;;;;;]"));
+ close.setNetconfSession(session);
+ close.handleWithNoSubsequentOperations(doc, elem);
+ // Fake close response to trigger delayed close
+ session.sendMessage(new NetconfMessage(XmlUtil.readXmlToDocument("<rpc-reply message-id=\"101\"\n" +
+ "xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<ok/>\n" +
+ "</rpc-reply>")));
+ verify(channel).close();
+ verify(listener).onSessionTerminated(any(NetconfServerSession.class), any(NetconfTerminationReason.class));
}
@Test(expected = NetconfDocumentedException.class)
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-it</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-mapping-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-mdsal-config</artifactId>
<description>Configuration files for netconf for mdsal</description>
<name>yang-schema-service</name>
</root-schema-service>
<dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
- <name>inmemory-data-broker</name>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+ <name>dom-broker</name>
</dom-broker>
<mapper-aggregator xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-mapper-registry</type>
<password>admin</password>
</module>
+
+ <!--TCP endpoint for MD-SAL netconf server -->
+ <!--<module>-->
+ <!--<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:tcp">prefix:netconf-northbound-tcp</type>-->
+ <!--<name>netconf-mdsal-tcp-server</name>-->
+ <!--<dispatcher xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:tcp">-->
+ <!--<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound">prefix:netconf-server-dispatcher</type>-->
+ <!--<name>netconf-mdsal-server-dispatcher</name>-->
+ <!--</dispatcher>-->
+ <!--</module>-->
+
</modules>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper?module=netconf-mdsal-mapper&revision=2015-01-14</capability>
<capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring?module=netconf-mdsal-monitoring&revision=2015-02-18</capability>
<capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh?module=netconf-northbound-ssh&revision=2015-01-14</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:tcp?module=netconf-northbound-tcp&revision=2015-04-23</capability>
<capability>urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl?module=netconf-northbound-impl&revision=2015-01-12</capability>
<capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:scheduled?module=threadpool-impl-scheduled&revision=2013-12-01</capability>
</required-capabilities>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-monitoring</artifactId>
<packaging>bundle</packaging>
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public class GetSchema extends AbstractLastNetconfOperation {
+public class GetSchema extends AbstractSingletonNetconfOperation {
public static final String GET_SCHEMA = "get-schema";
public static final String IDENTIFIER = "identifier";
public static final String VERSION = "version";
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-netty-util</artifactId>
<packaging>bundle</packaging>
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.NetconfSessionListener;
import org.opendaylight.controller.netconf.api.NetconfSessionPreferences;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.nettyutil.handler.FramingMechanismHandlerFactory;
import org.opendaylight.controller.netconf.nettyutil.handler.NetconfChunkAggregator;
import org.opendaylight.controller.netconf.nettyutil.handler.NetconfMessageToXMLEncoder;
}
private boolean containsBase11Capability(final Document doc) {
- final NodeList nList = doc.getElementsByTagName("capability");
+ final NodeList nList = doc.getElementsByTagName(XmlNetconfConstants.CAPABILITY);
for (int i = 0; i < nList.getLength(); i++) {
- if (nList.item(i).getTextContent().contains("base:1.1")) {
+ if (nList.item(i).getTextContent().contains(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1)) {
return true;
}
}
}
ret.setOutputProperty(OutputKeys.INDENT, "yes");
- ret.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
+ ret.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "no");
return ret;
};
@Override
public synchronized void connect(final ChannelHandlerContext ctx, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise) throws Exception {
- LOG.debug("XXX session connecting on channel {}. promise: {} ", ctx.channel(), connectPromise);
+ LOG.debug("SSH session connecting on channel {}. promise: {} ", ctx.channel(), connectPromise);
this.connectPromise = promise;
startSsh(ctx, remoteAddress);
}
<parent>
<artifactId>netconf-subsystem</artifactId>
<groupId>org.opendaylight.controller</groupId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<packaging>bundle</packaging>
<parent>
<artifactId>netconf-subsystem</artifactId>
<groupId>org.opendaylight.controller</groupId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-ssh</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netty-config-api</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</dependency>
<dependency>
@AfterClass
public static void tearDown() throws Exception {
hashedWheelTimer.stop();
- nettyGroup.shutdownGracefully().await();
+ nettyGroup.shutdownGracefully().await(5, TimeUnit.SECONDS);
minaTimerEx.shutdownNow();
nioExec.shutdownNow();
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-tcp</artifactId>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-config-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</instructions>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
</plugins>
</build>
--- /dev/null
+package org.opendaylight.controller.config.yang.netconf.northbound.tcp;
+
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import org.opendaylight.controller.netconf.api.NetconfServerDispatcher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class NetconfNorthboundTcpModule extends org.opendaylight.controller.config.yang.netconf.northbound.tcp.AbstractNetconfNorthboundTcpModule {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfNorthboundTcpModule.class);
+
+
+ public NetconfNorthboundTcpModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfNorthboundTcpModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.netconf.northbound.tcp.NetconfNorthboundTcpModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation form module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ final NetconfServerDispatcher dispatch = getDispatcherDependency();
+ final ChannelFuture tcpServer = dispatch.createServer(getInetAddress());
+
+ tcpServer.addListener(new GenericFutureListener<ChannelFuture>() {
+ @Override
+ public void operationComplete(ChannelFuture future) throws Exception {
+ if (future.isDone() && future.isSuccess()) {
+ LOG.info("Netconf TCP endpoint started successfully at {}", getInetAddress());
+ } else {
+ LOG.warn("Unable to start TCP netconf server at {}", getInetAddress(), future.cause());
+ throw new RuntimeException("Unable to start TCP netconf server", future.cause());
+ }
+ }
+ });
+
+ return new NetconfServerCloseable(tcpServer);
+ }
+
+ private InetSocketAddress getInetAddress() {
+ try {
+ final InetAddress inetAd = InetAddress.getByName(getBindingAddress().getIpv4Address() == null ? getBindingAddress().getIpv6Address().getValue() : getBindingAddress().getIpv4Address().getValue());
+ return new InetSocketAddress(inetAd, getPort().getValue());
+ } catch (final UnknownHostException e) {
+ throw new IllegalArgumentException("Unable to bind netconf endpoint to address " + getBindingAddress(), e);
+ }
+ }
+
+ private static final class NetconfServerCloseable implements AutoCloseable {
+ private final ChannelFuture localServer;
+
+ public NetconfServerCloseable(final ChannelFuture localServer) {
+ this.localServer = localServer;
+ }
+
+ @Override
+ public void close() throws Exception {
+ if(localServer.isDone()) {
+ localServer.channel().close();
+ } else {
+ localServer.cancel(true);
+ }
+ }
+ }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-northbound-tcp yang module local name: netconf-northbound-tcp
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Thu Apr 23 16:34:55 CEST 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.netconf.northbound.tcp;
+public class NetconfNorthboundTcpModuleFactory extends org.opendaylight.controller.config.yang.netconf.northbound.tcp.AbstractNetconfNorthboundTcpModuleFactory {
+
+}
--- /dev/null
+module netconf-northbound-tcp {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:tcp";
+ prefix "nni";
+
+ import netconf-northbound-mapper { prefix nnm; revision-date 2015-01-14; }
+ import netconf-northbound { prefix nn; revision-date 2015-01-14; }
+ import config { prefix config; revision-date 2013-04-05; }
+ import threadpool {prefix th;}
+ import netty {prefix netty;}
+ import ietf-inet-types { prefix inet; revision-date 2010-09-24; }
+
+ organization "Cisco Systems, Inc.";
+
+ description
+ "This module contains the base YANG definitions for
+ a default implementation of netconf northbound tcp server";
+
+ revision "2015-04-23" {
+ description
+ "Initial revision.";
+ }
+
+ identity netconf-northbound-tcp {
+ base config:module-type;
+ config:java-name-prefix NetconfNorthboundTcp;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-northbound-tcp {
+ when "/config:modules/config:module/config:type = 'netconf-northbound-tcp'";
+
+ leaf port {
+ type inet:port-number;
+ default 2831;
+ }
+
+ leaf binding-address {
+ type inet:ip-address;
+ default "0.0.0.0";
+ }
+
+ container dispatcher {
+ uses config:service-ref {
+ refine type {
+ config:required-identity nn:netconf-server-dispatcher;
+ }
+ }
+ }
+ }
+ }
+
+}
\ No newline at end of file
--- /dev/null
+<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+<module>
+<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl">prefix:threadfactory-naming</type>
+<name>name{MSG_ID}</name>
+<name-prefix xmlns="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl">remote-connector-processing-executor</name-prefix>
+</module>
+</modules>
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-testtool</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-connector-config</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>logback-config</artifactId>
<shadedClassifierName>executable</shadedClassifierName>
</configuration>
</execution>
+
+ <execution>
+ <id>stress-client</id>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <shadedArtifactId>stress-client</shadedArtifactId>
+ <filters>
+ <filter>
+ <artifact>*:*</artifact>
+ <excludes>
+ <exclude>META-INF/*.SF</exclude>
+ <exclude>META-INF/*.DSA</exclude>
+ <exclude>META-INF/*.RSA</exclude>
+ </excludes>
+ </filter>
+ </filters>
+ <transformers>
+ <transformer
+ implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+ <mainClass>org.opendaylight.controller.netconf.test.tool.client.stress.StressClient</mainClass>
+ </transformer>
+ </transformers>
+ <shadedArtifactAttached>true</shadedArtifactAttached>
+ <shadedClassifierName>executable</shadedClassifierName>
+ </configuration>
+ </execution>
</executions>
</plugin>
</plugins>
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.NavigableMap;
import java.util.Set;
-import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
return input.getKey().getAST();
}
});
- final Map<String, TreeMap<Date, URI>> namespaceContext = BuilderUtils.createYangNamespaceContext(
+ final Map<String, NavigableMap<Date, URI>> namespaceContext = BuilderUtils.createYangNamespaceContext(
asts.values(), Optional.<SchemaContext>absent());
final ParseTreeWalker walker = new ParseTreeWalker();
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class AsyncExecutionStrategy implements ExecutionStrategy {
+ private static final Logger LOG = LoggerFactory.getLogger(AsyncExecutionStrategy.class);
+
+ private final Parameters params;
+ private final List<NetconfMessage> preparedMessages;
+ private final NetconfDeviceCommunicator sessionListener;
+ private final List<Integer> editBatches;
+ private final int editAmount;
+
+ public AsyncExecutionStrategy(final Parameters params, final List<NetconfMessage> editConfigMsgs, final NetconfDeviceCommunicator sessionListener) {
+ this.params = params;
+ this.preparedMessages = editConfigMsgs;
+ this.sessionListener = sessionListener;
+ this.editBatches = countEditBatchSizes(params, editConfigMsgs.size());
+ editAmount = editConfigMsgs.size();
+ }
+
+ private static List<Integer> countEditBatchSizes(final Parameters params, final int amount) {
+ final List<Integer> editBatches = Lists.newArrayList();
+ if (params.editBatchSize != amount) {
+ final int fullBatches = amount / params.editBatchSize;
+ for (int i = 0; i < fullBatches; i++) {
+ editBatches.add(params.editBatchSize);
+ }
+
+ if (amount % params.editBatchSize != 0) {
+ editBatches.add(amount % params.editBatchSize);
+ }
+ } else {
+ editBatches.add(params.editBatchSize);
+ }
+ return editBatches;
+ }
+
+ @Override
+ public void invoke() {
+ final AtomicInteger responseCounter = new AtomicInteger(0);
+ final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
+
+ int batchI = 0;
+ for (final Integer editBatch : editBatches) {
+ for (int i = 0; i < editBatch; i++) {
+ final int msgId = i + (batchI * params.editBatchSize);
+ final NetconfMessage msg = preparedMessages.get(msgId);
+ LOG.debug("Sending message {}", msgId);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending message {}", XmlUtil.toString(msg.getDocument()));
+ }
+ final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture =
+ sessionListener.sendRequest(msg, StressClient.EDIT_QNAME);
+ futures.add(netconfMessageFuture);
+ }
+ batchI++;
+ LOG.info("Batch {} with size {} sent. Committing", batchI, editBatch);
+ futures.add(sessionListener.sendRequest(StressClient.COMMIT_MSG, StressClient.COMMIT_QNAME));
+ }
+
+ LOG.info("All batches sent. Waiting for responses");
+ // Wait for every future
+ for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+ try {
+ final RpcResult<NetconfMessage> netconfMessageRpcResult = future.get(params.msgTimeout, TimeUnit.SECONDS);
+ if(netconfMessageRpcResult.isSuccessful()) {
+ responseCounter.incrementAndGet();
+ LOG.debug("Received response {}", responseCounter.get());
+ } else {
+ LOG.warn("Request failed {}", netconfMessageRpcResult);
+ }
+ } catch (final InterruptedException e) {
+ throw new RuntimeException(e);
+ } catch (final ExecutionException | TimeoutException e) {
+ throw new RuntimeException("Request not finished", e);
+ }
+ }
+
+ Preconditions.checkState(responseCounter.get() == editAmount + editBatches.size(), "Not all responses were received, only %s from %s", responseCounter.get(), params.editCount + editBatches.size());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.Timer;
+import java.util.Set;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionNegotiatorFactory;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+
+public class ConfigurableClientDispatcher extends NetconfClientDispatcherImpl {
+
+ private final Set<String> capabilities;
+
+ private ConfigurableClientDispatcher(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer, final Set<String> capabilities) {
+ super(bossGroup, workerGroup, timer);
+ this.capabilities = capabilities;
+ }
+
+ /**
+ * EXI + chunked framing
+ */
+ public static ConfigurableClientDispatcher createChunkedExi(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+ return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.EXI_CLIENT_CAPABILITIES);
+ }
+
+ /**
+ * EXI + ]]>]]> framing
+ */
+ public static ConfigurableClientDispatcher createLegacyExi(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+ return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.LEGACY_EXI_CLIENT_CAPABILITIES);
+ }
+
+ /**
+ * Chunked framing
+ */
+ public static ConfigurableClientDispatcher createChunked(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+ return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.DEFAULT_CLIENT_CAPABILITIES);
+ }
+
+ /**
+ * ]]>]]> framing
+ */
+ public static ConfigurableClientDispatcher createLegacy(final EventLoopGroup bossGroup, final EventLoopGroup workerGroup, final Timer timer) {
+ return new ConfigurableClientDispatcher(bossGroup, workerGroup, timer, NetconfClientSessionNegotiatorFactory.LEGACY_FRAMING_CLIENT_CAPABILITIES);
+ }
+
+ @Override
+ protected NetconfClientSessionNegotiatorFactory getNegotiatorFactory(final NetconfClientConfiguration cfg) {
+ return new NetconfClientSessionNegotiatorFactory(getTimer(), cfg.getAdditionalHeader(), cfg.getConnectionTimeoutMillis(), capabilities);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+/**
+ * Created by mmarsale on 18.4.2015.
+ */
+public interface ExecutionStrategy {
+ void invoke();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import com.google.common.base.Preconditions;
+import java.io.File;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import net.sourceforge.argparse4j.ArgumentParsers;
+import net.sourceforge.argparse4j.annotation.Arg;
+import net.sourceforge.argparse4j.inf.ArgumentParser;
+
+public class Parameters {
+
+ @Arg(dest = "ip")
+ public String ip;
+
+ @Arg(dest = "port")
+ public int port;
+
+ @Arg(dest = "edit-count")
+ public int editCount;
+
+ @Arg(dest = "edit-content")
+ public File editContent;
+
+ @Arg(dest = "edit-batch-size")
+ public int editBatchSize;
+
+ @Arg(dest = "debug")
+ public boolean debug;
+
+ @Arg(dest = "legacy-framing")
+ public boolean legacyFraming;
+
+ @Arg(dest = "exi")
+ public boolean exi;
+
+ @Arg(dest = "async")
+ public boolean async;
+
+ @Arg(dest = "ssh")
+ public boolean ssh;
+
+ @Arg(dest = "msg-timeout")
+ public long msgTimeout;
+
+ @Arg(dest = "tcp-header")
+ public String tcpHeader;
+
+ @Arg(dest = "thread-amount")
+ public int threadAmount;
+
+ static ArgumentParser getParser() {
+ final ArgumentParser parser = ArgumentParsers.newArgumentParser("netconf stress client");
+
+ parser.description("Netconf stress client");
+
+ parser.addArgument("--ip")
+ .type(String.class)
+ .setDefault("127.0.0.1")
+ .type(String.class)
+ .help("Netconf server IP")
+ .dest("ip");
+
+ parser.addArgument("--port")
+ .type(Integer.class)
+ .setDefault(2830)
+ .type(Integer.class)
+ .help("Netconf server port")
+ .dest("port");
+
+ parser.addArgument("--edits")
+ .type(Integer.class)
+ .setDefault(50000)
+ .type(Integer.class)
+ .help("Netconf edit rpcs to be sent")
+ .dest("edit-count");
+
+ parser.addArgument("--edit-content")
+ .type(File.class)
+ .setDefault(new File("edit.txt"))
+ .type(File.class)
+ .dest("edit-content");
+
+ parser.addArgument("--edit-batch-size")
+ .type(Integer.class)
+ .required(false)
+ .setDefault(-1)
+ .type(Integer.class)
+ .dest("edit-batch-size");
+
+ parser.addArgument("--debug")
+ .type(Boolean.class)
+ .setDefault(false)
+ .help("Whether to use debug log level instead of INFO")
+ .dest("debug");
+
+ parser.addArgument("--legacy-framing")
+ .type(Boolean.class)
+ .setDefault(false)
+ .dest("legacy-framing");
+
+ parser.addArgument("--exi")
+ .type(Boolean.class)
+ .setDefault(false)
+ .dest("exi");
+
+ parser.addArgument("--async-requests")
+ .type(Boolean.class)
+ .setDefault(true)
+ .dest("async");
+
+ parser.addArgument("--msg-timeout")
+ .type(Integer.class)
+ .setDefault(60)
+ .dest("msg-timeout");
+
+ parser.addArgument("--ssh")
+ .type(Boolean.class)
+ .setDefault(false)
+ .dest("ssh");
+
+ parser.addArgument("--tcp-header")
+ .type(String.class)
+ .required(false)
+ .dest("tcp-header");
+
+ parser.addArgument("--thread-amount")
+ .type(Integer.class)
+ .setDefault(1)
+ .dest("thread-amount");
+
+ // TODO add get-config option instead of edit + commit
+ // TODO different edit config content
+
+ return parser;
+ }
+
+ void validate() {
+ Preconditions.checkArgument(port > 0, "Port =< 0");
+ Preconditions.checkArgument(editCount > 0, "Edit count =< 0");
+ if (editBatchSize == -1) {
+ editBatchSize = editCount;
+ } else {
+ Preconditions.checkArgument(editBatchSize <= editCount, "Edit count =< 0");
+ }
+
+ Preconditions.checkArgument(editContent.exists(), "Edit content file missing");
+ Preconditions.checkArgument(editContent.isDirectory() == false, "Edit content file is a dir");
+ Preconditions.checkArgument(editContent.canRead(), "Edit content file is unreadable");
+ // TODO validate
+ }
+
+ public InetSocketAddress getInetAddress() {
+ try {
+ return new InetSocketAddress(InetAddress.getByName(ip), port);
+ } catch (final UnknownHostException e) {
+ throw new IllegalArgumentException("Unknown ip", e);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import ch.qos.logback.classic.Level;
+import com.google.common.base.Charsets;
+import com.google.common.base.Stopwatch;
+import com.google.common.io.Files;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import net.sourceforge.argparse4j.inf.ArgumentParser;
+import net.sourceforge.argparse4j.inf.ArgumentParserException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.base._1._0.rev110601.CommitInput;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.base._1._0.rev110601.EditConfigInput;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.xml.sax.SAXException;
+
+public final class StressClient {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StressClient.class);
+
+ static final QName COMMIT_QNAME = QName.create(CommitInput.QNAME, "commit");
+ public static final NetconfMessage COMMIT_MSG;
+
+ static {
+ try {
+ COMMIT_MSG = new NetconfMessage(XmlUtil.readXmlToDocument("<rpc message-id=\"commit-batch\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ " <commit/>\n" +
+ "</rpc>"));
+ } catch (SAXException | IOException e) {
+ throw new ExceptionInInitializerError(e);
+ }
+ }
+
+ static final QName EDIT_QNAME = QName.create(EditConfigInput.QNAME, "edit-config");
+ static final org.w3c.dom.Document editBlueprint;
+
+ static {
+ try {
+ editBlueprint = XmlUtil.readXmlToDocument(
+ "<rpc xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ " <edit-config xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ " <target>\n" +
+ " <candidate/>\n" +
+ " </target>\n" +
+ " <default-operation>none</default-operation>" +
+ " <config/>\n" +
+ " </edit-config>\n" +
+ "</rpc>");
+ } catch (SAXException | IOException e) {
+ throw new ExceptionInInitializerError(e);
+ }
+ }
+
+ private static final String MSG_ID_PLACEHOLDER_REGEX = "\\{MSG_ID\\}";
+ private static final String PHYS_ADDR_PLACEHOLDER = "{PHYS_ADDR}";
+
+ private static long macStart = 0xAABBCCDD0000L;
+
+ public static void main(final String[] args) {
+ final Parameters params = parseArgs(args, Parameters.getParser());
+ params.validate();
+
+ final ch.qos.logback.classic.Logger root = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
+ root.setLevel(params.debug ? Level.DEBUG : Level.INFO);
+
+ final int threadAmount = params.threadAmount;
+ LOG.info("thread amount: " + threadAmount);
+ final int requestsPerThread = params.editCount / params.threadAmount;
+ LOG.info("requestsPerThread: " + requestsPerThread);
+ final int leftoverRequests = params.editCount % params.threadAmount;
+ LOG.info("leftoverRequests: " + leftoverRequests);
+
+
+ LOG.info("Preparing messages");
+ // Prepare all msgs up front
+ final List<List<NetconfMessage>> allPreparedMessages = new ArrayList<>(threadAmount);
+ for (int i = 0; i < threadAmount; i++) {
+ if (i != threadAmount - 1) {
+ allPreparedMessages.add(new ArrayList<NetconfMessage>(requestsPerThread));
+ } else {
+ allPreparedMessages.add(new ArrayList<NetconfMessage>(requestsPerThread + leftoverRequests));
+ }
+ }
+
+
+ final String editContentString;
+ try {
+ editContentString = Files.toString(params.editContent, Charsets.UTF_8);
+ } catch (final IOException e) {
+ throw new IllegalArgumentException("Cannot read content of " + params.editContent);
+ }
+
+ for (int i = 0; i < threadAmount; i++) {
+ final List<NetconfMessage> preparedMessages = allPreparedMessages.get(i);
+ int padding = 0;
+ if (i == threadAmount - 1) {
+ padding = leftoverRequests;
+ }
+ for (int j = 0; j < requestsPerThread + padding; j++) {
+ LOG.debug("id: " + (i * requestsPerThread + j));
+ preparedMessages.add(prepareMessage(i * requestsPerThread + j, editContentString));
+ }
+ }
+
+ final NioEventLoopGroup nioGroup = new NioEventLoopGroup();
+ final Timer timer = new HashedWheelTimer();
+
+ final NetconfClientDispatcherImpl netconfClientDispatcher = configureClientDispatcher(params, nioGroup, timer);
+
+ final List<StressClientCallable> callables = new ArrayList<>(threadAmount);
+ for (final List<NetconfMessage> messages : allPreparedMessages) {
+ callables.add(new StressClientCallable(params, netconfClientDispatcher, messages));
+ }
+
+ final ExecutorService executorService = Executors.newFixedThreadPool(threadAmount);
+
+ LOG.info("Starting stress test");
+ final Stopwatch started = Stopwatch.createStarted();
+ try {
+ final List<Future<Boolean>> futures = executorService.invokeAll(callables);
+ for (final Future<Boolean> future : futures) {
+ try {
+ future.get(4L, TimeUnit.MINUTES);
+ } catch (ExecutionException | TimeoutException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ executorService.shutdownNow();
+ } catch (final InterruptedException e) {
+ throw new RuntimeException("Unable to execute requests", e);
+ }
+ started.stop();
+
+ LOG.info("FINISHED. Execution time: {}", started);
+ LOG.info("Requests per second: {}", (params.editCount * 1000.0 / started.elapsed(TimeUnit.MILLISECONDS)));
+
+ // Cleanup
+ timer.stop();
+ try {
+ nioGroup.shutdownGracefully().get(20L, TimeUnit.SECONDS);
+ } catch (InterruptedException | ExecutionException | TimeoutException e) {
+ LOG.warn("Unable to close executor properly", e);
+ }
+ }
+
+ static NetconfMessage prepareMessage(final int id, final String editContentString) {
+ final Document msg = XmlUtil.createDocumentCopy(editBlueprint);
+ msg.getDocumentElement().setAttribute("message-id", Integer.toString(id));
+ final NetconfMessage netconfMessage = new NetconfMessage(msg);
+
+ final Element editContentElement;
+ try {
+ // Insert message id where needed
+ String specificEditContent = editContentString.replaceAll(MSG_ID_PLACEHOLDER_REGEX, Integer.toString(id));
+
+ final StringBuilder stringBuilder = new StringBuilder(specificEditContent);
+ int idx = stringBuilder.indexOf(PHYS_ADDR_PLACEHOLDER);
+ while (idx!= -1) {
+ stringBuilder.replace(idx, idx + PHYS_ADDR_PLACEHOLDER.length(), getMac(macStart++));
+ idx = stringBuilder.indexOf(PHYS_ADDR_PLACEHOLDER);
+ }
+ specificEditContent = stringBuilder.toString();
+
+ editContentElement = XmlUtil.readXmlToElement(specificEditContent);
+ final Node config = ((Element) msg.getDocumentElement().getElementsByTagName("edit-config").item(0)).
+ getElementsByTagName("config").item(0);
+ config.appendChild(msg.importNode(editContentElement, true));
+ } catch (final IOException | SAXException e) {
+ throw new IllegalArgumentException("Edit content file is unreadable", e);
+ }
+
+ return netconfMessage;
+ }
+
+ private static NetconfClientDispatcherImpl configureClientDispatcher(final Parameters params, final NioEventLoopGroup nioGroup, final Timer timer) {
+ final NetconfClientDispatcherImpl netconfClientDispatcher;
+ if(params.exi) {
+ if(params.legacyFraming) {
+ netconfClientDispatcher= ConfigurableClientDispatcher.createLegacyExi(nioGroup, nioGroup, timer);
+ } else {
+ netconfClientDispatcher = ConfigurableClientDispatcher.createChunkedExi(nioGroup, nioGroup, timer);
+ }
+ } else {
+ if(params.legacyFraming) {
+ netconfClientDispatcher = ConfigurableClientDispatcher.createLegacy(nioGroup, nioGroup, timer);
+ } else {
+ netconfClientDispatcher = ConfigurableClientDispatcher.createChunked(nioGroup, nioGroup, timer);
+ }
+ }
+ return netconfClientDispatcher;
+ }
+
+ public static String getMac(long mac) {
+ StringBuilder m = new StringBuilder(Long.toString(mac, 16));
+
+ for (int i = m.length(); i < 12; i++) {
+ m.insert(0, "0");
+ }
+
+ for (int j = m.length() - 2; j >= 2; j-=2) {
+ m.insert(j, ":");
+ }
+
+ return m.toString();
+ }
+
+ private static Parameters parseArgs(final String[] args, final ArgumentParser parser) {
+ final Parameters opt = new Parameters();
+ try {
+ parser.parseArgs(args, opt);
+ return opt;
+ } catch (final ArgumentParserException e) {
+ parser.handleError(e);
+ }
+
+ System.exit(1);
+ return null;
+ }
+
+
+ static class LoggingRemoteDevice implements RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> {
+ @Override
+ public void onRemoteSessionUp(final NetconfSessionPreferences remoteSessionCapabilities, final NetconfDeviceCommunicator netconfDeviceCommunicator) {
+ LOG.info("Session established");
+ }
+
+ @Override
+ public void onRemoteSessionDown() {
+ LOG.info("Session down");
+ }
+
+ @Override
+ public void onRemoteSessionFailed(final Throwable throwable) {
+ LOG.info("Session failed");
+ }
+
+ @Override
+ public void onNotification(final NetconfMessage notification) {
+ LOG.info("Notification received: {}", notification.toString());
+ }
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import io.netty.util.concurrent.GlobalEventExecutor;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class StressClientCallable implements Callable<Boolean>{
+
+ private static final Logger LOG = LoggerFactory.getLogger(StressClientCallable.class);
+
+ private Parameters params;
+ private final NetconfDeviceCommunicator sessionListener;
+ private final NetconfClientDispatcherImpl netconfClientDispatcher;
+ private final NetconfClientConfiguration cfg;
+ private final NetconfClientSession netconfClientSession;
+ private final ExecutionStrategy executionStrategy;
+
+ public StressClientCallable(final Parameters params,
+ final NetconfClientDispatcherImpl netconfClientDispatcher,
+ final List<NetconfMessage> preparedMessages) {
+ this.params = params;
+ this.sessionListener = getSessionListener(params.getInetAddress());
+ this.netconfClientDispatcher = netconfClientDispatcher;
+ cfg = getNetconfClientConfiguration(this.params, this.sessionListener);
+
+ LOG.info("Connecting to netconf server {}:{}", params.ip, params.port);
+ try {
+ netconfClientSession = netconfClientDispatcher.createClient(cfg).get();
+ } catch (final InterruptedException e) {
+ throw new RuntimeException(e);
+ } catch (final ExecutionException e) {
+ throw new RuntimeException("Unable to connect", e);
+ }
+ executionStrategy = getExecutionStrategy(params, preparedMessages, sessionListener);
+ }
+
+ @Override
+ public Boolean call() throws Exception {
+ executionStrategy.invoke();
+ netconfClientSession.close();
+ return true;
+ }
+
+ private static ExecutionStrategy getExecutionStrategy(final Parameters params, final List<NetconfMessage> preparedMessages, final NetconfDeviceCommunicator sessionListener) {
+ if(params.async) {
+ return new AsyncExecutionStrategy(params, preparedMessages, sessionListener);
+ } else {
+ return new SyncExecutionStrategy(params, preparedMessages, sessionListener);
+ }
+ }
+
+ private static NetconfDeviceCommunicator getSessionListener(final InetSocketAddress inetAddress) {
+ final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> loggingRemoteDevice = new StressClient.LoggingRemoteDevice();
+ return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test", inetAddress), loggingRemoteDevice);
+ }
+
+ private static NetconfClientConfiguration getNetconfClientConfiguration(final Parameters params, final NetconfDeviceCommunicator sessionListener) {
+ final NetconfClientConfigurationBuilder netconfClientConfigurationBuilder = NetconfClientConfigurationBuilder.create();
+ netconfClientConfigurationBuilder.withSessionListener(sessionListener);
+ netconfClientConfigurationBuilder.withAddress(params.getInetAddress());
+ if(params.tcpHeader != null) {
+ final String header = params.tcpHeader.replaceAll("\"", "").trim() + "\n";
+ netconfClientConfigurationBuilder.withAdditionalHeader(new NetconfHelloMessageAdditionalHeader(null, null, null, null, null) {
+ @Override
+ public String toFormattedString() {
+ LOG.debug("Sending TCP header {}", header);
+ return header;
+ }
+ });
+ }
+ netconfClientConfigurationBuilder.withProtocol(params.ssh ? NetconfClientConfiguration.NetconfClientProtocol.SSH : NetconfClientConfiguration.NetconfClientProtocol.TCP);
+ netconfClientConfigurationBuilder.withConnectionTimeoutMillis(20000L);
+ netconfClientConfigurationBuilder.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
+ return netconfClientConfigurationBuilder.build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.client.stress;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// TODO reuse code from org.opendaylight.controller.netconf.test.tool.client.stress.AsyncExecutionStrategy
+class SyncExecutionStrategy implements ExecutionStrategy {
+ private static final Logger LOG = LoggerFactory.getLogger(SyncExecutionStrategy.class);
+
+ private final Parameters params;
+ private final List<NetconfMessage> preparedMessages;
+ private final NetconfDeviceCommunicator sessionListener;
+ private final List<Integer> editBatches;
+ private final int editAmount;
+
+ public SyncExecutionStrategy(final Parameters params, final List<NetconfMessage> preparedMessages, final NetconfDeviceCommunicator sessionListener) {
+ this.params = params;
+ this.preparedMessages = preparedMessages;
+ this.sessionListener = sessionListener;
+ this.editBatches = countEditBatchSizes(params, preparedMessages.size());
+ editAmount = preparedMessages.size();
+ }
+
+ private static List<Integer> countEditBatchSizes(final Parameters params, final int amount) {
+ final List<Integer> editBatches = Lists.newArrayList();
+ if (params.editBatchSize != amount) {
+ final int fullBatches = amount / params.editBatchSize;
+ for (int i = 0; i < fullBatches; i++) {
+ editBatches.add(params.editBatchSize);
+ }
+
+ if (amount % params.editBatchSize != 0) {
+ editBatches.add(amount % params.editBatchSize);
+ }
+ } else {
+ editBatches.add(params.editBatchSize);
+ }
+ return editBatches;
+ }
+
+ public void invoke() {
+ final AtomicInteger responseCounter = new AtomicInteger(0);
+
+ int batchI = 0;
+ for (final Integer editBatch : editBatches) {
+ for (int i = 0; i < editBatch; i++) {
+ final int msgId = i + (batchI * params.editBatchSize);
+ final NetconfMessage msg = preparedMessages.get(msgId);
+ LOG.debug("Sending message {}", msgId);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending message {}", XmlUtil.toString(msg.getDocument()));
+ }
+ final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture =
+ sessionListener.sendRequest(msg, StressClient.EDIT_QNAME);
+ // Wait for response
+ waitForResponse(responseCounter, netconfMessageFuture);
+
+ }
+ batchI++;
+ LOG.info("Batch {} with size {} sent. Committing", batchI, editBatch);
+
+ // Commit batch sync
+ waitForResponse(responseCounter,
+ sessionListener.sendRequest(StressClient.COMMIT_MSG, StressClient.COMMIT_QNAME));
+ }
+
+ Preconditions.checkState(responseCounter.get() == editAmount + editBatches.size(), "Not all responses were received, only %s from %s", responseCounter.get(), params.editCount + editBatches.size());
+ }
+
+ private void waitForResponse(AtomicInteger responseCounter, final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture) {
+ try {
+ final RpcResult<NetconfMessage> netconfMessageRpcResult =
+ netconfMessageFuture.get(params.msgTimeout, TimeUnit.SECONDS);
+ if (netconfMessageRpcResult.isSuccessful()) {
+ responseCounter.incrementAndGet();
+ LOG.debug("Received response {}", responseCounter.get());
+ } else {
+ LOG.warn("Request failed {}", netconfMessageRpcResult);
+ }
+
+ } catch (final InterruptedException e) {
+ throw new RuntimeException(e);
+ } catch (final ExecutionException | TimeoutException e) {
+ throw new RuntimeException("Request not finished", e);
+ }
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-usermanager</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-util</artifactId>
<packaging>bundle</packaging>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-model-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
</dependencies>
<build>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter.UNKNOWN_SIZE;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Iterables;
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamAttributeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
+import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+//TODO this does not extend NormalizedNodeWriter from yangtools due to api freeze, make this inherit common methods to avoid code duplication
+//TODO move this to yangtools, since this is in netconf-util due to api freeze in lithium
+public class OrderedNormalizedNodeWriter implements Closeable, Flushable{
+
+ private final SchemaContext schemaContext;
+ private final SchemaNode root;
+ private final NormalizedNodeStreamWriter writer;
+
+ public OrderedNormalizedNodeWriter(NormalizedNodeStreamWriter writer, SchemaContext schemaContext, SchemaPath path) {
+ this.writer = writer;
+ this.schemaContext = schemaContext;
+ this.root = findParentSchemaOnPath(schemaContext, path);
+ }
+
+ public OrderedNormalizedNodeWriter write(final NormalizedNode<?, ?> node) throws IOException {
+ if (root == schemaContext) {
+ return write(node, schemaContext.getDataChildByName(node.getNodeType()));
+ }
+
+ return write(node, root);
+ }
+
+ public OrderedNormalizedNodeWriter write(final Collection<DataContainerChild<?,?>> nodes) throws IOException {
+ if (writeChildren(nodes, root, false)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize nodes " + nodes);
+
+ }
+
+ private OrderedNormalizedNodeWriter write(NormalizedNode<?, ?> node, SchemaNode dataSchemaNode) throws IOException {
+ if (node == null) {
+ return this;
+ }
+
+ if (wasProcessedAsCompositeNode(node, dataSchemaNode)) {
+ return this;
+ }
+
+ if (wasProcessAsSimpleNode(node)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize node " + node);
+ }
+
+ private void write(List<NormalizedNode<?, ?>> nodes, SchemaNode dataSchemaNode) throws IOException {
+ for (NormalizedNode<?, ?> node : nodes) {
+ write(node, dataSchemaNode);
+ }
+ }
+
+ private OrderedNormalizedNodeWriter writeLeaf(final NormalizedNode<?, ?> node) throws IOException {
+ if (wasProcessAsSimpleNode(node)) {
+ return this;
+ }
+
+ throw new IllegalStateException("It wasn't possible to serialize node " + node);
+ }
+
+ private boolean writeChildren(final Iterable<? extends NormalizedNode<?, ?>> children, SchemaNode parentSchemaNode, boolean endParent) throws IOException {
+ //Augmentations cannot be gotten with node.getChild so create our own structure with augmentations resolved
+ ArrayListMultimap<QName, NormalizedNode<?, ?>> qNameToNodes = ArrayListMultimap.create();
+ for (NormalizedNode<?, ?> child : children) {
+ if (child instanceof AugmentationNode) {
+ qNameToNodes.putAll(resolveAugmentations(child));
+ } else {
+ qNameToNodes.put(child.getNodeType(), child);
+ }
+ }
+
+ if (parentSchemaNode instanceof DataNodeContainer) {
+ if (parentSchemaNode instanceof ListSchemaNode && qNameToNodes.containsKey(parentSchemaNode.getQName())) {
+ write(qNameToNodes.get(parentSchemaNode.getQName()), parentSchemaNode);
+ } else {
+ for (DataSchemaNode schemaNode : ((DataNodeContainer) parentSchemaNode).getChildNodes()) {
+ write(qNameToNodes.get(schemaNode.getQName()), schemaNode);
+ }
+ }
+ } else if(parentSchemaNode instanceof ChoiceSchemaNode) {
+ for (ChoiceCaseNode ccNode : ((ChoiceSchemaNode) parentSchemaNode).getCases()) {
+ for (DataSchemaNode dsn : ccNode.getChildNodes()) {
+ if (qNameToNodes.containsKey(dsn.getQName())) {
+ write(qNameToNodes.get(dsn.getQName()), dsn);
+ }
+ }
+ }
+ } else {
+ for (NormalizedNode<?, ?> child : children) {
+ writeLeaf(child);
+ }
+ }
+ if (endParent) {
+ writer.endNode();
+ }
+ return true;
+ }
+
+ private ArrayListMultimap<QName, NormalizedNode<?, ?>> resolveAugmentations(NormalizedNode<?, ?> child) {
+ final ArrayListMultimap<QName, NormalizedNode<?, ?>> resolvedAugs = ArrayListMultimap.create();
+ for (NormalizedNode<?, ?> node : ((AugmentationNode) child).getValue()) {
+ if (node instanceof AugmentationNode) {
+ resolvedAugs.putAll(resolveAugmentations(node));
+ } else {
+ resolvedAugs.put(node.getNodeType(), node);
+ }
+ }
+ return resolvedAugs;
+ }
+
+ private boolean writeMapEntryNode(final MapEntryNode node, final SchemaNode dataSchemaNode) throws IOException {
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer)
+ .startMapEntryNode(node.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(node.getValue()), node.getAttributes());
+ } else {
+ writer.startMapEntryNode(node.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(node.getValue()));
+ }
+ return writeChildren(node.getValue(), dataSchemaNode, true);
+ }
+
+ private boolean wasProcessAsSimpleNode(final NormalizedNode<?, ?> node) throws IOException {
+ if (node instanceof LeafSetEntryNode) {
+ final LeafSetEntryNode<?> nodeAsLeafList = (LeafSetEntryNode<?>)node;
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).leafSetEntryNode(nodeAsLeafList.getValue(), nodeAsLeafList.getAttributes());
+ } else {
+ writer.leafSetEntryNode(nodeAsLeafList.getValue());
+ }
+ return true;
+ } else if (node instanceof LeafNode) {
+ final LeafNode<?> nodeAsLeaf = (LeafNode<?>)node;
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).leafNode(nodeAsLeaf.getIdentifier(), nodeAsLeaf.getValue(), nodeAsLeaf.getAttributes());
+ } else {
+ writer.leafNode(nodeAsLeaf.getIdentifier(), nodeAsLeaf.getValue());
+ }
+ return true;
+ } else if (node instanceof AnyXmlNode) {
+ final AnyXmlNode anyXmlNode = (AnyXmlNode)node;
+ writer.anyxmlNode(anyXmlNode.getIdentifier(), anyXmlNode.getValue());
+ return true;
+ }
+
+ return false;
+ }
+
+ private boolean wasProcessedAsCompositeNode(final NormalizedNode<?, ?> node, SchemaNode dataSchemaNode) throws IOException {
+ if (node instanceof ContainerNode) {
+ final ContainerNode n = (ContainerNode) node;
+ if(writer instanceof NormalizedNodeStreamAttributeWriter) {
+ ((NormalizedNodeStreamAttributeWriter) writer).startContainerNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()), n.getAttributes());
+ } else {
+ writer.startContainerNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ }
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof MapEntryNode) {
+ return writeMapEntryNode((MapEntryNode) node, dataSchemaNode);
+ }
+ if (node instanceof UnkeyedListEntryNode) {
+ final UnkeyedListEntryNode n = (UnkeyedListEntryNode) node;
+ writer.startUnkeyedListItem(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof ChoiceNode) {
+ final ChoiceNode n = (ChoiceNode) node;
+ writer.startChoiceNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof AugmentationNode) {
+ final AugmentationNode n = (AugmentationNode) node;
+ writer.startAugmentationNode(n.getIdentifier());
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof UnkeyedListNode) {
+ final UnkeyedListNode n = (UnkeyedListNode) node;
+ writer.startUnkeyedList(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof OrderedMapNode) {
+ final OrderedMapNode n = (OrderedMapNode) node;
+ writer.startOrderedMapNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof MapNode) {
+ final MapNode n = (MapNode) node;
+ writer.startMapNode(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+ if (node instanceof LeafSetNode) {
+ //covers also OrderedLeafSetNode for which doesn't exist start* method
+ final LeafSetNode<?> n = (LeafSetNode<?>) node;
+ writer.startLeafSet(n.getIdentifier(), OrderedNormalizedNodeWriter.childSizeHint(n.getValue()));
+ return writeChildren(n.getValue(), dataSchemaNode, true);
+ }
+
+ return false;
+ }
+
+ private static final int childSizeHint(final Iterable<?> children) {
+ return (children instanceof Collection) ? ((Collection<?>) children).size() : UNKNOWN_SIZE;
+ }
+
+ //TODO similar code is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private SchemaNode findParentSchemaOnPath(SchemaContext schemaContext, SchemaPath path) {
+ SchemaNode current = Preconditions.checkNotNull(schemaContext);
+ for (final QName qname : path.getPathFromRoot()) {
+ SchemaNode child;
+ if(current instanceof DataNodeContainer) {
+ child = ((DataNodeContainer) current).getDataChildByName(qname);
+
+ if (child == null && current instanceof SchemaContext) {
+ child = tryFindGroupings((SchemaContext) current, qname).orNull();
+ }
+
+ if(child == null && current instanceof SchemaContext) {
+ child = tryFindNotification((SchemaContext) current, qname)
+ .or(tryFindRpc(((SchemaContext) current), qname)).orNull();
+ }
+ } else if (current instanceof ChoiceSchemaNode) {
+ child = ((ChoiceSchemaNode) current).getCaseNodeByName(qname);
+ } else if (current instanceof RpcDefinition) {
+ switch (qname.getLocalName()) {
+ case "input":
+ child = ((RpcDefinition) current).getInput();
+ break;
+ case "output":
+ child = ((RpcDefinition) current).getOutput();
+ break;
+ default:
+ child = null;
+ break;
+ }
+ } else {
+ throw new IllegalArgumentException(String.format("Schema node %s does not allow children.", current));
+ }
+ current = child;
+ }
+ return current;
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindGroupings(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode> fromNullable(Iterables.find(ctx.getGroupings(), new SchemaNodePredicate(qname), null));
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindRpc(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getOperations(), new SchemaNodePredicate(qname), null));
+ }
+
+ //TODO this method is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private Optional<SchemaNode> tryFindNotification(final SchemaContext ctx, final QName qname) {
+ return Optional.<SchemaNode>fromNullable(Iterables.find(ctx.getNotifications(), new SchemaNodePredicate(qname), null));
+ }
+
+ @Override
+ public void flush() throws IOException {
+ writer.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.flush();
+ writer.close();
+ }
+
+ //TODO this class is already present in schemaTracker, unify this when this writer is moved back to yangtools
+ private static final class SchemaNodePredicate implements Predicate<SchemaNode> {
+ private final QName qname;
+
+ public SchemaNodePredicate(final QName qname) {
+ this.qname = qname;
+ }
+
+ @Override
+ public boolean apply(final SchemaNode input) {
+ return input.getQName().equals(qname);
+ }
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.netconf.util.mapping;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
public abstract class AbstractSingletonNetconfOperation extends AbstractLastNetconfOperation {
super(netconfSessionIdForReporting);
}
+ @Override
+ protected Element handle(Document document, XmlElement operationElement,
+ NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+ return handleWithNoSubsequentOperations(document, operationElement);
+ }
+
@Override
protected HandlingPriority getHandlingPriority() {
return HandlingPriority.HANDLE_WITH_MAX_PRIORITY;
private static boolean isHelloMessage(final Document document) {
XmlElement element = XmlElement.fromDomElement(document.getDocumentElement());
try {
+ // accept even if hello has no namespace
return element.getName().equals(HELLO_TAG) &&
- element.hasNamespace() &&
- element.getNamespace().equals(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
+ (!element.hasNamespace() || element.getNamespace().equals(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
} catch (MissingNameSpaceException e) {
// Cannot happen, since we check for hasNamespace
throw new IllegalStateException(e);
package org.opendaylight.controller.netconf.util.messages;
import com.google.common.base.Function;
+import com.google.common.base.Optional;
import com.google.common.collect.Collections2;
import java.util.Collection;
import java.util.List;
public static Collection<String> extractCapabilitiesFromHello(Document doc) throws NetconfDocumentedException {
XmlElement responseElement = XmlElement.fromDomDocument(doc);
- XmlElement capabilitiesElement = responseElement
- .getOnlyChildElementWithSameNamespace(XmlNetconfConstants.CAPABILITIES);
- List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
+ // Extract child element <capabilities> from <hello> with or without(fallback) the same namespace
+ Optional<XmlElement> capabilitiesElement = responseElement
+ .getOnlyChildElementWithSameNamespaceOptionally(XmlNetconfConstants.CAPABILITIES)
+ .or(responseElement
+ .getOnlyChildElementOptionally(XmlNetconfConstants.CAPABILITIES));
+
+ List<XmlElement> caps = capabilitiesElement.get().getChildElements(XmlNetconfConstants.CAPABILITY);
return Collections2.transform(caps, new Function<XmlElement, String>() {
@Override
import com.google.common.base.Optional;
import io.netty.channel.local.LocalAddress;
import java.net.InetSocketAddress;
+import java.util.concurrent.TimeUnit;
import org.osgi.framework.BundleContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final String PRIVATE_KEY_PATH_PROP = ".pk.path";
private static final String CONNECTION_TIMEOUT_MILLIS_PROP = "connectionTimeoutMillis";
- private static final long DEFAULT_TIMEOUT_MILLIS = 5000;
+ public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.SECONDS.toMillis(30);
private static final LocalAddress netconfLocalAddress = new LocalAddress("netconf");
public static LocalAddress getNetconfLocalAddress() {
}
public String getName() {
- if (element.getLocalName()!=null && !element.getLocalName().equals(DEFAULT_NAMESPACE_PREFIX)){
- return element.getLocalName();
+ final String localName = element.getLocalName();
+ if (!Strings.isNullOrEmpty(localName)){
+ return localName;
}
return element.getTagName();
}
/**
*
* @param tagName tag name without prefix
- * @return
+ * @return List of child elements
*/
public List<XmlElement> getChildElements(final String tagName) {
return getChildElementsInternal(new ElementFilteringStrategy() {
}
public Optional<XmlElement> getOnlyChildElementOptionally(String childName) {
- try {
- return Optional.of(getOnlyChildElement(childName));
- } catch (Exception e) {
+ List<XmlElement> nameElements = getChildElements(childName);
+ if (nameElements.size() != 1) {
return Optional.absent();
}
+ return Optional.of(nameElements.get(0));
}
- public Optional<XmlElement> getOnlyChildElementOptionally(String childName, String namespace) {
- try {
- return Optional.of(getOnlyChildElement(childName, namespace));
- } catch (Exception e) {
+ public Optional<XmlElement> getOnlyChildElementOptionally(final String childName, final String namespace) {
+ List<XmlElement> children = getChildElementsWithinNamespace(namespace);
+ children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
+ @Override
+ public boolean apply(XmlElement xmlElement) {
+ return xmlElement.getName().equals(childName);
+ }
+ }));
+ if (children.size() != 1){
return Optional.absent();
}
+ return Optional.of(children.get(0));
}
public XmlElement getOnlyChildElementWithSameNamespace(String childName) throws NetconfDocumentedException {
return getOnlyChildElement(childName, getNamespace());
}
- public Optional<XmlElement> getOnlyChildElementWithSameNamespaceOptionally(String childName) {
- try {
- return Optional.of(getOnlyChildElement(childName, getNamespace()));
- } catch (Exception e) {
- return Optional.absent();
+ public Optional<XmlElement> getOnlyChildElementWithSameNamespaceOptionally(final String childName) {
+ Optional<String> namespace = getNamespaceOptionally();
+ if (namespace.isPresent()) {
+ List<XmlElement> children = getChildElementsWithinNamespace(namespace.get());
+ children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
+ @Override
+ public boolean apply(XmlElement xmlElement) {
+ return xmlElement.getName().equals(childName);
+ }
+ }));
+ if (children.size() != 1){
+ return Optional.absent();
+ }
+ return Optional.of(children.get(0));
}
+ return Optional.absent();
}
public XmlElement getOnlyChildElementWithSameNamespace() throws NetconfDocumentedException {
}
public Optional<XmlElement> getOnlyChildElementWithSameNamespaceOptionally() {
- try {
- XmlElement childElement = getOnlyChildElement();
- childElement.checkNamespace(getNamespace());
- return Optional.of(childElement);
- } catch (Exception e) {
- return Optional.absent();
+ Optional<XmlElement> child = getOnlyChildElementOptionally();
+ if (child.isPresent()
+ && child.get().getNamespaceOptionally().isPresent()
+ && getNamespaceOptionally().isPresent()
+ && getNamespaceOptionally().get().equals(child.get().getNamespaceOptionally().get())) {
+ return child;
}
+ return Optional.absent();
}
public XmlElement getOnlyChildElement(final String childName, String namespace) throws NetconfDocumentedException {
return children.get(0);
}
+ public Optional<XmlElement> getOnlyChildElementOptionally() {
+ List<XmlElement> children = getChildElements();
+ if (children.size() != 1) {
+ return Optional.absent();
+ }
+ return Optional.of(children.get(0));
+ }
+
public String getTextContent() throws NetconfDocumentedException {
NodeList childNodes = element.getChildNodes();
if (childNodes.getLength() == 0) {
return attribute;
}
+ public Optional<String> getNamespaceAttributeOptionally(){
+ String attribute = element.getAttribute(XmlUtil.XMLNS_ATTRIBUTE_KEY);
+ if (attribute == null || attribute.equals(DEFAULT_NAMESPACE_PREFIX)){
+ return Optional.absent();
+ }
+ return Optional.of(attribute);
+ }
+
public Optional<String> getNamespaceOptionally() {
String namespaceURI = element.getNamespaceURI();
if (Strings.isNullOrEmpty(namespaceURI)) {
public String getNamespace() throws MissingNameSpaceException {
Optional<String> namespaceURI = getNamespaceOptionally();
- if (namespaceURI.isPresent() == false){
+ if (!namespaceURI.isPresent()){
throw new MissingNameSpaceException(String.format("No namespace defined for %s", this),
NetconfDocumentedException.ErrorType.application,
NetconfDocumentedException.ErrorTag.operation_failed,
XmlElement that = (XmlElement) o;
- if (!element.isEqualNode(that.element)) {
- return false;
- }
+ return element.isEqualNode(that.element);
- return true;
}
@Override
}
public boolean hasNamespace() {
- try {
- getNamespaceAttribute();
- } catch (MissingNameSpaceException e) {
- try {
- getNamespace();
- } catch (MissingNameSpaceException e1) {
+ if (!getNamespaceAttributeOptionally().isPresent()) {
+ if (!getNamespaceOptionally().isPresent()) {
return false;
}
- return true;
}
return true;
}
factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
factory.setXIncludeAware(false);
factory.setExpandEntityReferences(false);
+ // Performance improvement for messages with size <10k according to
+ // https://xerces.apache.org/xerces2-j/faq-performance.html
+ factory.setFeature("http://apache.org/xml/features/dom/defer-node-expansion", false);
} catch (ParserConfigurationException e) {
throw new ExceptionInInitializerError(e);
}
assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
- assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), NetconfConfigUtil.DEFAULT_TIMEOUT_MILLIS);
doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
- assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), NetconfConfigUtil.DEFAULT_TIMEOUT_MILLIS);
}
@Test
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
+ <version>1.6.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>netconf-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
+ <version>0.4.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<modules>
<module>netconf-api</module>
- <!--<module>netconf-cli</module>-->
+ <module>netconf-cli</module>
<module>netconf-config</module>
<module>netconf-impl</module>
<module>config-netconf-connector</module>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectory>${project.basedir}</sourceDirectory>
<includes>**\/*.java,**\/*.xml,**\/*.ini,**\/*.sh,**\/*.bat,**\/*.yang</includes>
- <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/${jmxGeneratorPath}\/,**\/${salGeneratorPath}\/,**\/netconf\/test\/tool\/Main.java</excludes>
+ <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/${jmxGeneratorPath}\/,**\/${salGeneratorPath}\/,**\/netconf\/test\/tool\/Main.java, **\/netconf\/test\/tool\/client\/stress\/StressClient.java</excludes>
</configuration>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>opendaylight/commons/parent</relativePath>
</parent>
<artifactId>releasepom</artifactId>
- <version>0.2.0-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>controller</name>
<!-- Used by Sonar to set project name -->