-->
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
- <version>${feature.test.version}</version>
<scope>test</scope>
</dependency>
<!-- dependency for opendaylight-karaf-empty for use by testing -->
<karaf.distro.version>${karaf.empty.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
</dependency>
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
</dependency>
</dependencies>
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
</dependency>
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
</dependency>
</dependencies>
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
</dependency>
<!-- test the features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
</dependency>
</dependencies>
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
<branding.version>1.1.0-SNAPSHOT</branding.version>
<karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
<karaf.version>3.0.1</karaf.version>
- <feature.test.version>0.7.0-SNAPSHOT</feature.test.version>
<karaf.empty.version>1.5.0-SNAPSHOT</karaf.empty.version>
<surefire.version>2.16</surefire.version>
</properties>
<version>${jolokia.version}</version>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
- <version>${feature.test.version}</version>
<scope>test</scope>
</dependency>
<!-- dependency for opendaylight-karaf-empty for use by testing -->
<karaf.distro.version>${karaf.empty.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
</dependency>
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
</dependency>
</dependencies>
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
<classifier>features</classifier>
<type>xml</type>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-netconf</artifactId>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-config-persister</artifactId>
<artifactId>sal-inmemory-datastore</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-netconf-connector</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-netconf-monitoring</artifactId>
+ </dependency>
+
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-netconf-connector</artifactId>
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
- <version>0.7.0-SNAPSHOT</version>
</dependency>
</dependencies>
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
<repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-config-persister/${config.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-config-netty/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-netconf/${netconf.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
<feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
<feature version='${project.version}'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/sal-common-impl/${mdsal.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-common-util/${mdsal.version}</bundle>
</feature>
+
+ <!-- TODO move to netconf features, however there are some weird dependencies on features-config-persister all over that cause cyclic dependencies-->
+ <!-- TODO when installing this in pure karaf distro, many optimistic lock exceptions are thrown from config manager -->
+ <feature name='odl-netconf-mdsal' version='${project.version}' description="OpenDaylight :: Netconf :: Mdsal">
+ <feature version='${config.version}'>odl-config-all</feature>
+ <feature version='${netconf.version}'>odl-netconf-all</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-ssh/${netconf.version}</bundle>
+ <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
+ <bundle>mvn:org.opendaylight.controller/mdsal-netconf-connector/${netconf.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/mdsal-netconf-monitoring/${netconf.version}</bundle>
+ <!-- TODO: 01-netconf.xml requires netconf-config-dispatcher to be present, and it's part of the netconf-connector features. Clean up. -->
+ <bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${config.version}</bundle>
+ <configfile finalname='${config.configfile.directory}/${config.netconf.client.configfile}'>mvn:org.opendaylight.controller/netconf-config/${netconf.version}/xml/config</configfile>
+ <configfile finalname='${config.configfile.directory}/${config.netconf.mdsal.configfile}'>mvn:org.opendaylight.controller/netconf-mdsal-config/${netconf.version}/xml/config</configfile>
+ </feature>
+
<feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
<feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${config.version}'>odl-config-startup</feature>
<feature version='${config.version}'>odl-config-netty</feature>
+ <bundle>mvn:com.lmax/disruptor/${lmax.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-core-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-core-spi/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-broker-impl/${project.version}</bundle>
<configfile finalname="configuration/initial/akka.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf</configfile>
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
+ <configfile finalname="etc/org.opendaylight.controller.cluster.datastore.cfg">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore</configfile>
</feature>
<feature name='odl-clustering-test-app' version='${project.version}'>
<!-- test to validate features.xml -->
<!--FIXME BUG-2195 When running single feature tests for netconf connector, features including ssh proxy server always fail (this behavior does not appear when running karaf distro directly)-->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
- <version>${yangtools.version}</version>
<scope>test</scope>
</dependency>
<!-- dependency for opendaylight-karaf-empty for use by testing -->
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
-->
<feature version='${project.version}'>odl-netconf-connector</feature>
<feature version='${project.version}'>odl-netconf-connector-ssh</feature>
+
+
</feature>
<!--
Necessary TODO: Define your features. It is useful to list them in order of dependency. So if A depends on B, list A first.
</feature>
<feature name='odl-netconf-ssh' version='${netconf.version}' description="OpenDaylight :: Netconf Connector :: SSH">
<feature version='${netconf.version}'>odl-netconf-tcp</feature>
+ <feature version='${config.version}'>odl-config-netty</feature>
<!-- FIXME: This introduces a cycle between projects, which makes version updates
harder. Should be moved to a different feature.
-->
</parent>
<artifactId>features-netconf</artifactId>
- <packaging>pom</packaging>
+ <packaging>jar</packaging>
<properties>
<features.file>features.xml</features.file>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-mdsal-config</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-auth</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring-extension</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools.model</groupId>
<artifactId>ietf-inet-types</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-monitoring</artifactId>
</dependency>
+ <!-- test to validate features.xml -->
+ <dependency>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>features-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <!-- dependency for opendaylight-karaf-empty for use by testing -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>opendaylight-karaf-empty</artifactId>
+ <version>${commons.opendaylight.version}</version>
+ <type>zip</type>
+ </dependency>
</dependencies>
<build>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${surefire.version}</version>
+ <configuration>
+ <systemPropertyVariables>
+ <karaf.distro.groupId>org.opendaylight.controller</karaf.distro.groupId>
+ <karaf.distro.artifactId>opendaylight-karaf-empty</karaf.distro.artifactId>
+ <karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
+ </systemPropertyVariables>
+ <dependenciesToScan>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
+ </dependenciesToScan>
+ </configuration>
+ </plugin>
</plugins>
</build>
<scm>
<bundle>mvn:org.opendaylight.controller/netconf-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-auth/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/ietf-netconf/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/ietf-netconf-notifications/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring-extension/${project.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-inet-types/${ietf-inet-types.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types/${ietf-yang-types.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types-20130715/2013.07.15.7-SNAPSHOT</bundle>
</feature>
<feature name='odl-netconf-mapping-api' version='${project.version}' description="OpenDaylight :: Netconf :: Mapping API">
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-config-netconf-connector</feature>
<!-- Netconf will not provide schemas without monitoring -->
<feature version='${project.version}'>odl-netconf-monitoring</feature>
+ <feature version='${project.version}'>odl-netconf-notifications-impl</feature>
<bundle>mvn:org.opendaylight.controller/netconf-impl/${project.version}</bundle>
</feature>
<feature name='odl-config-netconf-connector' version='${project.version}' description="OpenDaylight :: Netconf :: Connector">
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
+ <feature version='${project.version}'>odl-netconf-notifications-api</feature>
<bundle>mvn:org.opendaylight.controller/config-netconf-connector/${project.version}</bundle>
</feature>
<feature name='odl-netconf-netty-util' version='${project.version}' description="OpenDaylight :: Netconf :: Netty Util">
<feature version='${project.version}'>odl-netconf-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-monitoring/${project.version}</bundle>
</feature>
+ <feature name='odl-netconf-notifications-api' version='${project.version}' description="OpenDaylight :: Netconf :: Notification :: Api">
+ <feature version='${project.version}'>odl-netconf-api</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-notifications-api/${project.version}</bundle>
+ </feature>
+ <feature name='odl-netconf-notifications-impl' version='${project.version}' description="OpenDaylight :: Netconf :: Notifications :: Impl">
+ <feature version='${project.version}'>odl-netconf-notifications-api</feature>
+ <feature version='${project.version}'>odl-netconf-util</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-notifications-impl/${project.version}</bundle>
+ </feature>
</features>
<module>netconf-connector</module>
<module>restconf</module>
<module>extras</module>
- <module>neutron</module>
</modules>
</project>
</dependency>
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
</dependency>
</dependencies>
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
-->
<!-- test to validate features.xml -->
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-test</artifactId>
- <version>${yangtools.version}</version>
<scope>test</scope>
</dependency>
<!-- dependency for opendaylight-karaf-empty for use by testing -->
<karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
</systemPropertyVariables>
<dependenciesToScan>
- <dependency>org.opendaylight.yangtools:features-test</dependency>
+ <dependency>org.opendaylight.odlparent:features-test</dependency>
</dependenciesToScan>
</configuration>
</plugin>
<bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
<bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools/yang-model-export/${yangtools.version}</bundle>
<bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
<bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
<bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
<dependency>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>pax-exam-container-karaf</artifactId>
- <version>${pax.exam.version}</version>
+ <version>${exam.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<dependency>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>pax-exam</artifactId>
- <version>${pax.exam.version}</version>
+ <version>${exam.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<artifactId>karaf-parent</artifactId>
<name>${project.artifactId}</name>
<packaging>pom</packaging>
- <prerequisites>
- <maven>3.1.1</maven>
- </prerequisites>
+
<properties>
<branding.version>1.1.0-SNAPSHOT</branding.version>
<karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
</plugins>
</pluginManagement>
<plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-enforcer-plugin</artifactId>
+ <version>${enforcer.version}</version>
+ <executions>
+ <execution>
+ <id>enforce-maven</id>
+ <goals>
+ <goal>enforce</goal>
+ </goals>
+ <configuration>
+ <rules>
+ <requireMavenVersion>
+ <version>3.1.1</version>
+ </requireMavenVersion>
+ </rules>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
<plugin>
<artifactId>maven-resources-plugin</artifactId>
</plugin>
</parent>
<artifactId>opendaylight-karaf-empty</artifactId>
<packaging>pom</packaging>
- <prerequisites>
- <maven>3.0</maven>
- </prerequisites>
<dependencies>
<dependency>
</parent>
<artifactId>distribution.opendaylight-karaf</artifactId>
<packaging>pom</packaging>
- <prerequisites>
- <maven>3.0</maven>
- </prerequisites>
<dependencies>
<dependency>
--- /dev/null
+<?xml version="1.0"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<enunciate label="full" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://enunciate.codehaus.org/schemas/enunciate-1.28.xsd">
+</enunciate>
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>enunciate-parent</artifactId>
+ <version>1.5.0-SNAPSHOT</version>
+ <relativePath>../../commons/enunciate-parent</relativePath>
+ </parent>
+
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>adsal-enunciate-parent</artifactId>
+ <packaging>pom</packaging>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.enunciate</groupId>
+ <artifactId>maven-enunciate-plugin</artifactId>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>${sal.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+</project>
+
-->
</feature>
+ <feature name="odl-nsf-service" description="OpenDaylight :: NSF :: Network Service Functions in Controller" version="${project.version}">
+ <feature version="${sal.version}">odl-adsal-all</feature>
+ <feature version="${project.version}">odl-nsf-controller-managers</feature>
+ <feature version="${project.version}">odl-adsal-controller-northbound</feature>
+ </feature>
+
<feature name="odl-nsf-managers" description="OpenDaylight :: AD-SAL :: Network Service Functions" version="${project.version}">
<feature version="${commons.opendaylight.version}">odl-base-all</feature>
<feature version="${sal.version}">odl-adsal-all</feature>
<bundle>mvn:org.opendaylight.controller/routing.dijkstra_implementation/${routing.dijkstra_implementation.version}</bundle>
</feature>
+ <feature name="odl-nsf-controller-managers" description="OpenDaylight :: AD-SAL :: Network Service Functions in Controller" version="${project.version}">
+ <feature version="${commons.opendaylight.version}">odl-base-all</feature>
+ <feature version="${sal.version}">odl-adsal-all</feature>
+ <bundle>mvn:org.opendaylight.controller/usermanager/${usermanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/usermanager.implementation/${usermanager.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/appauth/${appauth.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/connectionmanager/${connectionmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/connectionmanager.implementation/${connectionmanager.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/containermanager/${containermanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/containermanager.implementation/${containermanager.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/statisticsmanager/${statisticsmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/statisticsmanager.implementation/${statisticsmanager.implementation.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/switchmanager/${switchmanager.api.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/switchmanager.implementation/${switchmanager.implementation.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/forwardingrulesmanager/${forwardingrulesmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/forwardingrulesmanager.implementation/${forwardingrulesmanager.implementation.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/topologymanager/${topologymanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/topologymanager.shell/${topologymanager.shell.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/hosttracker/${hosttracker.api.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/hosttracker.implementation/${hosttracker.implementation.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/hosttracker.shell/${hosttracker.shell.version}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting/${forwarding.staticrouting}</bundle>
+
+ <bundle>mvn:org.opendaylight.controller.thirdparty/net.sf.jung2/2.0.1</bundle>
+ <bundle>mvn:org.opendaylight.controller/routing.dijkstra_implementation/${routing.dijkstra_implementation.version}</bundle>
+ </feature>
+
<feature name="odl-adsal-northbound" description="OpenDaylight :: AD-SAL :: Northbound APIs" version="${project.version}">
<feature version="${commons.opendaylight.version}">odl-base-all</feature>
<feature version="${project.version}">odl-nsf-managers</feature>
<bundle>mvn:org.opendaylight.controller/topology.northbound/${topology.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/usermanager.northbound/${usermanager.northbound.version}</bundle>
</feature>
+
+ <feature name="odl-adsal-controller-northbound" description="OpenDaylight :: AD-SAL :: Northbound APIs in Controller" version="${project.version}">
+ <feature version="${commons.opendaylight.version}">odl-base-all</feature>
+ <feature version="${project.version}">odl-nsf-managers</feature>
+ <bundle>mvn:org.ow2.asm/asm-all/${asm.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/bundlescanner/${bundlescanner.api.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/bundlescanner.implementation/${bundlescanner.implementation.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/commons.northbound/${northbound.commons.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/connectionmanager.northbound/${connectionmanager.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/subnets.northbound/${subnets.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/switchmanager.northbound/${switchmanager.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/topology.northbound/${topology.northbound.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/usermanager.northbound/${usermanager.northbound.version}</bundle>
+ </feature>
</features>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>connectionmanager.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>containermanager.northbound</artifactId>
</instructions>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>controllermanager.northbound</artifactId>
<version>0.1.0-SNAPSHOT</version>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>flowprogrammer.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>hosttracker.northbound</artifactId>
<version>0.5.0-SNAPSHOT</version>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>northbound.client</artifactId>
</execution>
</executions>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- </plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../../commons/opendaylight</relativePath>
+ <relativePath>../../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>networkconfig.bridgedomain.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>forwarding.staticrouting.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>statistics.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>subnets.northbound</artifactId>
</dependency>
</dependencies>
<build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.enunciate</groupId>
+ <artifactId>maven-enunciate-plugin</artifactId>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>clustering.services</artifactId>
+ <version>${clustering.services.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>configuration</artifactId>
+ <version>${configuration.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>${sal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>switchmanager</artifactId>
+ <version>${switchmanager.api.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.felix</groupId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- <version>${clustering.services.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration</artifactId>
- <version>${configuration.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager</artifactId>
- <version>${switchmanager.api.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>switchmanager.northbound</artifactId>
<version>0.5.0-SNAPSHOT</version>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>topology.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>usermanager.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- </plugin>
</plugins>
</build>
<scm>
<module>samples/northbound/loadbalancer</module>
<module>dummy-console</module>
+ <module>adsal-enunciate-parent</module>
<!-- features -->
<module>features/base</module>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>adsal-enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../../commons/opendaylight</relativePath>
+ <relativePath>../../../adsal-enunciate-parent</relativePath>
</parent>
<artifactId>samples.loadbalancer.northbound</artifactId>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- </dependencies>
- </plugin>
</plugins>
</build>
<scm>
<!-- karaf distro -->
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-karaf</directory>
+ <directory>karaf</directory>
<includes>
<include>pom.xml</include>
</includes>
<!-- features -->
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-features</directory>
+ <directory>features</directory>
<includes>
<include>pom.xml</include>
</includes>
</fileSet>
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-features/src/main/features</directory>
+ <directory>features/src/main/features</directory>
<includes>
<include>**/*.xml</include>
</includes>
<!-- impl -->
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-impl</directory>
+ <directory>impl</directory>
<includes>
<include>pom.xml</include>
</includes>
</fileSet>
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-impl/src/main/java</directory>
+ <directory>impl/src/main/java</directory>
<includes>
<include>**/*.java</include>
</includes>
</fileSet>
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-impl/src/main/config</directory>
+ <directory>impl/src/test/java</directory>
+ <includes>
+ <include>**/*.java</include>
+ </includes>
+ </fileSet>
+ <fileSet filtered="true" encoding="UTF-8">
+ <directory>impl/src/main/config</directory>
<includes>
<include>**/*.xml</include>
</includes>
</fileSet>
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-impl/src/main/yang</directory>
+ <directory>impl/src/main/yang</directory>
<includes>
<include>**/*.yang</include>
</includes>
<!-- api -->
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-api</directory>
+ <directory>api</directory>
<includes>
<include>pom.xml</include>
</includes>
</fileSet>
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-api/src/main/yang</directory>
+ <directory>api/src/main/yang</directory>
<includes>
<include>**/*.yang</include>
</includes>
<!-- artifacts -->
<fileSet filtered="true" encoding="UTF-8">
- <directory>__artifactId__-artifacts</directory>
+ <directory>artifacts</directory>
<includes>
<include>pom.xml</include>
</includes>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
+ <relativePath/>
</parent>
<groupId>${groupId}</groupId>
<artifactId>${artifactId}-features</artifactId>
<properties>
<mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
<yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
+ <configfile.directory>etc/opendaylight/karaf</configfile.directory>
</properties>
<dependencyManagement>
<dependencies>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${symbol_dollar}{yangtools.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-mdsal/${symbol_dollar}{mdsal.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-restconf/${symbol_dollar}{mdsal.version}/xml/features</repository>
- <feature name='odl-${artifactId}-api' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: api '>
+ <feature name='odl-${artifactId}-api' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: api'>
<feature version='${symbol_dollar}{yangtools.version}'>odl-yangtools-models</feature>
<bundle>mvn:${groupId}/${artifactId}-api/${symbol_dollar}{project.version}</bundle>
</feature>
- <feature name='odl-${artifactId}-impl' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: impl '>
+ <feature name='odl-${artifactId}' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId}'>
<feature version='${symbol_dollar}{mdsal.version}'>odl-mdsal-broker</feature>
<feature version='${symbol_dollar}{project.version}'>odl-${artifactId}-api</feature>
<bundle>mvn:${groupId}/${artifactId}-impl/${symbol_dollar}{project.version}</bundle>
- <configfile finalname="${artifactId}-impl-default-config.xml">mvn:${groupId}/${artifactId}-impl/${symbol_dollar}{project.version}/xml/config</configfile>
+ <configfile finalname="${configfile.directory}/${artifactId}.xml">mvn:${groupId}/${artifactId}-impl/${symbol_dollar}{project.version}/xml/config</configfile>
</feature>
- <feature name='odl-${artifactId}-impl-rest' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: impl :: REST '>
- <feature version="${symbol_dollar}{project.version}">odl-${artifactId}-impl</feature>
+ <feature name='odl-${artifactId}-rest' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: REST'>
+ <feature version="${symbol_dollar}{project.version}">odl-${artifactId}</feature>
<feature version="${symbol_dollar}{mdsal.version}">odl-restconf</feature>
</feature>
- <feature name='odl-${artifactId}-impl-ui' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: impl :: UI'>
- <feature version="${symbol_dollar}{project.version}">odl-${artifactId}-impl-rest</feature>
+ <feature name='odl-${artifactId}-ui' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${artifactId} :: UI'>
+ <feature version="${symbol_dollar}{project.version}">odl-${artifactId}-rest</feature>
<feature version="${symbol_dollar}{mdsal.version}">odl-mdsal-apidocs</feature>
<feature version="${symbol_dollar}{mdsal.version}">odl-mdsal-xsql</feature>
</feature>
<artifactId>${artifactId}-api</artifactId>
<version>${symbol_dollar}{project.version}</version>
</dependency>
+
+ <!-- Testing Dependencies -->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
</project>
-->
<snapshot>
<required-capabilities>
+ <capability>urn:opendaylight:params:xml:ns:yang:${artifactId}:impl?module=${artifactId}-impl&amp;revision=2014-12-10</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
</required-capabilities>
<configuration>
<data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:${artifactId}:impl">prefix:${artifactId}-impl</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:${artifactId}:impl">prefix:${artifactId}</type>
<name>${artifactId}-default</name>
<broker>
<type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-broker-osgi-registry</type>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package ${package};
+package ${package}.impl;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
*/
package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210;
-import ${package}.${classPrefix}Provider;
+import ${package}.impl.${classPrefix}Provider;
-public class ${classPrefix}ImplModule extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.Abstract${classPrefix}ImplModule {
- public ${classPrefix}ImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+public class ${classPrefix}Module extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.Abstract${classPrefix}Module {
+ public ${classPrefix}Module(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
- public ${classPrefix}ImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.${classPrefix}ImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ public ${classPrefix}Module(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.${classPrefix}Module oldModule, java.lang.AutoCloseable oldInstance) {
super(identifier, dependencyResolver, oldModule, oldInstance);
}
* Do not modify this file unless it is present under src/main directory
*/
package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210;
-public class ${classPrefix}ImplModuleFactory extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.Abstract${classPrefix}ImplModuleFactory {
+public class ${classPrefix}ModuleFactory extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210.Abstract${classPrefix}ModuleFactory {
}
"Initial revision";
}
- identity ${artifactId}-impl {
+ identity ${artifactId} {
base config:module-type;
- config:java-name-prefix ${classPrefix}Impl;
+ config:java-name-prefix ${classPrefix};
}
augment "/config:modules/config:module/config:configuration" {
- case ${artifactId}-impl {
- when "/config:modules/config:module/config:type = '${artifactId}-impl'";
+ case ${artifactId} {
+ when "/config:modules/config:module/config:type = '${artifactId}'";
container broker {
uses config:service-ref {
refine type {
--- /dev/null
+#set( $symbol_pound = '#' )
+#set( $symbol_dollar = '$' )
+#set( $symbol_escape = '\' )
+#set( $provider = "${classPrefix}Provider" )
+/*
+ * ${copyright} and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package ${package}.impl;
+
+import org.junit.Test;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+
+import static org.mockito.Mockito.mock;
+
+public class ${classPrefix}ProviderTest {
+ @Test
+ public void testOnSessionInitiated() {
+ ${provider} provider = new ${provider}();
+
+ // ensure no exceptions
+ // currently this method is empty
+ provider.onSessionInitiated(mock(BindingAwareBroker.ProviderContext.class));
+ }
+
+ @Test
+ public void testClose() throws Exception {
+ ${provider} provider = new ${provider}();
+
+ // ensure no exceptions
+ // currently this method is empty
+ provider.close();
+ }
+}
--- /dev/null
+#set( $symbol_pound = '#' )
+#set( $symbol_dollar = '$' )
+#set( $symbol_escape = '\' )
+#set( $factory = "${classPrefix}ModuleFactory" )
+/*
+ * ${copyright} and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210;
+
+import org.junit.Test;
+
+public class ${classPrefix}ModuleFactoryTest {
+ @Test
+ public void testFactoryConstructor() {
+ // ensure no exceptions on construction
+ new ${factory}();
+ }
+}
--- /dev/null
+#set( $symbol_pound = '#' )
+#set( $symbol_dollar = '$' )
+#set( $symbol_escape = '\' )
+#set( $module = "${classPrefix}Module" )
+/*
+ * ${copyright} and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.${artifactId}.impl.rev141210;
+
+import org.junit.Test;
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.JmxAttribute;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import ${package}.impl.${classPrefix}Provider;
+
+import javax.management.ObjectName;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class ${classPrefix}ModuleTest {
+ @Test
+ public void testCustomValidation() {
+ ${module} module = new ${module}(mock(ModuleIdentifier.class), mock(DependencyResolver.class));
+
+ // ensure no exceptions on validation
+ // currently this method is empty
+ module.customValidation();
+ }
+
+ @Test
+ public void testCreateInstance() throws Exception {
+ // configure mocks
+ DependencyResolver dependencyResolver = mock(DependencyResolver.class);
+ BindingAwareBroker broker = mock(BindingAwareBroker.class);
+ when(dependencyResolver.resolveInstance(eq(BindingAwareBroker.class), any(ObjectName.class), any(JmxAttribute.class))).thenReturn(broker);
+
+ // create instance of module with injected mocks
+ ${module} module = new ${module}(mock(ModuleIdentifier.class), dependencyResolver);
+
+ // getInstance calls resolveInstance to get the broker dependency and then calls createInstance
+ AutoCloseable closeable = module.getInstance();
+
+ // verify that the module registered the returned provider with the broker
+ verify(broker).registerProvider((${classPrefix}Provider)closeable);
+
+ // ensure no exceptions on close
+ closeable.close();
+ }
+}
<maven>3.1.1</maven>
</prerequisites>
<properties>
- <karaf.localFeature>odl-${artifactId}-impl-ui</karaf.localFeature>
+ <karaf.localFeature>odl-${artifactId}-ui</karaf.localFeature>
</properties>
<dependencyManagement>
<dependencies>
<scope>runtime</scope>
</dependency>
</dependencies>
+ <!-- DO NOT install or deploy the karaf artifact -->
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
</project>
<groupId>${groupId}</groupId>
<artifactId>${artifactId}-aggregator</artifactId>
<version>${version}</version>
- <name>${project.artifactId}</name>
+ <name>${artifactId}</name>
<packaging>pom</packaging>
<modelVersion>4.0.0</modelVersion>
<prerequisites>
<maven>3.1.1</maven>
</prerequisites>
<modules>
- <module>${artifactId}-api</module>
- <module>${artifactId}-impl</module>
- <module>${artifactId}-karaf</module>
- <module>${artifactId}-features</module>
- <module>${artifactId}-artifacts</module>
+ <module>api</module>
+ <module>impl</module>
+ <module>karaf</module>
+ <module>features</module>
+ <module>artifacts</module>
</modules>
<!-- DO NOT install or deploy the repo root pom as it's only needed to initiate a build -->
<build>
</plugin>
</plugins>
</build>
+
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/${artifactId}.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/${artifactId}.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/${artifactId}:Main</url>
+ </scm>
</project>
--- /dev/null
+<?xml version="1.0"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ copyright (c) 2015 cisco systems, inc. and others. all rights reserved.
+
+ this program and the accompanying materials are made available under the
+ terms of the eclipse public license v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<enunciate label="full" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://enunciate.codehaus.org/schemas/enunciate-1.28.xsd">
+</enunciate>
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ copyright (c) 2015 cisco systems, inc. and others. all rights reserved.
+
+ this program and the accompanying materials are made available under the
+ terms of the eclipse public license v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.5.0-SNAPSHOT</version>
+ <relativePath>../opendaylight</relativePath>
+ </parent>
+
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>enunciate-parent</artifactId>
+ <packaging>pom</packaging>
+
+ <!-- enunciate plugin does not work with JDK8 onwards -->
+ <profiles>
+ <profile>
+ <id>non-java8</id>
+ <activation>
+ <jdk>1.7</jdk>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.enunciate</groupId>
+ <artifactId>maven-enunciate-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+</project>
+
<properties>
- <akka.version>2.3.4</akka.version>
- <aopalliance.version>1.0.0</aopalliance.version>
+ <akka.version>2.3.9</akka.version>
<appauth.version>0.5.0-SNAPSHOT</appauth.version>
<archetype-app-northbound>0.1.0-SNAPSHOT</archetype-app-northbound>
- <aries.util.version>1.1.0</aries.util.version>
<arphandler.version>0.6.0-SNAPSHOT</arphandler.version>
- <!-- Controller Modules Versions -->
- <asm.version>4.1</asm.version>
<!-- Plugin Versions -->
- <bouncycastle.version>1.50</bouncycastle.version>
- <bundle.plugin.version>2.4.0</bundle.plugin.version>
<bundlescanner.api.version>0.5.0-SNAPSHOT</bundlescanner.api.version>
<bundlescanner.implementation.version>0.5.0-SNAPSHOT</bundlescanner.implementation.version>
<bundlescanner.version>0.5.0-SNAPSHOT</bundlescanner.version>
- <checkstyle.version>2.12</checkstyle.version>
<clustering.services.version>0.6.0-SNAPSHOT</clustering.services.version>
<clustering.services_implementation.version>0.5.0-SNAPSHOT</clustering.services_implementation.version>
<clustering.stub.version>0.5.0-SNAPSHOT</clustering.stub.version>
<commons.tomcat.util>7.0.53.v201406070630</commons.tomcat.util>
<commons.checkstyle.version>0.1.0-SNAPSHOT</commons.checkstyle.version>
- <commons.fileupload.version>1.2.2</commons.fileupload.version>
<commons.httpclient.version>0.2.0-SNAPSHOT</commons.httpclient.version>
- <commons.io.version>2.4</commons.io.version>
- <commons.lang3.version>3.1</commons.lang3.version>
<commons.logback_settings.version>0.1.0-SNAPSHOT</commons.logback_settings.version>
<commons.net.version>3.0.1</commons.net.version>
<commons.opendaylight.commons.httpclient>0.2.0-SNAPSHOT</commons.opendaylight.commons.httpclient>
<commons.opendaylight.concepts.version>0.6.0-SNAPSHOT</commons.opendaylight.concepts.version>
<commons.opendaylight.version>1.5.0-SNAPSHOT</commons.opendaylight.version>
<commons.parent.version>1.1.0-SNAPSHOT</commons.parent.version>
- <compiler.version>2.3.2</compiler.version>
<commons.httpclient.version>0.2.0-SNAPSHOT</commons.httpclient.version>
<concepts.version>0.6.0-SNAPSHOT</concepts.version>
<concurrentlinkedhashmap.version>1.4</concurrentlinkedhashmap.version>
<config.xsql.configfile>04-xsql.xml</config.xsql.configfile>
<config.netconf.client.configfile>01-netconf.xml</config.netconf.client.configfile>
<config.toaster.configfile>03-toaster-sample.xml</config.toaster.configfile>
+ <config.netconf.mdsal.configfile>08-mdsal-netconf.xml</config.netconf.mdsal.configfile>
<config.restconf.configfile>10-rest-connector.xml</config.restconf.configfile>
<config.netconf.connector.configfile>99-netconf-connector.xml</config.netconf.connector.configfile>
<configuration.implementation.version>0.5.0-SNAPSHOT</configuration.implementation.version>
<config.statistics.manager.configfile>30-statistics-manager.xml</config.statistics.manager.configfile>
<eclipse.persistence.version>2.5.0</eclipse.persistence.version>
<eclipse.jdt.core.compiler.batch.version>3.8.0.I20120518-2145</eclipse.jdt.core.compiler.batch.version>
- <!-- enforcer version -->
- <enforcer.version>1.3.1</enforcer.version>
- <enunciate.version>1.28</enunciate.version>
<!-- OpenEXI third party lib for netconf-->
-
<exi.nagasena.version>0000.0002.0038.0</exi.nagasena.version>
<felix.util.version>1.6.0</felix.util.version>
+ <features.test.version>1.5.0-SNAPSHOT</features.test.version>
<filtervalve.version>1.5.0-SNAPSHOT</filtervalve.version>
<findbugs.maven.plugin.version>2.4.0</findbugs.maven.plugin.version>
<flowprogrammer.northbound.version>0.5.0-SNAPSHOT</flowprogrammer.northbound.version>
<jmxGeneratorPath>src/main/yang-gen-config</jmxGeneratorPath>
<jolokia-bridge.version>0.1.0-SNAPSHOT</jolokia-bridge.version>
<jolokia.version>1.1.4</jolokia.version>
- <jsr305.api.version>2.0.1</jsr305.api.version>
<jsr311.api.version>1.1.1</jsr311.api.version>
<jsr311.v2.api.version>2.0</jsr311.v2.api.version>
<karaf.branding.version>1.1.0-SNAPSHOT</karaf.branding.version>
<leveldb.version>0.7</leveldb.version>
<leveldbjni.version>1.8</leveldbjni.version>
<lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
- <logback.version>1.0.9</logback.version>
<logging.bridge.version>0.5.0-SNAPSHOT</logging.bridge.version>
<maven.plugin.api.version>3.0.5</maven.plugin.api.version>
<mimepull.version>1.9.4</mimepull.version>
<northbound.jolokia.version>1.5.0-SNAPSHOT</northbound.jolokia.version>
<opendaylight-l2-types.version>2013.08.27.7-SNAPSHOT</opendaylight-l2-types.version>
<osgi-brandfragment.web.version>0.1.0-SNAPSHOT</osgi-brandfragment.web.version>
- <pax.exam.version>4.0.0</pax.exam.version>
<parboiled.version>1.1.6</parboiled.version>
<parboiled.scala.version>1.1.6</parboiled.scala.version>
<propertymavenplugin.version>1.0-alpha-2</propertymavenplugin.version>
<yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
<sshd-core.version>0.12.0</sshd-core.version>
<jmh.version>0.9.7</jmh.version>
+ <lmax.version>3.3.0</lmax.version>
</properties>
<dependencyManagement>
<artifactId>java-concurrent-hash-trie-map</artifactId>
<version>${ctrie.version}</version>
</dependency>
- <dependency>
- <groupId>com.google.code.findbugs</groupId>
- <artifactId>jsr305</artifactId>
- <version>${jsr305.api.version}</version>
- </dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.lmax</groupId>
+ <artifactId>disruptor</artifactId>
+ <version>${lmax.version}</version>
+ </dependency>
+
<!-- 3rd party dependencies needed by config-->
<dependency>
- <groupId>com.jcabi</groupId>
- <artifactId>jcabi-maven-slf4j</artifactId>
- <version>0.8</version>
+ <groupId>org.apache.maven</groupId>
+ <artifactId>maven-core</artifactId>
+ <version>3.1.1</version>
+ <scope>provided</scope>
</dependency>
<dependency>
<version>${yangtools.version}</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>features-test</artifactId>
+ <version>${features.test.version}</version>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
<artifactId>nagasena-rta</artifactId>
<version>${exi.nagasena.version}</version>
</dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.compendium</artifactId>
- <version>${osgi.compendium.version}</version>
- </dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- <version>${osgi.core.version}</version>
- </dependency>
<dependency>
<groupId>org.reflections</groupId>
<artifactId>reflections</artifactId>
/**
*
*/
+@Deprecated
public final class NeverReconnectStrategyFactoryModule extends org.opendaylight.controller.config.yang.protocol.framework.AbstractNeverReconnectStrategyFactoryModule
{
/**
*
*/
+@Deprecated
public class NeverReconnectStrategyFactoryModuleFactory extends org.opendaylight.controller.config.yang.protocol.framework.AbstractNeverReconnectStrategyFactoryModuleFactory
{
/**
*
*/
+@Deprecated
public final class ReconnectImmediatelyStrategyFactoryModule extends org.opendaylight.controller.config.yang.protocol.framework.AbstractReconnectImmediatelyStrategyFactoryModule
{
/**
*
*/
+@Deprecated
public class ReconnectImmediatelyStrategyFactoryModuleFactory extends org.opendaylight.controller.config.yang.protocol.framework.AbstractReconnectImmediatelyStrategyFactoryModuleFactory
{
/**
*
*/
+@Deprecated
public final class TimedReconnectStrategyFactoryModule extends org.opendaylight.controller.config.yang.protocol.framework.AbstractTimedReconnectStrategyFactoryModule
{
/**
*
*/
+@Deprecated
public class TimedReconnectStrategyFactoryModuleFactory extends org.opendaylight.controller.config.yang.protocol.framework.AbstractTimedReconnectStrategyFactoryModuleFactory
{
* Dispatcher class for creating servers and clients. The idea is to first create servers and clients and the run the
* start method that will handle sockets in different thread.
*/
+@Deprecated
public abstract class AbstractDispatcher<S extends ProtocolSession<?>, L extends SessionListener<?, ?, ?>> implements Closeable {
* @param connectStrategyFactory Factory for creating reconnection strategy for every reconnect attempt
*
* @return Future representing the reconnection task. It will report completion based on reestablishStrategy, e.g.
- * success if it indicates no further attempts should be made and failure if it reports an error
+ * success is never reported, only failure when it runs out of reconnection attempts.
*/
protected Future<Void> createReconnectingClient(final InetSocketAddress address, final ReconnectStrategyFactory connectStrategyFactory,
final PipelineInitializer<S> initializer) {
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
public abstract class AbstractProtocolSession<M> extends SimpleChannelInboundHandler<Object> implements ProtocolSession<M> {
private static final Logger LOG = LoggerFactory.getLogger(AbstractProtocolSession.class);
* @param <M> Protocol message type
* @param <S> Protocol session type, has to extend ProtocolSession<M>
*/
+@Deprecated
public abstract class AbstractSessionNegotiator<M, S extends AbstractProtocolSession<?>> extends ChannelInboundHandlerAdapter implements SessionNegotiator<S> {
private final Logger LOG = LoggerFactory.getLogger(AbstractSessionNegotiator.class);
private final Promise<S> promise;
* Utility ReconnectStrategy singleton, which will cause the reconnect process
* to always fail.
*/
+@Deprecated
@ThreadSafe
public final class NeverReconnectStrategy implements ReconnectStrategy {
private final EventExecutor executor;
*
* This interface should be implemented by a final class representing a protocol specific session.
*/
+@Deprecated
public interface ProtocolSession<T> extends Closeable {
@Override
void close();
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
@ThreadSafe
final class ProtocolSessionPromise<S extends ProtocolSession<?>> extends DefaultPromise<S> {
private static final Logger LOG = LoggerFactory.getLogger(ProtocolSessionPromise.class);
* Utility ReconnectStrategy singleton, which will cause the reconnect process
* to immediately schedule a reconnection attempt.
*/
+@Deprecated
@ThreadSafe
public final class ReconnectImmediatelyStrategy implements ReconnectStrategy {
private static final Logger LOG = LoggerFactory.getLogger(ReconnectImmediatelyStrategy.class);
import io.netty.util.concurrent.DefaultPromise;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;
import java.net.InetSocketAddress;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
final class ReconnectPromise<S extends ProtocolSession<?>, L extends SessionListener<?, ?, ?>> extends DefaultPromise<Void> {
private static final Logger LOG = LoggerFactory.getLogger(ReconnectPromise.class);
channel.pipeline().addLast(new ClosedChannelHandler(ReconnectPromise.this));
}
});
+
+ pending.addListener(new GenericFutureListener<Future<Object>>() {
+ @Override
+ public void operationComplete(Future<Object> future) throws Exception {
+ if (!future.isSuccess()) {
+ ReconnectPromise.this.setFailure(future.cause());
+ }
+ }
+ });
}
/**
* not attempt any more connection attempts and should abort the reconnection
* process.
*/
+@Deprecated
public interface ReconnectStrategy {
/**
* Query the strategy for the connect timeout.
* primarily useful for allowing injection of a specific type of strategy for
* on-demand use, pretty much like you would use a ThreadFactory.
*/
+@Deprecated
public interface ReconnectStrategyFactory {
/**
* Create a new ReconnectStrategy.
* implemented by a protocol specific abstract class, that is extended by
* a final class that implements the methods.
*/
+@Deprecated
public interface SessionListener<M, S extends ProtocolSession<?>, T extends TerminationReason> extends EventListener {
/**
* Fired when the session was established successfully.
* implemented by a protocol specific abstract class, that is extended by
* a final class that implements the methods.
*/
+@Deprecated
public interface SessionListenerFactory<T extends SessionListener<?, ?, ?>> {
/**
* Returns one session listener
*
* @param <T> Protocol session type.
*/
+@Deprecated
public interface SessionNegotiator<T extends ProtocolSession<?>> extends ChannelInboundHandler {
}
*
* @param <S> session type
*/
+@Deprecated
public interface SessionNegotiatorFactory<M, S extends ProtocolSession<?>, L extends SessionListener<?, ?, ?>> {
/**
* Create a new negotiator attached to a channel, which will notify
/**
* Marker interface for grouping session termination cause.
*/
+@Deprecated
public interface TerminationReason {
/**
*
* Both these caps can be combined, with the strategy giving up as soon as the first one is reached.
*/
+@Deprecated
@ThreadSafe
public final class TimedReconnectStrategy implements ReconnectStrategy {
private static final Logger LOG = LoggerFactory.getLogger(TimedReconnectStrategy.class);
<dependency>
<groupId>org.osgi</groupId>
<artifactId>org.osgi.core</artifactId>
+
+ <!-- We are adding generated code which is bound to OSGi, so we need
+ to make sure anyone depending on this artifact inherits it -->
+ <scope>compile</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
Set<String> getAvailableModuleNames();
-
- /**
- * Find all runtime beans
- *
- * @return objectNames
- */
- Set<ObjectName> lookupRuntimeBeans();
-
- /**
- * Find all runtime of specified module
- *
- * @param moduleName
- * of bean
- * @param instanceName
- * of bean
- * @return objectNames
- */
- Set<ObjectName> lookupRuntimeBeans(String moduleName, String instanceName);
-
}
*/
Set<String> getAvailableModuleFactoryQNames();
+ /**
+ * Find all runtime beans
+ *
+ * @return objectNames
+ */
+ Set<ObjectName> lookupRuntimeBeans();
+
+ /**
+ * Find all runtime beans of the specified module
+ *
+ * @param moduleName
+ * of bean
+ * @param instanceName
+ * of bean
+ * @return objectNames
+ */
+ Set<ObjectName> lookupRuntimeBeans(String moduleName, String instanceName);
}
public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
txLookupRegistry.checkConfigBeanExists(objectName);
}
+
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return txLookupRegistry.lookupRuntimeBeans();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(String moduleName,
+ String instanceName) {
+ return txLookupRegistry.lookupRuntimeBeans(moduleName, instanceName);
+ }
+
// --
/**
return ModuleQNameUtil.getQNames(allCurrentFactories);
}
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return lookupRuntimeBeans("*", "*");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(String moduleName,
+ String instanceName) {
+ String finalModuleName = moduleName == null ? "*" : moduleName;
+ String finalInstanceName = instanceName == null ? "*" : instanceName;
+ ObjectName namePattern = ObjectNameUtil.createRuntimeBeanPattern(
+ finalModuleName, finalInstanceName);
+ return transactionJMXRegistrator.queryNames(namePattern, null);
+ }
@Override
public String toString() {
throw new UnsupportedOperationException();
}
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(final String moduleName, final String instanceName) {
+ throw new UnsupportedOperationException();
+ }
+
@Override
public String toString() {
return "initial";
*/
package org.opendaylight.controller.config.manager.impl.dynamicmbean;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSet.Builder;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.util.ArrayList;
* @return list of found annotations
*/
static <T extends Annotation> List<T> findMethodAnnotationInSuperClassesAndIfcs(
- final Method setter, Class<T> annotationType,
- Set<Class<?>> inspectedInterfaces) {
- List<T> result = new ArrayList<T>();
+ final Method setter, final Class<T> annotationType,
+ final Set<Class<?>> inspectedInterfaces) {
+ Builder<T> result = ImmutableSet.builder();
Class<?> inspectedClass = setter.getDeclaringClass();
do {
try {
} catch (NoSuchMethodException e) {
inspectedClass = Object.class; // no need to go further
}
- } while (inspectedClass.equals(Object.class) == false);
+ } while (!inspectedClass.equals(Object.class));
+
// inspect interfaces
for (Class<?> ifc : inspectedInterfaces) {
if (ifc.isInterface() == false) {
}
}
- return result;
+ return new ArrayList<>(result.build());
}
/**
* @return list of found annotations
*/
static <T extends Annotation> List<T> findClassAnnotationInSuperClassesAndIfcs(
- Class<?> clazz, Class<T> annotationType, Set<Class<?>> interfaces) {
+ final Class<?> clazz, final Class<T> annotationType, final Set<Class<?>> interfaces) {
List<T> result = new ArrayList<T>();
Class<?> declaringClass = clazz;
do {
* @return empty string if no annotation is found, or list of descriptions
* separated by newline
*/
- static String aggregateDescriptions(List<Description> descriptions) {
+ static String aggregateDescriptions(final List<Description> descriptions) {
StringBuilder builder = new StringBuilder();
for (Description d : descriptions) {
if (builder.length() != 0) {
package org.opendaylight.controller.config.manager.impl.util;
import static org.junit.Assert.assertEquals;
-
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Collections;
public class InterfacesHelperTest {
- interface SuperA {
+ public interface SuperA {
}
- interface SuperBMXBean {
+ public interface SuperBMXBean {
}
- interface SuperC extends SuperA, SuperBMXBean {
+ public interface SuperC extends SuperA, SuperBMXBean {
}
- class SuperClass implements SuperC {
+ public class SuperClass implements SuperC {
}
@MXBean
- interface SubA {
+ public interface SubA {
}
@ServiceInterfaceAnnotation(value = "a", osgiRegistrationType = SuperA.class, namespace = "n", revision = "r", localName = "l")
- interface Service extends AbstractServiceInterface{}
+ public interface Service extends AbstractServiceInterface{}
@ServiceInterfaceAnnotation(value = "b", osgiRegistrationType = SuperC.class, namespace = "n", revision = "r", localName = "l")
- interface SubService extends Service{}
+ public interface SubService extends Service{}
- abstract class SubClass extends SuperClass implements SubA, Module {
+ public abstract class SubClass extends SuperClass implements SubA, Module {
}
- abstract class SubClassWithService implements SubService, Module {
+ public abstract class SubClassWithService implements SubService, Module {
}
</filesets>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>add-yang-sources</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${jmxGeneratorPath}</source>
+ <source>${salGeneratorPath}</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
</plugins>
</pluginManagement>
</build>
@Override
public void run() {
List<Feature> toInstall = new ArrayList<Feature>();
- FeatureEvent event;
+ FeatureEvent event = null;
boolean interuppted = false;
while(true) {
try {
LOG.error("ConfigPushingRunnable - interupted");
interuppted = true;
} catch (Exception e) {
- LOG.error("Exception while processing features ", e);
+ LOG.error("Exception while processing features {} event {}", toInstall, event, e);
}
}
}
<artifactId>config-plugin-parent</artifactId>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<build>
<pluginManagement>
--- /dev/null
+package org.opendaylight.controller.config.util;
+
+import javax.management.ObjectName;
+
+/**
+ * Reads the current value of a named attribute of a config bean (mmarsale, 20.2.2015).
+ */
+public interface BeanReader {
+ Object getAttributeCurrentValue(ObjectName on, String attributeName);
+}
import javax.management.ObjectName;
import org.opendaylight.controller.config.api.jmx.ConfigRegistryMXBean;
-public interface ConfigRegistryClient extends ConfigRegistryMXBean {
+public interface ConfigRegistryClient extends ConfigRegistryMXBean, BeanReader {
ConfigTransactionClient createTransaction();
Object invokeMethod(ObjectName on, String name, Object[] params,
String[] signature);
- Object getAttributeCurrentValue(ObjectName on, String attributeName);
-
}
} catch (AttributeNotFoundException | InstanceNotFoundException
| MBeanException | ReflectionException e) {
throw new RuntimeException("Unable to get attribute "
- + attributeName + " for " + on, e);
+ + attributeName + " for " + on + ". Available beans: " + lookupConfigBeans(), e);
}
}
import org.opendaylight.controller.config.api.jmx.ConfigTransactionControllerMXBean;
public interface ConfigTransactionClient extends
- ConfigTransactionControllerMXBean {
+ ConfigTransactionControllerMXBean, BeanReader {
CommitStatus commit() throws ConflictingVersionException,
ValidationException;
* @param on - ObjectName of the Object from which the attribute should be read
* @param jmxName - name of the attribute to be read
*
- * @return Attribute of Object on with attribute name jmxName
+ * @return value of the attribute named jmxName on the bean identified by on
*/
Attribute getAttribute(ObjectName on, String jmxName);
}
configTransactionControllerMXBeanProxy.checkServiceReferenceExists(objectName);
}
+ @Override
+ public Attribute getAttribute(ObjectName on, String attrName) {
+ if (ObjectNameUtil.getTransactionName(on) == null) {
+ throw new IllegalArgumentException("Not in transaction instance "
+ + on + ", no transaction name present");
+ }
+
+ try {
+ return new Attribute(attrName, configMBeanServer.getAttribute(on,attrName));
+ } catch (JMException e) {
+ throw new IllegalStateException("Unable to get attribute "
+ + attrName + " for " + on, e);
+ }
+ }
+
+ @Override
+ public Object getAttributeCurrentValue(ObjectName on, String attrName) {
+ return getAttribute(on, attrName).getValue();
+ }
+
@Override
public void validateBean(ObjectName configBeanON)
throws ValidationException {
}
@Override
- public Attribute getAttribute(ObjectName on, String attrName) {
- if (ObjectNameUtil.getTransactionName(on) == null) {
- throw new IllegalArgumentException("Not in transaction instance "
- + on + ", no transaction name present");
- }
+ public Set<String> getAvailableModuleFactoryQNames() {
+ return configTransactionControllerMXBeanProxy.getAvailableModuleFactoryQNames();
+ }
- try {
- return new Attribute(attrName, configMBeanServer.getAttribute(on,attrName));
- } catch (JMException e) {
- throw new IllegalStateException("Unable to get attribute "
- + attrName + " for " + on, e);
- }
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return configTransactionControllerMXBeanProxy.lookupRuntimeBeans();
}
@Override
- public Set<String> getAvailableModuleFactoryQNames() {
- return configTransactionControllerMXBeanProxy.getAvailableModuleFactoryQNames();
+ public Set<ObjectName> lookupRuntimeBeans(final String moduleName, final String instanceName) {
+ return configTransactionControllerMXBeanProxy.lookupRuntimeBeans(moduleName, instanceName);
}
}
package org.opendaylight.controller.config.util;
import com.google.common.collect.Sets;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
return Sets.newHashSet("availableModuleFactoryQNames");
}
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans() {
+ return Collections.emptySet();
+ }
+
+ @Override
+ public Set<ObjectName> lookupRuntimeBeans(final String moduleName, final String instanceName) {
+ return Collections.emptySet();
+ }
+
@Override
public ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
return conf3;
<artifactId>logback-config-loader</artifactId>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<artifactId>logback-config</artifactId>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<artifactId>netty-config-api</artifactId>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
<description>Configuration Wrapper around netty's event executor</description>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
<description>Configuration Wrapper around netty's event group</description>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
<description>Configuration Wrapper around netty's timer</description>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
+
<modules>
<module>config-api</module>
<module>config-manager</module>
<artifactId>threadpool-config-api</artifactId>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<artifactId>threadpool-config-impl</artifactId>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<artifactId>guava</artifactId>
</dependency>
- <dependency>
- <groupId>com.jcabi</groupId>
- <artifactId>jcabi-maven-slf4j</artifactId>
- </dependency>
-
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<artifactId>commons-lang3</artifactId>
</dependency>
- <dependency>
- <groupId>org.codehaus.gmaven.runtime</groupId>
- <artifactId>gmaven-runtime-2.0</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.sonatype.gossip</groupId>
- <artifactId>gossip</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>yang-jmx-generator</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-maven-plugin-spi</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.maven</groupId>
+ <artifactId>maven-core</artifactId>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
-import org.apache.maven.plugin.logging.Log;
import org.apache.maven.project.MavenProject;
import org.opendaylight.controller.config.spi.ModuleFactory;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang2sources.spi.CodeGenerator;
+import org.opendaylight.yangtools.yang2sources.spi.BasicCodeGenerator;
+import org.opendaylight.yangtools.yang2sources.spi.MavenProjectAware;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.slf4j.impl.StaticLoggerBinder;
/**
* This class interfaces with yang-maven-plugin. Gets parsed yang modules in
* {@link SchemaContext}, and parameters form the plugin configuration, and
* writes service interfaces and/or modules.
*/
-public class JMXGenerator implements CodeGenerator {
+public class JMXGenerator implements BasicCodeGenerator, MavenProjectAware {
+ private static final class NamespaceMapping {
+ private final String namespace, packageName;
+ public NamespaceMapping(final String namespace, final String packagename) {
+ this.namespace = namespace;
+ this.packageName = packagename;
+ }
+ }
+
+ @VisibleForTesting
static final String NAMESPACE_TO_PACKAGE_DIVIDER = "==";
+ @VisibleForTesting
static final String NAMESPACE_TO_PACKAGE_PREFIX = "namespaceToPackage";
+ @VisibleForTesting
static final String MODULE_FACTORY_FILE_BOOLEAN = "moduleFactoryFile";
+ private static final Logger LOG = LoggerFactory.getLogger(JMXGenerator.class);
+ private static final Pattern NAMESPACE_MAPPING_PATTERN = Pattern.compile("(.+)" + NAMESPACE_TO_PACKAGE_DIVIDER + "(.+)");
+
private PackageTranslator packageTranslator;
private final CodeWriter codeWriter;
- private static final Logger LOG = LoggerFactory
- .getLogger(JMXGenerator.class);
private Map<String, String> namespaceToPackageMapping;
private File resourceBaseDir;
private File projectBaseDir;
private boolean generateModuleFactoryFile = true;
public JMXGenerator() {
- this.codeWriter = new CodeWriter();
+ this(new CodeWriter());
}
- public JMXGenerator(CodeWriter codeWriter) {
+ public JMXGenerator(final CodeWriter codeWriter) {
this.codeWriter = codeWriter;
}
@Override
- public Collection<File> generateSources(SchemaContext context,
- File outputBaseDir, Set<Module> yangModulesInCurrentMavenModule) {
+ public Collection<File> generateSources(final SchemaContext context,
+ final File outputBaseDir, final Set<Module> yangModulesInCurrentMavenModule) {
Preconditions.checkArgument(context != null, "Null context received");
Preconditions.checkArgument(outputBaseDir != null,
return generatedFiles.getFiles();
}
- static File concatFolders(File projectBaseDir, String... folderNames) {
+ @VisibleForTesting
+ static File concatFolders(final File projectBaseDir, final String... folderNames) {
StringBuilder b = new StringBuilder();
for (String folder : folderNames) {
b.append(folder);
}
@Override
- public void setAdditionalConfig(Map<String, String> additionalCfg) {
- if (LOG != null) {
- LOG.debug(getClass().getCanonicalName(),
- ": Additional configuration received: ",
- additionalCfg.toString());
- }
+ public void setAdditionalConfig(final Map<String, String> additionalCfg) {
+ LOG.debug("{}: Additional configuration received: {}", getClass().getCanonicalName(), additionalCfg);
this.namespaceToPackageMapping = extractNamespaceMapping(additionalCfg);
this.generateModuleFactoryFile = extractModuleFactoryBoolean(additionalCfg);
}
private boolean extractModuleFactoryBoolean(
- Map<String, String> additionalCfg) {
+ final Map<String, String> additionalCfg) {
String bool = additionalCfg.get(MODULE_FACTORY_FILE_BOOLEAN);
if (bool == null) {
return true;
return true;
}
- @Override
- public void setLog(Log log) {
- StaticLoggerBinder.getSingleton().setMavenLog(log);
- }
-
private static Map<String, String> extractNamespaceMapping(
- Map<String, String> additionalCfg) {
+ final Map<String, String> additionalCfg) {
Map<String, String> namespaceToPackage = Maps.newHashMap();
for (String key : additionalCfg.keySet()) {
if (key.startsWith(NAMESPACE_TO_PACKAGE_PREFIX)) {
return namespaceToPackage;
}
- static Pattern namespaceMappingPattern = Pattern.compile("(.+)"
- + NAMESPACE_TO_PACKAGE_DIVIDER + "(.+)");
-
- private static NamespaceMapping extractNamespaceMapping(String mapping) {
- Matcher matcher = namespaceMappingPattern.matcher(mapping);
- Preconditions
- .checkArgument(matcher.matches(), String.format("Namespace to package mapping:%s is in invalid " +
- "format, requested format is: %s", mapping, namespaceMappingPattern));
+ private static NamespaceMapping extractNamespaceMapping(final String mapping) {
+ Matcher matcher = NAMESPACE_MAPPING_PATTERN.matcher(mapping);
+ Preconditions.checkArgument(matcher.matches(),
+ "Namespace to package mapping:%s is in invalid format, requested format is: %s",
+ mapping, NAMESPACE_MAPPING_PATTERN);
return new NamespaceMapping(matcher.group(1), matcher.group(2));
}
- private static class NamespaceMapping {
- public NamespaceMapping(String namespace, String packagename) {
- this.namespace = namespace;
- this.packageName = packagename;
- }
-
- private final String namespace, packageName;
- }
-
@Override
- public void setResourceBaseDir(File resourceDir) {
+ public void setResourceBaseDir(final File resourceDir) {
this.resourceBaseDir = resourceDir;
}
@Override
- public void setMavenProject(MavenProject project) {
+ public void setMavenProject(final MavenProject project) {
this.projectBaseDir = project.getBasedir();
-
- if (LOG != null) {
- LOG.debug(getClass().getCanonicalName(), " project base dir: ",
- projectBaseDir);
- }
+ LOG.debug("{}: project base dir: {}", getClass().getCanonicalName(), projectBaseDir);
}
@VisibleForTesting
static class GeneratedFilesTracker {
private final Set<File> files = Sets.newHashSet();
- void addFile(File file) {
+ void addFile(final File file) {
if (files.contains(file)) {
List<File> undeletedFiles = Lists.newArrayList();
for (File presentFile : files) {
files.add(file);
}
- void addFile(Collection<File> files) {
+ void addFile(final Collection<File> files) {
for (File file : files) {
addFile(file);
}
import org.opendaylight.controller.config.yangjmxgenerator.plugin.ftl.model.MethodDefinition;
import org.opendaylight.controller.config.yangjmxgenerator.plugin.ftl.model.ModuleField;
import org.opendaylight.controller.config.yangjmxgenerator.plugin.util.FullyQualifiedNameHelper;
-import org.opendaylight.yangtools.binding.generator.util.BindingGeneratorUtil;
import org.opendaylight.yangtools.sal.binding.model.api.ParameterizedType;
import org.opendaylight.yangtools.sal.binding.model.api.Type;
+import org.opendaylight.yangtools.yang.binding.BindingMapping;
public class TemplateFactory {
* bean as value that should be persisted from this instance.
*/
public static Map<String, FtlTemplate> getTOAndMXInterfaceFtlFiles(
- RuntimeBeanEntry entry) {
+ final RuntimeBeanEntry entry) {
Map<String, FtlTemplate> result = new HashMap<>();
{ // create GeneralInterfaceFtlFile for runtime MXBean. Attributes will
// be transformed to getter methods
}
// FIXME: put into Type.toString
- static String serializeType(Type type, boolean addWildcards) {
+ static String serializeType(final Type type, final boolean addWildcards) {
if (type instanceof ParameterizedType){
ParameterizedType parameterizedType = (ParameterizedType) type;
StringBuilder sb = new StringBuilder();
}
}
- static String serializeType(Type type) {
+ static String serializeType(final Type type) {
return serializeType(type, false);
}
- private static String getReturnType(AttributeIfc attributeIfc) {
+ private static String getReturnType(final AttributeIfc attributeIfc) {
String returnType;
if (attributeIfc instanceof TypedAttribute) {
Type type = ((TypedAttribute) attributeIfc).getType();
}
public static GeneralInterfaceTemplate serviceInterfaceFromSie(
- ServiceInterfaceEntry sie) {
+ final ServiceInterfaceEntry sie) {
List<String> extendedInterfaces = Lists
.newArrayList(AbstractServiceInterface.class.getCanonicalName());
}
public static AbstractFactoryTemplate abstractFactoryTemplateFromMbe(
- ModuleMXBeanEntry mbe) {
+ final ModuleMXBeanEntry mbe) {
AbstractFactoryAttributesProcessor attrProcessor = new AbstractFactoryAttributesProcessor();
attrProcessor.processAttributes(mbe.getAttributes(),
mbe.getPackageName());
}
public static AbstractModuleTemplate abstractModuleTemplateFromMbe(
- ModuleMXBeanEntry mbe) {
+ final ModuleMXBeanEntry mbe) {
AbstractModuleAttributesProcessor attrProcessor = new AbstractModuleAttributesProcessor(mbe.getAttributes());
List<ModuleField> moduleFields = attrProcessor.getModuleFields();
}
public static StubFactoryTemplate stubFactoryTemplateFromMbe(
- ModuleMXBeanEntry mbe) {
+ final ModuleMXBeanEntry mbe) {
return new StubFactoryTemplate(getHeaderFromEntry(mbe),
mbe.getPackageName(), mbe.getStubFactoryName(),
mbe.getFullyQualifiedName(mbe.getAbstractFactoryName())
}
public static GeneralInterfaceTemplate mXBeanInterfaceTemplateFromMbe(
- ModuleMXBeanEntry mbe) {
+ final ModuleMXBeanEntry mbe) {
MXBeanInterfaceAttributesProcessor attrProcessor = new MXBeanInterfaceAttributesProcessor();
attrProcessor.processAttributes(mbe.getAttributes());
GeneralInterfaceTemplate ifcTemplate = new GeneralInterfaceTemplate(
}
public static Map<String, GeneralClassTemplate> tOsFromMbe(
- ModuleMXBeanEntry mbe) {
+ final ModuleMXBeanEntry mbe) {
Map<String, GeneralClassTemplate> retVal = Maps.newHashMap();
TOAttributesProcessor processor = new TOAttributesProcessor();
processor.processAttributes(mbe.getAttributes());
}
public static Map<String, GeneralClassTemplate> tOsFromRbe(
- RuntimeBeanEntry rbe) {
+ final RuntimeBeanEntry rbe) {
Map<String, GeneralClassTemplate> retVal = Maps.newHashMap();
TOAttributesProcessor processor = new TOAttributesProcessor();
Map<String, AttributeIfc> yangPropertiesToTypesMap = Maps.newHashMap(rbe.getYangPropertiesToTypesMap());
return retVal;
}
- private static Header getHeaderFromEntry(AbstractEntry mbe) {
+ private static Header getHeaderFromEntry(final AbstractEntry mbe) {
return new Header(mbe.getYangModuleName(), mbe.getYangModuleLocalname());
}
private final List<TOInternal> tos = Lists.newArrayList();
- void processAttributes(Map<String, AttributeIfc> attributes) {
+ void processAttributes(final Map<String, AttributeIfc> attributes) {
for (Entry<String, AttributeIfc> attrEntry : attributes.entrySet()) {
AttributeIfc attributeIfc = attrEntry.getValue();
if (attributeIfc instanceof TOAttribute) {
}
}
- private void createTOInternal(TOAttribute toAttribute) {
+ private void createTOInternal(final TOAttribute toAttribute) {
Map<String, AttributeIfc> attrs = toAttribute.getCapitalizedPropertiesToTypesMap();
// recursive processing of TO's attributes
private List<Field> fields;
private List<MethodDefinition> methods;
- public TOInternal(Type type, Map<String, AttributeIfc> attrs) {
+ public TOInternal(final Type type, final Map<String, AttributeIfc> attrs) {
this(type.getFullyQualifiedName(), type.getName(), attrs, type.getPackageName());
}
- public TOInternal(String fullyQualifiedName, String name,
- Map<String, AttributeIfc> attrs, String packageName) {
+ public TOInternal(final String fullyQualifiedName, final String name,
+ final Map<String, AttributeIfc> attrs, final String packageName) {
this.fullyQualifiedName = fullyQualifiedName;
this.name = name;
processAttrs(attrs, packageName);
private final static String dependencyResolverVarName = "dependencyResolver";
private final static String dependencyResolverInjectMethodName = "injectDependencyResolver";
- private void processAttrs(Map<String, AttributeIfc> attrs, String packageName) {
+ private void processAttrs(final Map<String, AttributeIfc> attrs, final String packageName) {
fields = Lists.newArrayList();
methods = Lists.newArrayList();
for (Entry<String, AttributeIfc> attrEntry : attrs.entrySet()) {
String innerName = attrEntry.getKey();
- String varName = BindingGeneratorUtil
- .parseToValidParamName(attrEntry.getKey());
+ String varName = BindingMapping.getPropertyName(attrEntry.getKey());
String fullyQualifiedName, nullableDefault = null;
if (attrEntry.getValue() instanceof TypedAttribute) {
private static class MXBeanInterfaceAttributesProcessor {
private final List<MethodDeclaration> methods = Lists.newArrayList();
- void processAttributes(Map<String, AttributeIfc> attributes) {
+ void processAttributes(final Map<String, AttributeIfc> attributes) {
for (Entry<String, AttributeIfc> attrEntry : attributes.entrySet()) {
String returnType;
AttributeIfc attributeIfc = attrEntry.getValue();
MethodDeclaration getter = new MethodDeclaration(returnType,
getterName, Collections.<Field> emptyList());
- String varName = BindingGeneratorUtil
- .parseToValidParamName(attrEntry.getKey());
+ String varName = BindingMapping.getPropertyName(attrEntry.getKey());
String setterName = "set"
+ attributeIfc.getUpperCaseCammelCase();
MethodDeclaration setter = new MethodDeclaration("void",
private final List<Field> fields = Lists.newArrayList();
- void processAttributes(Map<String, AttributeIfc> attributes,
- String packageName) {
+ void processAttributes(final Map<String, AttributeIfc> attributes,
+ final String packageName) {
for (Entry<String, AttributeIfc> attrEntry : attributes.entrySet()) {
String type;
String nullableDefaultWrapped = null;
private final List<ModuleField> moduleFields;
private final List<MethodDefinition> methods;
- private Holder(List<ModuleField> moduleFields, List<MethodDefinition> methods) {
+ private Holder(final List<ModuleField> moduleFields, final List<MethodDefinition> methods) {
this.moduleFields = Collections.unmodifiableList(moduleFields);
this.methods = Collections.unmodifiableList(methods);
}
private final Holder holder;
- private AbstractModuleAttributesProcessor(Map<String, AttributeIfc> attributes) {
+ private AbstractModuleAttributesProcessor(final Map<String, AttributeIfc> attributes) {
this.holder = processAttributes(attributes);
}
- private static Holder processAttributes(Map<String, AttributeIfc> attributes) {
+ private static Holder processAttributes(final Map<String, AttributeIfc> attributes) {
List<ModuleField> moduleFields = new ArrayList<>();
List<MethodDefinition> methods = new ArrayList<>();
for (Entry<String, AttributeIfc> attrEntry : attributes.entrySet()) {
}
}
- String varName = BindingGeneratorUtil
- .parseToValidParamName(attrEntry.getKey());
+ String varName = BindingMapping.getPropertyName(attrEntry.getKey());
ModuleField field;
if (isIdentity) {
}
- private static boolean needsDepResolver(AttributeIfc value) {
+ private static boolean needsDepResolver(final AttributeIfc value) {
if(value instanceof TOAttribute) {
return true;
}
return false;
}
- private static String getInnerTypeFromIdentity(Type type) {
+ private static String getInnerTypeFromIdentity(final Type type) {
Preconditions.checkArgument(type instanceof ParameterizedType);
Type[] args = ((ParameterizedType) type).getActualTypeArguments();
Preconditions.checkArgument(args.length ==1);
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
-
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.commons.io.FileUtils;
-import org.apache.maven.plugin.logging.Log;
import org.apache.maven.project.MavenProject;
import org.eclipse.jdt.core.JavaCore;
import org.eclipse.jdt.core.compiler.IProblem;
File targetDir = new File(generatorOutputPath, "target");
generatedResourcesDir = new File(targetDir, "generated-resources");
jmxGenerator.setResourceBaseDir(generatedResourcesDir);
- Log mockedLog = mock(Log.class);
- doReturn(false).when(mockedLog).isDebugEnabled();
- doNothing().when(mockedLog).debug(any(CharSequence.class));
- doNothing().when(mockedLog).info(any(CharSequence.class));
- doNothing().when(mockedLog).error(any(CharSequence.class),
- any(Throwable.class));
- jmxGenerator.setLog(mockedLog);
MavenProject project = mock(MavenProject.class);
doReturn(generatorOutputPath).when(project).getBasedir();
jmxGenerator.setMavenProject(project);
verifyModuleFactoryFile(false);
}
- private void verifyModuleFactoryFile(boolean shouldBePresent) {
+ private void verifyModuleFactoryFile(final boolean shouldBePresent) {
File factoryFile = new File(generatedResourcesDir, "META-INF"
+ File.separator + "services" + File.separator
+ ModuleFactory.class.getName());
- if (!shouldBePresent)
+ if (!shouldBePresent) {
assertFalse("Factory file should not be generated",
factoryFile.exists());
- else
+ } else {
assertTrue("Factory file should be generated", factoryFile.exists());
+ }
}
- public static List<String> toFileNames(Collection<File> files) {
+ public static List<String> toFileNames(final Collection<File> files) {
List<String> result = new ArrayList<>();
for (File f : files) {
result.add(f.getName());
new Predicate<File>() {
@Override
- public boolean apply(File input) {
+ public boolean apply(final File input) {
return input.getName().endsWith("xml");
}
});
new Predicate<File>() {
@Override
- public boolean apply(File input) {
+ public boolean apply(final File input) {
return input.getName().endsWith("java");
}
});
String name = file.getName();
MbeASTVisitor visitor = new MbeASTVisitor();
verifiers.put(name, visitor);
- if (name.equals("AbstractDynamicThreadPoolModule.java"))
+ if (name.equals("AbstractDynamicThreadPoolModule.java")) {
abstractDynamicThreadPoolModuleVisitor = visitor;
- if (name.equals("AsyncEventBusModuleMXBean.java"))
+ }
+ if (name.equals("AsyncEventBusModuleMXBean.java")) {
asyncEventBusModuleMXBeanVisitor = visitor;
- if (name.equals("AbstractNamingThreadFactoryModuleFactory.java"))
+ }
+ if (name.equals("AbstractNamingThreadFactoryModuleFactory.java")) {
abstractNamingThreadFactoryModuleFactoryVisitor = visitor;
- if (name.equals("AsyncEventBusModule.java"))
+ }
+ if (name.equals("AsyncEventBusModule.java")) {
asyncEventBusModuleVisitor = visitor;
- if (name.equals("EventBusModuleFactory.java"))
+ }
+ if (name.equals("EventBusModuleFactory.java")) {
eventBusModuleFactoryVisitor = visitor;
+ }
}
processGeneratedCode(javaFiles, verifiers);
}
- private void verifyXmlFiles(Collection<File> xmlFiles) throws Exception {
+ private void verifyXmlFiles(final Collection<File> xmlFiles) throws Exception {
ErrorHandler errorHandler = new ErrorHandler() {
@Override
- public void warning(SAXParseException exception)
+ public void warning(final SAXParseException exception)
throws SAXException {
fail("Generated blueprint xml is not well formed "
+ exception.getMessage());
}
@Override
- public void fatalError(SAXParseException exception)
+ public void fatalError(final SAXParseException exception)
throws SAXException {
fail("Generated blueprint xml is not well formed "
+ exception.getMessage());
}
@Override
- public void error(SAXParseException exception) throws SAXException {
+ public void error(final SAXParseException exception) throws SAXException {
fail("Generated blueprint xml is not well formed "
+ exception.getMessage());
}
}
- private void assertEventBusModuleFactory(MbeASTVisitor visitor) {
+ private void assertEventBusModuleFactory(final MbeASTVisitor visitor) {
assertEquals(PackageTranslatorTest.EXPECTED_PACKAGE_PREFIX
+ ".threads.java", visitor.packageName);
assertEquals("EventBusModuleFactory", visitor.type);
visitor.methodJavadoc.size());
}
- private void assertAsyncEventBusModule(MbeASTVisitor visitor) {
+ private void assertAsyncEventBusModule(final MbeASTVisitor visitor) {
assertEquals(PackageTranslatorTest.EXPECTED_PACKAGE_PREFIX
+ ".threads.java", visitor.packageName);
assertEquals("AsyncEventBusModule", visitor.type);
}
private void assertAbstractNamingThreadFactoryModuleFactory(
- MbeASTVisitor visitor) {
+ final MbeASTVisitor visitor) {
assertEquals(PackageTranslatorTest.EXPECTED_PACKAGE_PREFIX
+ ".threads.java", visitor.packageName);
assertEquals("AbstractNamingThreadFactoryModuleFactory", visitor.type);
}
- private void assertFactoryMethods(Set<String> methods, int expectedSize) {
+ private void assertFactoryMethods(final Set<String> methods, final int expectedSize) {
List<ArgumentAssertion> args = Lists.newArrayList();
ArgumentAssertion oldInstanceArg = new ArgumentAssertion(DynamicMBeanWithInstance.class.getCanonicalName(), "old");
}
- private void assertMethodPresent(Set<String> methods, MethodAssertion methodAssertion) {
+ private void assertMethodPresent(final Set<String> methods, final MethodAssertion methodAssertion) {
assertTrue(String.format("Generated methods did not contain %s, generated methods: %s",
methodAssertion.toString(), methods), methods.contains(methodAssertion.toString()));
}
- private void assertAsyncEventBusModuleMXBean(MbeASTVisitor visitor) {
+ private void assertAsyncEventBusModuleMXBean(final MbeASTVisitor visitor) {
assertEquals(PackageTranslatorTest.EXPECTED_PACKAGE_PREFIX
+ ".threads.java", visitor.packageName);
assertEquals("AsyncEventBusModuleMXBean", visitor.type);
}
- private void assertAbstractDynamicThreadPoolModule(MbeASTVisitor visitor) {
+ private void assertAbstractDynamicThreadPoolModule(final MbeASTVisitor visitor) {
assertEquals(PackageTranslatorTest.EXPECTED_PACKAGE_PREFIX
+ ".threads.java", visitor.packageName);
assertNotNull(visitor.javadoc);
visitor.methodJavadoc.get("void setMaximumSize(java.lang.Long maximumSize)"));
}
- private void assertDeclaredField(Set<String> fieldDeclarations,
- String declaration) {
+ private void assertDeclaredField(final Set<String> fieldDeclarations,
+ final String declaration) {
assertTrue("Missing field " + declaration + ", got: "
+ fieldDeclarations,
fieldDeclarations.contains(declaration + ";\n"));
protected Map<String, String> methodDescriptions = Maps.newHashMap();
@Override
- public boolean visit(PackageDeclaration node) {
+ public boolean visit(final PackageDeclaration node) {
packageName = node.getName().toString();
return super.visit(node);
}
@Override
- public boolean visit(NormalAnnotation node) {
+ public boolean visit(final NormalAnnotation node) {
if (node.getTypeName().toString()
.equals(Description.class.getCanonicalName())) {
if (node.getParent() instanceof TypeDeclaration) {
}
@Override
- public boolean visit(TypeDeclaration node) {
+ public boolean visit(final TypeDeclaration node) {
javadoc = node.getJavadoc() == null ? null : node.getJavadoc()
.toString();
type = node.getName().toString();
private final Map<String, String> methodJavadoc = Maps.newHashMap();
@Override
- public boolean visit(NormalAnnotation node) {
+ public boolean visit(final NormalAnnotation node) {
boolean result = super.visit(node);
if (node.getTypeName().toString()
.equals(RequireInterface.class.getCanonicalName())
}
@Override
- public boolean visit(FieldDeclaration node) {
+ public boolean visit(final FieldDeclaration node) {
fieldDeclarations.add(node.toString());
return super.visit(node);
}
@Override
- public boolean visit(MethodDeclaration node) {
- if (node.isConstructor())
+ public boolean visit(final MethodDeclaration node) {
+ if (node.isConstructor()) {
constructors.add(node.toString());
- else {
+ } else {
String methodSignature = node.getReturnType2() + " " + node.getName() + "(";
boolean first = true;
for (Object o : node.parameters()) {
}
@Override
- public boolean visit(TypeDeclaration node) {
+ public boolean visit(final TypeDeclaration node) {
boolean visit = super.visit(node);
List<?> superIfcs = node.superInterfaceTypes();
implmts = superIfcs != null && !superIfcs.isEmpty() ? superIfcs
}
- private void assertContains(String source, String... contained) {
+ private void assertContains(final String source, final String... contained) {
for (String string : contained) {
assertThat(source, containsString(string));
}
}
- private void processGeneratedCode(Collection<File> files,
- Map<String, ASTVisitor> verifiers) throws IOException {
+ private void processGeneratedCode(final Collection<File> files,
+ final Map<String, ASTVisitor> verifiers) throws IOException {
ASTParser parser = ASTParser.newParser(AST.JLS3);
Map<?, ?> options = JavaCore.getOptions();
JavaCore.setComplianceOptions(JavaCore.VERSION_1_7, options);
for (IProblem c : cu.getProblems()) {
// 1610613332 = Syntax error, annotations are only available if
// source level is 5.0
- if (c.getID() == 1610613332)
+ if (c.getID() == 1610613332) {
continue;
+ }
// 1610613332 = Syntax error, parameterized types are only
// available if source level is 5.0
- if (c.getID() == 1610613329)
+ if (c.getID() == 1610613329) {
continue;
- if (c.getID() == 1610613328) // 'for each' statements are only available if source level is 5.0
+ }
+ if (c.getID() == 1610613328) {
continue;
+ }
fail("Error in generated source code " + file + ":"
+ c.getSourceLineNumber() + " id: " + c.getID() + " message:" + c.toString());
}
ASTVisitor visitor = verifiers.get(file.getName());
- if (visitor == null)
+ if (visitor == null) {
fail("Unknown generated file " + file.getName());
+ }
cu.accept(visitor);
}
}
- public static char[] readFileAsChars(File file) throws IOException {
+ public static char[] readFileAsChars(final File file) throws IOException {
List<String> readLines = Files
.readLines(file, Charset.forName("utf-8"));
char[] retVal = new char[0];
private static class MethodAssertion extends ArgumentAssertion{
- private List<ArgumentAssertion> arguments;
+ private final List<ArgumentAssertion> arguments;
- MethodAssertion(String type, String name, List<ArgumentAssertion> arguments) {
+ MethodAssertion(final String type, final String name, final List<ArgumentAssertion> arguments) {
super(type, name);
this.arguments = arguments;
}
- MethodAssertion(String type, String name) {
+ MethodAssertion(final String type, final String name) {
this(type, name, Collections.<ArgumentAssertion>emptyList());
}
for (ArgumentAssertion argument : arguments) {
sb.append(argument.type).append(' ');
sb.append(argument.name);
- if(++i != arguments.size())
+ if(++i != arguments.size()) {
sb.append(',');
+ }
}
sb.append(')');
return sb.toString();
protected final String type, name;
- private ArgumentAssertion(String type, String name) {
+ private ArgumentAssertion(final String type, final String name) {
this.type = type;
this.name = name;
}
assertThat(runtimeBeans.size(), is(4));
{
- RuntimeBeanEntry streamRB = findFirstByYangName(runtimeBeans,
- "stream");
+ RuntimeBeanEntry streamRB = findFirstByNamePrefix(runtimeBeans,
+ "ThreadStream");
assertNotNull(streamRB);
assertFalse(streamRB.getKeyYangName().isPresent());
assertFalse(streamRB.getKeyJavaName().isPresent());
+ " in " + runtimeBeans);
}
+ protected RuntimeBeanEntry findFirstByNamePrefix(final Collection<RuntimeBeanEntry> runtimeBeans, final String namePrefix) {
+ for (RuntimeBeanEntry rb : runtimeBeans) {
+ if (namePrefix.equals(rb.getJavaNamePrefix())) {
+ return rb;
+ }
+ }
+
+ throw new IllegalArgumentException("Name prefix not found:" + namePrefix
+ + " in " + runtimeBeans);
+ }
+
@Test
public void testGetWhenConditionMatcher() {
assertMatches("config",
assertThat(threadRB.getRpcs().size(), is(2));
}
{
- RuntimeBeanEntry streamRB = findFirstByYangName(runtimeBeans,
- "stream");
+ RuntimeBeanEntry streamRB = findFirstByNamePrefix(runtimeBeans,
+ "ThreadStream");
assertNotNull(streamRB);
assertFalse(streamRB.getKeyYangName().isPresent());
assertFalse(streamRB.getKeyJavaName().isPresent());
<name>${project.artifactId}</name>
<description>Remove generated source files, after new files generation, implementation is inserted.</description>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<name>${project.artifactId}</name>
<description>Artifact that contains only generated code from yang files. Suitable for testing.</description>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<dependencies>
<dependency>
<type>xml</type>
</dependency>
+ <!-- MessageBus -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>message-bus-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>message-bus-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
</dependencies>
</dependencyManagement>
</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-parent</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>message-bus-api</artifactId>
+ <name>${project.artifactId}</name>
+
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-inventory</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-topology</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/sal</outputBaseDir>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/models</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.8</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.*</Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+module event-aggregator {
+ // FIXME: this module needs to be split up to concepts and API
+ // as the concepts are shared with the other model in this
+ // package.
+ yang-version 1;
+ namespace "urn:cisco:params:xml:ns:yang:messagebus:eventaggregator";
+ prefix "eventaggregator";
+
+ organization "Cisco Systems, Inc.";
+ contact "Robert Gallas";
+
+ description
+    "Module implementing message bus RPC.
+
+ Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2014-12-02" {
+ description "Initial revision";
+ }
+
+ typedef pattern {
+ type string {
+ length 1..max;
+ }
+
+ // FIXME: make this a regular expression
+ description "A match pattern. Specifically this is a wildcard pattern.";
+ }
+
+ typedef notification-pattern {
+ type pattern;
+ description
+ "Pattern for matching candidate notification types. This pattern is to be
+ applied against the concatenation of the namespace of the module which
+ defines that particular notification, followed by a single colon, and
+ then followed by notification identifier, as supplied in the argument to
+ the notification statement.";
+ }
+
+ typedef topic-id {
+ type string {
+ length 1..max;
+ }
+ description
+      "A topic identifier. It uniquely defines a topic as seen by the user
+ of this model's RPCs";
+ }
+
+ // FIXME: we would really like to share instances here, but that requires some sort
+ // of sane reference counting. The reason for sharing is the data path part
+ // of notification delivery -- multiple creators of topics can still share
+ // a single data path.
+ rpc create-topic {
+ description
+ "Create a new topic. A topic is an aggregation of several notification
+ types from a set of nodes. Each successful invocation results in a unique
+ topic being created. The caller is responsible for removing the topic
+ once it is no longer needed.";
+
+ input {
+ leaf notification-pattern {
+ type notification-pattern;
+ mandatory true;
+ description
+ "Pattern matching notification which should be forwarded into this
+ topic.";
+ }
+
+ leaf node-id-pattern {
+ type pattern;
+ mandatory true;
+ description
+ "Pattern for matching candidate event source nodes when looking
+ for contributors to the topic. The pattern will be applied against
+ /network-topology/topology/node/node-id";
+ }
+ }
+
+ output {
+ leaf topic-id {
+ type topic-id;
+ mandatory true;
+ }
+ }
+ }
+
+ rpc destroy-topic {
+ description
+ "Destroy a topic. No further messages will be delivered to it.";
+
+ input {
+ leaf topic-id {
+ type topic-id;
+ mandatory true;
+ }
+ }
+ }
+
+ notification topic-notification {
+ description
+      "Notification of an event occurring on a particular node. This notification
+ acts as an encapsulation for the event being delivered.";
+
+ leaf topic-id {
+ type topic-id;
+ mandatory true;
+ description
+ "Topic to which this event is being delivered.";
+ }
+
+ leaf node-id {
+ // FIXME: should be topology node ID
+ type string;
+ mandatory true;
+ description
+ "Node ID of the node which generated the event.";
+ }
+
+ anyxml payload {
+ mandatory true;
+ description
+ "Encapsulated notification. The format is the XML representation of
+ a notification according to RFC6020 section 7.14.2.";
+ }
+ }
+}
--- /dev/null
+module event-source {
+ yang-version 1;
+ namespace "urn:cisco:params:xml:ns:yang:messagebus:eventsource";
+ prefix "eventsource";
+
+ import event-aggregator { prefix aggr; }
+ import network-topology { prefix nt; revision-date "2013-10-21"; }
+ import opendaylight-inventory {prefix inv; revision-date "2013-08-19"; }
+ import yang-ext {prefix ext; revision-date "2013-07-09"; }
+
+ organization "Cisco Systems, Inc.";
+ contact "Robert Gallas";
+
+ description
+ "Base model for a topology where individual nodes can produce events.
+
+    Module implementing event source topology and encapsulated notification.
+
+ Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2014-12-02" {
+ description "first revision";
+ }
+
+ // FIXME: expand this
+ typedef join-topic-status {
+ type enumeration {
+ enum up;
+ enum down;
+ }
+ description "Object status";
+ }
+
+ // FIXME: migrate to topology
+ typedef node-ref {
+ type leafref {
+ path "/inv:nodes/inv:node/inv:id";
+ }
+ }
+
+ grouping topology-event-source-type {
+ container topology-event-source {
+ presence "indicates an event source-aware topology";
+ }
+ }
+
+ rpc join-topic {
+ input {
+ leaf node {
+ ext:context-reference "inv:node-context";
+ type "instance-identifier";
+ }
+ leaf topic-id {
+ type aggr:topic-id;
+ description "in current implementation notification-pattern is defined by topic-id.
+ By persisting topic definition we could omit notification-pattern";
+ }
+ leaf notification-pattern {
+ type aggr:notification-pattern;
+ }
+ }
+
+ output {
+ leaf status {
+ type join-topic-status;
+ }
+ }
+ }
+
+ augment "/nt:network-topology/nt:topology/nt:topology-types" {
+ uses topology-event-source-type;
+ }
+
+ augment "/nt:network-topology/nt:topology/nt:node" {
+ when "../../nt:topology-types/topology-event-source";
+ leaf event-source-node {
+ type node-ref;
+ }
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>\r
+<project xmlns="http://maven.apache.org/POM/4.0.0"\r
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\r
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">\r
+ <modelVersion>4.0.0</modelVersion>\r
+\r
+ <parent>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-parent</artifactId>\r
+ <version>1.2.0-SNAPSHOT</version>\r
+ </parent>\r
+\r
+ <artifactId>message-bus-impl</artifactId>\r
+ <name>${project.artifactId}</name>\r
+\r
+ <packaging>bundle</packaging>\r
+\r
+ <dependencies>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>ietf-netconf-notifications</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-binding-api</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-core-api</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-common-util</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.yangtools</groupId>\r
+ <artifactId>yang-data-impl</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>config-api</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>message-bus-api</artifactId>\r
+ </dependency>\r
+ <dependency>\r
+ <groupId>org.opendaylight.controller</groupId>\r
+ <artifactId>sal-binding-config</artifactId>\r
+ </dependency>\r
+ </dependencies>\r
+\r
+ <build>\r
+ <plugins>\r
+ <plugin>\r
+ <groupId>org.opendaylight.yangtools</groupId>\r
+ <artifactId>yang-maven-plugin</artifactId>\r
+ <executions>\r
+ <execution>\r
+ <goals>\r
+ <goal>generate-sources</goal>\r
+ </goals>\r
+ <configuration>\r
+ <codeGenerators>\r
+ <generator>\r
+ <codeGeneratorClass>\r
+ org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl\r
+ </codeGeneratorClass>\r
+ <outputBaseDir>\r
+ ${project.build.directory}/generated-sources/sal\r
+ </outputBaseDir>\r
+ </generator>\r
+ <generator>\r
+ <codeGeneratorClass>\r
+ org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator\r
+ </codeGeneratorClass>\r
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>\r
+ <additionalConfiguration>\r
+ <namespaceToPackage1>\r
+ urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang\r
+ </namespaceToPackage1>\r
+ </additionalConfiguration>\r
+ </generator>\r
+ <generator>\r
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>\r
+ <outputBaseDir>target/site/models</outputBaseDir>\r
+ </generator>\r
+ </codeGenerators>\r
+ <inspectDependencies>true</inspectDependencies>\r
+ </configuration>\r
+ </execution>\r
+ </executions>\r
+ </plugin>\r
+ <plugin>\r
+ <groupId>org.codehaus.mojo</groupId>\r
+ <artifactId>build-helper-maven-plugin</artifactId>\r
+ <executions>\r
+ <execution>\r
+ <id>add-source</id>\r
+ <phase>generate-sources</phase>\r
+ <goals>\r
+ <goal>add-source</goal>\r
+ </goals>\r
+ <configuration>\r
+ <sources>\r
+ <source>${project.build.directory}/generated-sources/config</source>\r
+ </sources>\r
+ </configuration>\r
+ </execution>\r
+ </executions>\r
+ </plugin>\r
+ </plugins>\r
+ </build>\r
+</project>\r
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.config.yang.messagebus.app.impl;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.controller.mdsal.InitializationContext;
+import org.opendaylight.controller.mdsal.Providers;
+import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+public class MessageBusAppImplModule extends org.opendaylight.controller.config.yang.messagebus.app.impl.AbstractMessageBusAppImplModule {
+    private static final Logger LOGGER = LoggerFactory.getLogger(MessageBusAppImplModule.class);
+
+    // Injected by MessageBusAppImplModuleFactory so the binding-aware provider
+    // can be registered against the OSGi bundle context.
+    private BundleContext bundleContext;
+
+    public BundleContext getBundleContext() {
+        return bundleContext;
+    }
+
+    public void setBundleContext(BundleContext bundleContext) {
+        this.bundleContext = bundleContext;
+    }
+
+    public MessageBusAppImplModule(ModuleIdentifier identifier, DependencyResolver dependencyResolver) {
+        super(identifier, dependencyResolver);
+    }
+
+    public MessageBusAppImplModule(ModuleIdentifier identifier,
+                                   DependencyResolver dependencyResolver,
+                                   MessageBusAppImplModule oldModule,
+                                   java.lang.AutoCloseable oldInstance) {
+        super(identifier, dependencyResolver, oldModule, oldInstance);
+    }
+
+    @Override
+    protected void customValidation() {
+        // No custom validation beyond the generated checks.
+    }
+
+    /**
+     * Wires the message-bus application into both the binding-aware and the
+     * binding-independent broker and returns a handle that unregisters both
+     * providers on close.
+     */
+    @Override
+    public java.lang.AutoCloseable createInstance() {
+        List<NamespaceToStream> namespaceMapping = getNamespaceToStream();
+        InitializationContext ic = new InitializationContext(namespaceMapping);
+
+        final Providers.BindingAware bap = new Providers.BindingAware(ic);
+        final Providers.BindingIndependent bip = new Providers.BindingIndependent(ic);
+
+        getBindingBrokerDependency().registerProvider(bap, getBundleContext());
+        getDomBrokerDependency().registerProvider(bip);
+
+        AutoCloseable closer = new AutoCloseable() {
+            @Override
+            public void close() {
+                closeProvider(bap);
+                closeProvider(bip);
+            }
+        };
+
+        return closer;
+    }
+
+    // Best-effort close: a failing provider must not prevent closing the other one.
+    private void closeProvider(AutoCloseable closable) {
+        try {
+            closable.close();
+        } catch (Exception e) {
+            // Fixed: pass the throwable as the LAST argument (not as a {} parameter)
+            // so SLF4J logs the full stack trace instead of Throwable.toString().
+            LOGGER.error("Exception while closing: {}", closable, e);
+        }
+    }
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: message-bus-app-impl yang module local name: messagebus-app-impl
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Feb 03 09:03:11 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.messagebus.app.impl;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+public class MessageBusAppImplModuleFactory extends org.opendaylight.controller.config.yang.messagebus.app.impl.AbstractMessageBusAppImplModuleFactory {
+
+    /**
+     * Creates a fresh module instance and hands it the OSGi bundle context
+     * before returning it to the config subsystem.
+     */
+    @Override
+    public Module createModule(String instanceName,
+                               DependencyResolver dependencyResolver,
+                               BundleContext bundleContext) {
+        MessageBusAppImplModule module =
+                (MessageBusAppImplModule) super.createModule(instanceName, dependencyResolver, bundleContext);
+        module.setBundleContext(bundleContext);
+        return module;
+    }
+
+    /**
+     * Creates a module that reuses state from a previous instance, again
+     * injecting the OSGi bundle context before returning it.
+     */
+    @Override
+    public Module createModule(String instanceName,
+                               DependencyResolver dependencyResolver,
+                               DynamicMBeanWithInstance old,
+                               BundleContext bundleContext) throws Exception {
+        MessageBusAppImplModule module =
+                (MessageBusAppImplModule) super.createModule(instanceName, dependencyResolver, old, bundleContext);
+        module.setBundleContext(bundleContext);
+        return module;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.mdsal;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+public class DataStore {
+    // Shared no-op callback for callers that do not need PUT completion handling.
+    // Fixed: added @Override on both methods and removed the stray ';' after onFailure.
+    private static final FutureCallback<Void> DEFAULT_CALLBACK =
+            new FutureCallback<Void>() {
+                @Override
+                public void onSuccess(Void result) {
+                    // TODO: Implement default behaviour
+                }
+
+                @Override
+                public void onFailure(Throwable t) {
+                    // TODO: Implement default behaviour
+                }
+            };
+
+    private final MdSAL mdSAL;
+
+    public DataStore(MdSAL mdSAL) {
+        this.mdSAL = mdSAL;
+    }
+
+    /** Registers a data-change listener via the binding-aware data broker. */
+    public ListenerRegistration<DataChangeListener> registerDataChangeListener(LogicalDatastoreType store,
+                                                                               InstanceIdentifier<?> path,
+                                                                               DataChangeListener listener,
+                                                                               AsyncDataBroker.DataChangeScope triggeringScope) {
+        return mdSAL.getDataBroker().registerDataChangeListener(store, path, listener, triggeringScope);
+    }
+
+    /** Fire-and-forget PUT using the shared no-op callback. */
+    public <T extends DataObject> void asyncPUT(LogicalDatastoreType datastoreType,
+                                                InstanceIdentifier<T> path,
+                                                T data) {
+        asyncPUT(datastoreType, path, data, DEFAULT_CALLBACK);
+    }
+
+    /**
+     * Asynchronous PUT; the 'true' flag asks the transaction to create missing
+     * parent nodes. The outcome is reported to the supplied callback.
+     */
+    public <T extends DataObject> void asyncPUT(LogicalDatastoreType datastoreType,
+                                                InstanceIdentifier<T> path,
+                                                T data,
+                                                FutureCallback<Void> callback) {
+        WriteTransaction tx = mdSAL.getDataBroker().newWriteOnlyTransaction();
+        tx.put(datastoreType, path, data, true);
+        execPut(tx, callback);
+    }
+
+    /**
+     * Synchronous read. Blocks on the read future and wraps any failure
+     * (including an absent value -- the inner Optional.get()) in a RuntimeException.
+     */
+    public <T extends DataObject> T read(LogicalDatastoreType datastoreType,
+                                         InstanceIdentifier<T> path) {
+        ReadOnlyTransaction tx = mdSAL.getDataBroker().newReadOnlyTransaction();
+        T result = null;
+
+        try {
+            result = tx.read(datastoreType, path).get().get();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        return result;
+    }
+
+    private static void execPut(WriteTransaction tx, FutureCallback<Void> callback) {
+        Futures.addCallback(tx.submit(), callback);
+    }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.mdsal;
+
+import org.opendaylight.controller.config.yang.messagebus.app.impl.NamespaceToStream;
+import org.opendaylight.controller.messagebus.app.impl.EventAggregator;
+import org.opendaylight.controller.messagebus.app.impl.EventSourceManager;
+import org.opendaylight.controller.messagebus.app.impl.EventSourceTopology;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.core.api.Broker;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+public class InitializationContext {
+    private static final Logger LOGGER = LoggerFactory.getLogger(InitializationContext.class);
+
+    private final MdSAL mdSal;
+    private final DataStore dataStore;
+    private final EventSourceTopology eventSourceTopology;
+    private final EventSourceManager eventSourceManager;
+    private final EventAggregator eventAggregator;
+
+    /**
+     * Builds the object graph of the message-bus application. Actual start-up
+     * is deferred until both broker sessions have been supplied via set().
+     */
+    public InitializationContext(List<NamespaceToStream> namespaceMapping) {
+        mdSal = new MdSAL();
+        dataStore = new DataStore(mdSal);
+        eventSourceTopology = new EventSourceTopology(dataStore);
+        eventSourceManager = new EventSourceManager(dataStore, mdSal, eventSourceTopology, namespaceMapping);
+        eventAggregator = new EventAggregator(mdSal, eventSourceTopology);
+    }
+
+    /** Supplies the binding-aware session; starts up once both sessions are present. */
+    public synchronized void set(BindingAwareBroker.ProviderContext session) {
+        mdSal.setBindingAwareContext(session);
+        initializeIfReady();
+    }
+
+    /** Supplies the binding-independent session; starts up once both sessions are present. */
+    public synchronized void set(Broker.ProviderSession session) {
+        mdSal.setBindingIndependentContext(session);
+        initializeIfReady();
+    }
+
+    // Common tail of both set() overloads: fire the ready callbacks exactly when
+    // the second of the two sessions arrives.
+    private void initializeIfReady() {
+        if (mdSal.isReady()) {
+            eventSourceTopology.mdsalReady();
+            eventSourceManager.mdsalReady();
+            eventAggregator.mdsalReady();
+
+            LOGGER.info("InitializationContext started.");
+        }
+    }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.mdsal;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareService;
+import org.opendaylight.controller.sal.binding.api.mount.MountInstance;
+import org.opendaylight.controller.sal.binding.api.mount.MountProviderService;
+import org.opendaylight.controller.sal.core.api.Broker;
+import org.opendaylight.controller.sal.core.api.BrokerService;
+import org.opendaylight.controller.sal.core.api.notify.NotificationListener;
+import org.opendaylight.controller.sal.core.api.notify.NotificationPublishService;
+import org.opendaylight.controller.sal.core.api.notify.NotificationService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.RpcService;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MdSAL {
+ private static final Logger LOGGER = LoggerFactory.getLogger(MdSAL.class);
+
+ // Facade over the two MD-SAL broker sessions. Both contexts are injected
+ // after construction; isReady() reports when both have arrived.
+ private BindingAwareBroker.ProviderContext bindingAwareContext;
+ private Broker.ProviderSession bindingIndependentContext;
+
+ // -----------------------------
+ // ----- FRAMEWORK METHODS -----
+ // -----------------------------
+ public void setBindingAwareContext(BindingAwareBroker.ProviderContext bindingAwareContext) {
+ this.bindingAwareContext = bindingAwareContext;
+ }
+
+ public void setBindingIndependentContext(Broker.ProviderSession bindingIndependentContext) {
+ this.bindingIndependentContext = bindingIndependentContext;
+ }
+
+ //TODO: We should hide brokers and expose functionalities instead
+ public DataBroker getDataBroker() {
+ return getBaSalService(DataBroker.class);
+ }
+
+ // NOTE(review): the setters above are not synchronized, so this check alone is
+ // not a happens-before guarantee -- confirm callers serialize set/isReady.
+ public synchronized boolean isReady() {
+ return (bindingAwareContext != null && bindingIndependentContext != null);
+ }
+
+ // -----------------------
+ // ----- API METHODS -----
+ // -----------------------
+ // TODO: Factor out API methods to interface
+ // method does not return registration object. Rather will hold references internally and manipulate using node id and API
+ public <T extends RpcService> void addRpcImplementation(Class<T> serviceInterface,
+ T implementation)
+ throws IllegalStateException {
+ bindingAwareContext.addRpcImplementation(serviceInterface, implementation);
+ }
+
+ // Registers a routed RPC implementation and binds it to the given node's path.
+ // method does not return registration object. Rather will hold references internally and manipulate using node id and API
+ public <T extends RpcService> void addRpcImplementation(Node node,
+ Class<T> serviceInterface,
+ T implementation)
+ throws IllegalStateException {
+ BindingAwareBroker.RoutedRpcRegistration<T> registration
+ = addRoutedRpcImplementation(serviceInterface, implementation);
+
+ NodeRef nodeRef = createNodeRef(node.getId());
+ registration.registerPath(NodeContext.class, nodeRef.getValue());
+ }
+
+ // Subscribes to a notification on a mounted node, located via the DOM mount
+ // point service using the node's binding-independent inventory identifier.
+ // NOTE(review): getMountPoint(yii).get() and getService(...).get() assume the
+ // mount point and service are present -- confirm, otherwise this throws.
+ public ListenerRegistration<NotificationListener> addNotificationListener(String nodeId,
+ QName notification,
+ NotificationListener listener) {
+ YangInstanceIdentifier yii = inventoryNodeBIIdentifier(nodeId);
+
+ NotificationService notificationService =
+ getBiSalService(DOMMountPointService.class)
+ .getMountPoint(yii)
+ .get()
+ .getService(NotificationPublishService.class)
+ .get();
+
+ ListenerRegistration<NotificationListener> registration =
+ notificationService.addNotificationListener(notification, listener);
+
+ LOGGER.info("Notification listener registered for {}, at node {}", notification, nodeId);
+
+ return registration;
+ }
+
+ // Subscribes to a notification on the local (non-mounted) DOM broker.
+ public ListenerRegistration<NotificationListener> addNotificationListener(QName notification,
+ NotificationListener listener) {
+ NotificationService notificationService =
+ getBiSalService(NotificationPublishService.class);
+
+ ListenerRegistration<NotificationListener> registration =
+ notificationService.addNotificationListener(notification, listener);
+
+ LOGGER.info("Notification listener registered for {}.", notification);
+
+ return registration;
+ }
+
+ public <T extends RpcService> T getRpcService(Class<T> serviceInterface) {
+ return bindingAwareContext.getRpcService(serviceInterface);
+ }
+
+ // Looks up the RPC service on the binding-aware mount point of the given
+ // inventory node.
+ public <T extends RpcService> T getRpcService(String nodeId, Class<T> serviceInterface) {
+ MountProviderService mountProviderService = getBaSalService(MountProviderService.class);
+
+ InstanceIdentifier<Node> key = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class,
+ new NodeKey(new NodeId(nodeId)));
+
+ MountInstance mountPoint = mountProviderService.getMountPoint(key);
+ return mountPoint.getRpcService(serviceInterface);
+ }
+
+ public void publishNotification(CompositeNode notification) {
+ getBiSalService(NotificationPublishService.class).publish(notification);
+ }
+
+ // Schema context of a mounted node; same mount-point presence assumption as
+ // addNotificationListener(String, ...) above.
+ public SchemaContext getSchemaContext(String nodeId) {
+ YangInstanceIdentifier yii = inventoryNodeBIIdentifier(nodeId);
+
+ SchemaContext schemaContext =
+ getBiSalService(DOMMountPointService.class)
+ .getMountPoint(yii)
+ .get().getSchemaContext();
+
+ return schemaContext;
+ }
+
+ // ---------------------------
+ // ----- UTILITY METHODS -----
+ // ---------------------------
+ private <T extends BindingAwareService> T getBaSalService(Class<T> service) {
+ return bindingAwareContext.getSALService(service);
+ }
+
+ private <T extends BrokerService> T getBiSalService(Class<T> service) {
+ return bindingIndependentContext.getService(service);
+ }
+
+ private static final String NODE_ID_NAME = "id";
+
+ // Builds the binding-independent identifier of /nodes/node[id=nodeId].
+ public static YangInstanceIdentifier inventoryNodeBIIdentifier(String nodeId) {
+ return YangInstanceIdentifier.builder()
+ .node(Nodes.QNAME)
+ .nodeWithKey(Node.QNAME,
+ QName.create(Node.QNAME.getNamespace(),
+ Node.QNAME.getRevision(),
+ NODE_ID_NAME),
+ nodeId)
+ .build();
+ }
+
+ private <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface,
+ T implementation)
+ throws IllegalStateException {
+ return bindingAwareContext.addRoutedRpcImplementation(serviceInterface, implementation);
+ }
+
+ // Builds a binding-aware NodeRef for /nodes/node[nodeKey].
+ public static NodeRef createNodeRef(NodeId nodeId) {
+ NodeKey nodeKey = new NodeKey(nodeId);
+ InstanceIdentifier<Node> path = InstanceIdentifier
+ .builder(Nodes.class)
+ .child(Node.class, nodeKey)
+ .build();
+ return new NodeRef(path);
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.mdsal;
+
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
+import org.opendaylight.controller.sal.core.api.AbstractProvider;
+import org.opendaylight.controller.sal.core.api.Broker;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Providers {
+    private static final Logger LOGGER = LoggerFactory.getLogger(Providers.class);
+
+    /**
+     * Binding-aware provider; forwards its session to the InitializationContext
+     * as soon as the broker hands it over.
+     */
+    public static class BindingAware implements BindingAwareProvider, AutoCloseable {
+        private final InitializationContext context;
+
+        public BindingAware(InitializationContext ic) {
+            context = ic;
+        }
+
+        @Override
+        public void onSessionInitiated(BindingAwareBroker.ProviderContext session) {
+            context.set(session);
+            LOGGER.info("BindingAwareBroker.ProviderContext initialized");
+        }
+
+        /** Nothing to release. */
+        @Override
+        public void close() throws Exception {
+        }
+    }
+
+    /**
+     * Binding-independent provider; forwards the DOM broker session to the
+     * InitializationContext.
+     */
+    public static class BindingIndependent extends AbstractProvider implements AutoCloseable {
+        private final InitializationContext context;
+
+        public BindingIndependent(InitializationContext ic) {
+            context = ic;
+        }
+
+        @Override
+        public void onSessionInitiated(Broker.ProviderSession session) {
+            context.set(session);
+            LOGGER.info("Broker.ProviderSession initialized");
+        }
+
+        /** Nothing to release. */
+        @Override
+        public void close() throws Exception {
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.app.impl;
+
+import java.util.List;
+import java.util.concurrent.Future;
+import org.opendaylight.controller.mdsal.MdSAL;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicOutput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// TODO: implement topic created notification
+public class EventAggregator implements EventAggregatorService {
+ private static final Logger LOGGER = LoggerFactory.getLogger(EventAggregator.class);
+
+ private final MdSAL mdSAL;
+ private final EventSourceTopology eventSourceTopology;
+
+ public EventAggregator(final MdSAL mdSAL, final EventSourceTopology eventSourceTopology) {
+ this.mdSAL = mdSAL;
+ this.eventSourceTopology = eventSourceTopology;
+ }
+
+ // Registers this instance as the global EventAggregatorService RPC implementation.
+ public void mdsalReady() {
+ mdSAL.addRpcImplementation(EventAggregatorService.class, this);
+ }
+
+ // Creates a Topic for the requested notification/node-id patterns, subscribes
+ // it to future topology changes, and notifies all currently known nodes.
+ @Override
+ public Future<RpcResult<CreateTopicOutput>> createTopic(final CreateTopicInput input) {
+ LOGGER.info("Received Topic creation request: NotificationPattern -> {}, NodeIdPattern -> {}",
+ input.getNotificationPattern(),
+ input.getNodeIdPattern());
+
+ Topic topic = new Topic(new NotificationPattern(input.getNotificationPattern()), input.getNodeIdPattern().getValue(), mdSAL);
+
+ //# Make sure we capture all nodes from now on
+ eventSourceTopology.registerDataChangeListener(topic);
+
+ //# Notify existing nodes
+ //# Code reader note: Context of Node type is NetworkTopology
+ // NOTE(review): getAugmentation(Node1.class) may be null for nodes lacking the
+ // event-source augmentation -- confirm, otherwise the loop body NPEs.
+ List<Node> nodes = eventSourceTopology.snapshot();
+ for (Node node : nodes) {
+ NodeId nodeIdToNotify = node.getAugmentation(Node1.class).getEventSourceNode();
+ topic.notifyNode(nodeIdToNotify);
+ }
+
+ CreateTopicOutput cto = new CreateTopicOutputBuilder()
+ .setTopicId(topic.getTopicId())
+ .build();
+
+ return Util.resultFor(cto);
+ }
+
+ // Not implemented yet; returns null to the RPC layer.
+ @Override
+ public Future<RpcResult<Void>> destroyTopic(final DestroyTopicInput input) {
+ // 1. UNREGISTER DATA CHANGE LISTENER -> ?
+ // 2. CLOSE TOPIC
+ return null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.app.impl;
+
+import org.opendaylight.controller.config.yang.messagebus.app.impl.NamespaceToStream;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.mdsal.DataStore;
+import org.opendaylight.controller.mdsal.MdSAL;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.inventory.rev140108.NetconfNode;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public final class EventSourceManager implements DataChangeListener {
+    private static final Logger LOGGER = LoggerFactory.getLogger(EventSourceManager.class);
+    // Listens on the whole operational inventory node subtree.
+    private static final InstanceIdentifier<Node> INVENTORY_PATH = InstanceIdentifier.create(Nodes.class)
+            .child(Node.class);
+    private final DataStore dataStore;
+    private final MdSAL mdSal;
+    private final EventSourceTopology eventSourceTopology;
+    // urn prefix -> netconf stream name, built from the config-subsystem mapping.
+    private final Map<String, String> streamMap;
+
+    public EventSourceManager(DataStore dataStore,
+                              MdSAL mdSal,
+                              EventSourceTopology eventSourceTopology,
+                              List<NamespaceToStream> namespaceMapping) {
+        this.dataStore = dataStore;
+        this.mdSal = mdSal;
+        this.eventSourceTopology = eventSourceTopology;
+        this.streamMap = namespaceToStreamMapping(namespaceMapping);
+    }
+
+    // Fixed: typed Map<String, String> return instead of the raw Map the
+    // original declared (unchecked-conversion warning at the assignment).
+    private Map<String, String> namespaceToStreamMapping(List<NamespaceToStream> namespaceMapping) {
+        Map<String, String> mapping = new HashMap<>(namespaceMapping.size());
+
+        for (NamespaceToStream nToS : namespaceMapping) {
+            mapping.put(nToS.getUrnPrefix(), nToS.getStreamName());
+        }
+
+        return mapping;
+    }
+
+    /** Starts listening on the operational inventory once MD-SAL is available. */
+    public void mdsalReady() {
+        dataStore.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                                             INVENTORY_PATH,
+                                             this,
+                                             DataBroker.DataChangeScope.SUBTREE);
+
+        LOGGER.info("EventSourceManager initialized.");
+    }
+
+    /**
+     * For every netconf-capable, notification-capable node appearing in the
+     * inventory: create a NetconfEventSource, register it as the node's routed
+     * EventSourceService, listen for its changes, and record the node in the
+     * event-source topology.
+     */
+    @Override
+    public void onDataChanged(AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> event) {
+        //FIXME: Prevent creating new event source on subsequent changes in inventory, like disconnect.
+        LOGGER.debug("[DataChangeEvent<InstanceIdentifier<?>, DataObject>: {}]", event);
+
+        Node node = Util.getAffectedNode(event);
+        // we listen on node tree, therefore we should rather throw IllegalStateException when node is null
+        if (node == null) {
+            LOGGER.debug("OnDataChanged Event. Node is null.");
+            return;
+        }
+        if (!isNetconfNode(node)) {
+            LOGGER.debug("OnDataChanged Event. Not a Netconf node.");
+            return;
+        }
+        if (!isEventSource(node)) {
+            // Fixed log text: the original said the opposite of what this branch means.
+            LOGGER.debug("OnDataChanged Event. Node is not an EventSource node.");
+            return;
+        }
+
+        NetconfEventSource netconfEventSource = new NetconfEventSource(mdSal,
+                                                                       node.getKey().getId().getValue(),
+                                                                       streamMap);
+        mdSal.addRpcImplementation(node, EventSourceService.class, netconfEventSource);
+
+        InstanceIdentifier<NetconfNode> nodeInstanceIdentifier =
+                InstanceIdentifier.create(Nodes.class)
+                                  .child(Node.class, node.getKey())
+                                  .augmentation(NetconfNode.class);
+
+        dataStore.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+                                             nodeInstanceIdentifier,
+                                             netconfEventSource,
+                                             DataBroker.DataChangeScope.SUBTREE);
+
+        eventSourceTopology.insert(node);
+    }
+
+    private boolean isNetconfNode(Node node) {
+        return node.getAugmentation(NetconfNode.class) != null;
+    }
+
+    public boolean isEventSource(Node node) {
+        NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
+
+        return isEventSource(netconfNode);
+    }
+
+    // True iff the netconf node advertises the IETF notification capability.
+    private boolean isEventSource(NetconfNode node) {
+        // Guard against nodes that report no capability list at all
+        // (the original dereferenced it unconditionally and could NPE).
+        if (node.getInitialCapability() == null) {
+            return false;
+        }
+        for (String capability : node.getInitialCapability()) {
+            if (capability.startsWith("urn:ietf:params:xml:ns:netconf:notification")) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.app.impl;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.mdsal.DataStore;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1Builder;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.TopologyTypes1;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.TopologyTypes1Builder;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.topology.event.source.type.TopologyEventSource;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.topology.event.source.type.TopologyEventSourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.TopologyTypes;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class EventSourceTopology {
+ private static final Logger LOGGER = LoggerFactory.getLogger(EventSourceTopology.class);
+
+ private static final String topologyId = "EVENT-SOURCE-TOPOLOGY" ;
+ private static final TopologyKey topologyKey = new TopologyKey(new TopologyId(topologyId));
+ private static final LogicalDatastoreType datastoreType = LogicalDatastoreType.OPERATIONAL;
+
+ private static final InstanceIdentifier<Topology> topologyInstanceIdentifier =
+ InstanceIdentifier.create(NetworkTopology.class)
+ .child(Topology.class, topologyKey);
+
+ private static final InstanceIdentifier<TopologyTypes1> topologyTypeInstanceIdentifier =
+ topologyInstanceIdentifier
+ .child(TopologyTypes.class)
+ .augmentation(TopologyTypes1.class);
+
+ private static final InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang
+ .network.topology.rev131021.network.topology.topology.Node> eventSourceTopologyPath =
+ InstanceIdentifier.create(NetworkTopology.class)
+ .child(Topology.class)
+ .child(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang
+ .network.topology.rev131021.network.topology.topology.Node.class);
+
+ private final Map<DataChangeListener, ListenerRegistration<DataChangeListener>> registrations =
+ new ConcurrentHashMap<>();
+
+ private final DataStore dataStore;
+
+ public EventSourceTopology(DataStore dataStore) {
+ this.dataStore = dataStore;
+ }
+
+ public void mdsalReady() {
+ TopologyEventSource topologySource = new TopologyEventSourceBuilder().build();
+ TopologyTypes1 topologyTypeAugment = new TopologyTypes1Builder().setTopologyEventSource(topologySource).build();
+
+ dataStore.asyncPUT(datastoreType, topologyTypeInstanceIdentifier, topologyTypeAugment);
+ }
+
+ public void insert(Node node) {
+ String nodeId = node.getKey().getId().getValue();
+ NodeKey nodeKey = new NodeKey(new NodeId(nodeId));
+ InstanceIdentifier<Node1> topologyNodeAugment
+ = topologyInstanceIdentifier
+ .child(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang
+ .network.topology.rev131021.network.topology.topology.Node.class, nodeKey)
+ .augmentation(Node1.class);
+
+ Node1 nodeAgument = new Node1Builder().setEventSourceNode(node.getId()).build();
+ dataStore.asyncPUT(datastoreType, topologyNodeAugment, nodeAgument);
+ }
+
+ // TODO: Should we expose this functioanlity over RPC?
+ public List<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang
+ .network.topology.rev131021.network.topology.topology.Node> snapshot() {
+ Topology topology = dataStore.read(datastoreType, topologyInstanceIdentifier);
+ return topology.getNode();
+ }
+
+ public void registerDataChangeListener(DataChangeListener listener) {
+ ListenerRegistration<DataChangeListener> listenerRegistration = dataStore.registerDataChangeListener(datastoreType,
+ eventSourceTopologyPath,
+ listener,
+ DataBroker.DataChangeScope.SUBTREE);
+
+ registrations.put(listener, listenerRegistration);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.app.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.mdsal.MdSAL;
+import org.opendaylight.controller.sal.core.api.notify.NotificationListener;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutput;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.NotificationsService;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.inventory.rev140108.NetconfNode;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
+import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class NetconfEventSource implements EventSourceService, NotificationListener, DataChangeListener {
+ private static final Logger LOGGER = LoggerFactory.getLogger(NetconfEventSource.class);
+
+ private final MdSAL mdSal;
+ private final String nodeId;
+
+ private final List<String> activeStreams = new ArrayList<>();
+
+ private final Map<String, String> urnPrefixToStreamMap;
+
+ public NetconfEventSource(final MdSAL mdSal, final String nodeId, final Map<String, String> streamMap) {
+ Preconditions.checkNotNull(mdSal);
+ Preconditions.checkNotNull(nodeId);
+
+ this.mdSal = mdSal;
+ this.nodeId = nodeId;
+ this.urnPrefixToStreamMap = streamMap;
+
+ LOGGER.info("NetconfEventSource [{}] created.", nodeId);
+ }
+
+ @Override
+ public Future<RpcResult<JoinTopicOutput>> joinTopic(final JoinTopicInput input) {
+ final NotificationPattern notificationPattern = input.getNotificationPattern();
+
+ // FIXME: default language should already be regex
+ final String regex = Util.wildcardToRegex(notificationPattern.getValue());
+
+ final Pattern pattern = Pattern.compile(regex);
+ List<QName> matchingNotifications = Util.expandQname(availableNotifications(), pattern);
+ registerNotificationListener(matchingNotifications);
+ return null;
+ }
+
+ private List<QName> availableNotifications() {
+ // FIXME: use SchemaContextListener to get changes asynchronously
+ Set<NotificationDefinition> availableNotifications = mdSal.getSchemaContext(nodeId).getNotifications();
+ List<QName> qNs = new ArrayList<>(availableNotifications.size());
+ for (NotificationDefinition nd : availableNotifications) {
+ qNs.add(nd.getQName());
+ }
+
+ return qNs;
+ }
+
+ private void registerNotificationListener(final List<QName> notificationsToSubscribe) {
+ for (QName qName : notificationsToSubscribe) {
+ startSubscription(qName);
+ // FIXME: do not lose this registration
+ final ListenerRegistration<NotificationListener> reg = mdSal.addNotificationListener(nodeId, qName, this);
+ }
+ }
+
+ private synchronized void startSubscription(final QName qName) {
+ String streamName = resolveStream(qName);
+
+ if (streamIsActive(streamName) == false) {
+ LOGGER.info("Stream {} is not active on node {}. Will subscribe.", streamName, nodeId);
+ startSubscription(streamName);
+ }
+ }
+
+ private synchronized void resubscribeToActiveStreams() {
+ for (String streamName : activeStreams) {
+ startSubscription(streamName);
+ }
+ }
+
+ private synchronized void startSubscription(final String streamName) {
+ CreateSubscriptionInput subscriptionInput = getSubscriptionInput(streamName);
+ mdSal.getRpcService(nodeId, NotificationsService.class).createSubscription(subscriptionInput);
+ activeStreams.add(streamName);
+ }
+
+ private static CreateSubscriptionInput getSubscriptionInput(final String streamName) {
+ CreateSubscriptionInputBuilder csib = new CreateSubscriptionInputBuilder();
+ csib.setStream(new StreamNameType(streamName));
+ return csib.build();
+ }
+
+ private String resolveStream(final QName qName) {
+ String streamName = null;
+
+ for (Map.Entry<String, String> entry : urnPrefixToStreamMap.entrySet()) {
+ String nameSpace = qName.getNamespace().toString();
+ String urnPrefix = entry.getKey();
+ if( nameSpace.startsWith(urnPrefix) ) {
+ streamName = entry.getValue();
+ break;
+ }
+ }
+
+ return streamName;
+ }
+
+ private boolean streamIsActive(final String streamName) {
+ return activeStreams.contains(streamName);
+ }
+
+ // PASS
+ @Override public Set<QName> getSupportedNotifications() {
+ return null;
+ }
+
+ @Override
+ public void onNotification(final CompositeNode notification) {
+ LOGGER.info("NetconfEventSource {} received notification {}. Will publish to MD-SAL.", nodeId, notification);
+ ImmutableCompositeNode payload = ImmutableCompositeNode.builder()
+ .setQName(QName.create(TopicNotification.QNAME, "payload"))
+ .add(notification).toInstance();
+ ImmutableCompositeNode icn = ImmutableCompositeNode.builder()
+ .setQName(TopicNotification.QNAME)
+ .add(payload)
+ .addLeaf("event-source", nodeId)
+ .toInstance();
+
+ mdSal.publishNotification(icn);
+ }
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ boolean wasConnected = false;
+ boolean nowConnected = false;
+
+ for (Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : change.getOriginalData().entrySet()) {
+ if ( isNetconfNode(changeEntry) ) {
+ NetconfNode nn = (NetconfNode)changeEntry.getValue();
+ wasConnected = nn.isConnected();
+ }
+ }
+
+ for (Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : change.getUpdatedData().entrySet()) {
+ if ( isNetconfNode(changeEntry) ) {
+ NetconfNode nn = (NetconfNode)changeEntry.getValue();
+ nowConnected = nn.isConnected();
+ }
+ }
+
+ if (wasConnected == false && nowConnected == true) {
+ resubscribeToActiveStreams();
+ }
+ }
+
+ private static boolean isNetconfNode(final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry ) {
+ return NetconfNode.class.equals(changeEntry.getKey().getTargetType());
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.app.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.regex.Pattern;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.mdsal.MdSAL;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
+import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.LoggerFactory;
+
+public class Topic implements DataChangeListener {
+ private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(Topic.class);
+ private final NotificationPattern notificationPattern;
+ private final Pattern nodeIdPattern;
+ private final TopicId topicId;
+ private final MdSAL mdSal;
+
+ public Topic(final NotificationPattern notificationPattern, final String nodeIdPattern, final MdSAL mdSal) {
+ this.notificationPattern = Preconditions.checkNotNull(notificationPattern);
+
+ // FIXME: regex should be the language of nodeIdPattern
+ final String regex = Util.wildcardToRegex(nodeIdPattern);
+ this.nodeIdPattern = Pattern.compile(regex);
+ this.mdSal = Preconditions.checkNotNull(mdSal);
+
+ // FIXME: We need to perform some salting in order to make
+ // the topic IDs less predictable.
+ this.topicId = new TopicId(Util.md5String(notificationPattern + nodeIdPattern));
+ }
+
+ public TopicId getTopicId() {
+ return topicId;
+ }
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> event) {
+ // TODO: affected must return topologyNode !!!
+ final Node node = Util.getAffectedNode(event);
+ if (nodeIdPattern.matcher(node.getId().getValue()).matches()) {
+ notifyNode(node.getId());
+ } else {
+ LOG.debug("Skipping node {}", node.getId());
+ }
+ }
+
+ public void notifyNode(final NodeId nodeId) {
+ JoinTopicInput jti = getJoinTopicInputArgument(nodeId);
+ EventSourceService ess = mdSal.getRpcService(EventSourceService.class);
+ Preconditions.checkState(ess != null, "EventSourceService is not registered");
+
+ ess.joinTopic(jti);
+ }
+
+ private JoinTopicInput getJoinTopicInputArgument(final NodeId nodeId) {
+ NodeRef nodeRef = MdSAL.createNodeRef(nodeId);
+ JoinTopicInput jti =
+ new JoinTopicInputBuilder()
+ .setNode(nodeRef.getValue())
+ .setTopicId(topicId)
+ .setNotificationPattern(notificationPattern)
+ .build();
+ return jti;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.messagebus.app.impl;
+
+import com.google.common.util.concurrent.Futures;
+import java.math.BigInteger;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.sal.common.util.Rpcs;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+public final class Util {
+ private static final MessageDigest messageDigestTemplate = getDigestInstance();
+
+ private static MessageDigest getDigestInstance() {
+ try {
+ return MessageDigest.getInstance("MD5");
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException("Unable to get MD5 instance");
+ }
+ }
+
+ public static String md5String(final String inputString) {
+
+ try {
+ MessageDigest md = (MessageDigest)messageDigestTemplate.clone();
+ md.update(inputString.getBytes("UTF-8"), 0, inputString.length());
+ return new BigInteger(1, md.digest()).toString(16);
+ } catch (Exception e) {
+ throw new RuntimeException("Unable to get MD5 instance");
+ }
+ }
+
+ public static <T> Future<RpcResult<T>> resultFor(final T output) {
+ RpcResult<T> result = Rpcs.getRpcResult(true, output, Collections.<RpcError>emptyList());
+ return Futures.immediateFuture(result);
+ }
+
+ /**
+ * Extracts affected node from data change event.
+ * @param event the data change event to inspect
+ * @return the affected {@link Node}, or null if no node entry is present
+ */
+ public static Node getAffectedNode(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> event) {
+ // TODO: expect the listener method to be called even when the change impacts a node
+ // TODO: test with change.getCreatedData()
+ for (Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry : event.getUpdatedData().entrySet()) {
+ if (isNode(changeEntry)) {
+ return (Node) changeEntry.getValue();
+ }
+ }
+
+ return null;
+ }
+
+ private static boolean isNode(final Map.Entry<InstanceIdentifier<?>, DataObject> changeEntry ) {
+ return Node.class.equals(changeEntry.getKey().getTargetType());
+ }
+
+ /**
+ * Method filters qnames based on wildcard strings
+ *
+ * @param availableQnames
+ * @param pattern matching pattern
+ * @return list of filtered qnames
+ */
+ public static List<QName> expandQname(final List<QName> availableQnames, final Pattern pattern) {
+ List<QName> matchingQnames = new ArrayList<>();
+
+ for (QName qname : availableQnames) {
+ String namespace = qname.getNamespace().toString();
+ if (pattern.matcher(namespace).matches()) {
+ matchingQnames.add(qname);
+ }
+ }
+
+ return matchingQnames;
+ }
+
+ /**
+ * CREDIT to http://www.rgagnon.com/javadetails/java-0515.html
+ * @param wildcard
+ * @return
+ */
+ static String wildcardToRegex(final String wildcard){
+ StringBuffer s = new StringBuffer(wildcard.length());
+ s.append('^');
+ for (char c : wildcard.toCharArray()) {
+ switch(c) {
+ case '*':
+ s.append(".*");
+ break;
+ case '?':
+ s.append('.');
+ break;
+ // escape special regexp-characters
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '$':
+ case '^':
+ case '.':
+ case '{':
+ case '}':
+ case '|':
+ case '\\':
+ s.append("\\");
+ s.append(c);
+ break;
+ default:
+ s.append(c);
+ break;
+ }
+ }
+ s.append('$');
+ return s.toString();
+ }
+}
--- /dev/null
+module messagebus-app-impl {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:messagebus:app:impl";
+ prefix "binding-impl";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding {prefix sal;}
+ import opendaylight-md-sal-dom {prefix dom;}
+
+
+ description
+ "Service definition for Message Bus application implementation.";
+
+ revision "2015-02-03" {
+ description "Second revision. Message Bus opensourcing";
+ }
+
+ identity messagebus-app-impl {
+ base config:module-type;
+ config:java-name-prefix MessageBusAppImpl;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case messagebus-app-impl {
+ when "/config:modules/config:module/config:type = 'messagebus-app-impl'";
+
+ container binding-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity sal:binding-broker-osgi-registry;
+ }
+ }
+ }
+
+ container dom-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity dom:dom-broker-osgi-registry;
+ }
+ }
+ }
+
+ list namespace-to-stream {
+ key urn-prefix;
+
+ leaf urn-prefix {
+ type string;
+ }
+
+ leaf stream-name {
+ type string;
+ }
+ }
+ }
+ }
+
+ augment "/config:modules/config:module/config:state" {
+ case messagebus-app-impl {
+ when "/config:modules/config:module/config:type = 'messagebus-app-impl'";
+ }
+ }
+}
\ No newline at end of file
<!-- Clustering -->
<module>sal-remoterpc-connector</module>
+
+ <!-- Message Bus -->
+ <module>messagebus-api</module>
+ <module>messagebus-impl</module>
</modules>
<build>
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ClientActor extends UntypedActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
private final ActorRef target;
try {
bs = fromObject(state);
} catch (Exception e) {
- LOG.error(e, "Exception in creating snapshot");
+ LOG.error("Exception in creating snapshot", e);
}
getSelf().tell(new CaptureSnapshotReply(bs.toByteArray()), null);
}
try {
state.putAll((HashMap) toObject(snapshot));
} catch (Exception e) {
- LOG.error(e, "Exception in applying snapshot");
+ LOG.error("Exception in applying snapshot", e);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
*/
package org.opendaylight.controller.cluster.raft;
+import com.google.common.base.Preconditions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
}
protected int adjustedIndex(long logEntryIndex) {
- if(snapshotIndex < 0){
+ if (snapshotIndex < 0) {
return (int) logEntryIndex;
}
return (int) (logEntryIndex - (snapshotIndex + 1));
return journal.size();
}
+ @Override
+ public int dataSize() {
+ return dataSize;
+ }
+
@Override
public boolean isPresent(long logEntryIndex) {
if (logEntryIndex > lastIndex()) {
@Override
public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm) {
+ Preconditions.checkArgument(snapshotCapturedIndex >= snapshotIndex,
+ "snapshotCapturedIndex must be greater than or equal to snapshotIndex");
+
snapshottedJournal = new ArrayList<>(journal.size());
- snapshottedJournal.addAll(journal.subList(0, (int)(snapshotCapturedIndex - snapshotIndex)));
+ List<ReplicatedLogEntry> snapshotJournalEntries = journal.subList(0, (int) (snapshotCapturedIndex - snapshotIndex));
+
+ snapshottedJournal.addAll(snapshotJournalEntries);
clear(0, (int) (snapshotCapturedIndex - snapshotIndex));
previousSnapshotIndex = snapshotIndex;
previousSnapshotIndex = -1;
previousSnapshotTerm = -1;
dataSize = 0;
+ // need to recalc the datasize based on the entries left after precommit.
+ for(ReplicatedLogEntry logEntry : journal) {
+ dataSize += logEntry.size();
+ }
+
}
@Override
* The interval in which the leader needs to check itself if its isolated
* @return FiniteDuration
*/
- FiniteDuration getIsolatedCheckInterval();
+ long getIsolatedCheckIntervalInMillis();
/**
private FiniteDuration heartBeatInterval = HEART_BEAT_INTERVAL;
private long snapshotBatchCount = SNAPSHOT_BATCH_COUNT;
private int journalRecoveryLogBatchSize = JOURNAL_RECOVERY_LOG_BATCH_SIZE;
- private FiniteDuration isolatedLeaderCheckInterval =
- new FiniteDuration(HEART_BEAT_INTERVAL.length() * 1000, HEART_BEAT_INTERVAL.unit());
+ private long isolatedLeaderCheckInterval = HEART_BEAT_INTERVAL.$times(1000).toMillis();
// 12 is just an arbitrary percentage. This is the amount of the total memory that a raft actor's
// in-memory journal can use before it needs to snapshot
}
public void setIsolatedLeaderCheckInterval(FiniteDuration isolatedLeaderCheckInterval) {
- this.isolatedLeaderCheckInterval = isolatedLeaderCheckInterval;
+ this.isolatedLeaderCheckInterval = isolatedLeaderCheckInterval.toMillis();
}
public void setElectionTimeoutFactor(long electionTimeoutFactor){
}
@Override
- public FiniteDuration getIsolatedCheckInterval() {
+ public long getIsolatedCheckIntervalInMillis() {
return isolatedLeaderCheckInterval;
}
* This will stop the timeout clock
*/
void markFollowerInActive();
+
+
+ /**
+ * This will return the active time of follower, since it was last reset
+ * @return time in milliseconds
+ */
+ long timeSinceLastActivity();
+
}
private final String id;
- private final Stopwatch stopwatch = new Stopwatch();
+ private final Stopwatch stopwatch = Stopwatch.createUnstarted();
private final long followerTimeoutMillis;
stopwatch.stop();
}
}
+
+ @Override
+ public long timeSinceLastActivity() {
+ return stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("FollowerLogInformationImpl [id=").append(id).append(", nextIndex=").append(nextIndex)
+ .append(", matchIndex=").append(matchIndex).append(", stopwatch=")
+ .append(stopwatch.elapsed(TimeUnit.MILLISECONDS))
+ .append(", followerTimeoutMillis=").append(followerTimeoutMillis).append("]");
+ return builder.toString();
+ }
+
+
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Procedure;
import akka.persistence.RecoveryCompleted;
import akka.persistence.SaveSnapshotFailure;
import com.google.protobuf.ByteString;
import java.io.Serializable;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
import org.opendaylight.controller.cluster.notifications.RoleChanged;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
-import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* RaftActor encapsulates a state machine that needs to be kept synchronized
* </ul>
*/
public abstract class RaftActor extends AbstractUntypedPersistentActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+
+ private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
+
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
/**
* The current state determines the current behavior of a RaftActor
private CaptureSnapshot captureSnapshot = null;
- private volatile boolean hasSnapshotCaptureInitiated = false;
-
private Stopwatch recoveryTimer;
private int currentRecoveryBatchCount;
-
-
public RaftActor(String id, Map<String, String> peerAddresses) {
this(id, peerAddresses, Optional.<ConfigParams>absent());
}
private void initRecoveryTimer() {
if(recoveryTimer == null) {
- recoveryTimer = new Stopwatch();
- recoveryTimer.start();
+ recoveryTimer = Stopwatch.createStarted();
}
}
context.setLastApplied(snapshot.getLastAppliedIndex());
context.setCommitIndex(snapshot.getLastAppliedIndex());
- Stopwatch timer = new Stopwatch();
- timer.start();
+ Stopwatch timer = Stopwatch.createStarted();
// Apply the snapshot to the actors state
applyRecoverySnapshot(snapshot.getState());
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
+ long elapsedTime = (System.nanoTime() - applyState.getStartTime());
+ if(elapsedTime >= APPLY_STATE_DELAY_THRESHOLD_IN_NANOS){
+ LOG.warn("ApplyState took more time than expected. Elapsed Time = {} ms ApplyState = {}",
+ TimeUnit.NANOSECONDS.toMillis(elapsedTime), applyState);
+ }
+
if(LOG.isDebugEnabled()) {
LOG.debug("{}: Applying state for log index {} data {}",
persistenceId(), applyState.getReplicatedLogEntry().getIndex(),
} else if (message instanceof SaveSnapshotFailure) {
SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
- LOG.error(saveSnapshotFailure.cause(), "{}: SaveSnapshotFailure received for snapshot Cause:",
- persistenceId());
+ LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:",
+ persistenceId(), saveSnapshotFailure.cause());
context.getReplicatedLog().snapshotRollback();
handleCaptureSnapshotReply(((CaptureSnapshotReply) message).getSnapshot());
} else {
- if (!(message instanceof AppendEntriesMessages.AppendEntries)
- && !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: onReceiveCommand: message: {}", persistenceId(), message.getClass());
- }
- }
-
RaftActorBehavior oldBehavior = currentBehavior;
currentBehavior = currentBehavior.handleMessage(getSender(), message);
self().tell(new ApplyLogEntries((int) replicatedLogEntry.getIndex()), self());
// Check if the "real" snapshot capture has been initiated. If no then do the fake snapshot
- if(!hasSnapshotCaptureInitiated){
+ if(!context.isSnapshotCaptureInitiated()){
raftContext.getReplicatedLog().snapshotPreCommit(raftContext.getLastApplied(),
raftContext.getTermInformation().getCurrentTerm());
raftContext.getReplicatedLog().snapshotCommit();
/**
* This method is called during recovery to reconstruct the state of the actor.
*
- * @param snapshot A snapshot of the state of the actor
+ * @param snapshotBytes A snapshot of the state of the actor
*/
protected abstract void applyRecoverySnapshot(byte[] snapshotBytes);
LOG.info("{}: Persisting of snapshot done:{}", persistenceId(), sn.getLogMessage());
- //be greedy and remove entries from in-mem journal which are in the snapshot
- // and update snapshotIndex and snapshotTerm without waiting for the success,
+ long dataThreshold = Runtime.getRuntime().totalMemory() *
+ getRaftActorContext().getConfigParams().getSnapshotDataThresholdPercentage() / 100;
+ if (context.getReplicatedLog().dataSize() > dataThreshold) {
+ // if memory is less, clear the log based on lastApplied.
+ // this could/should only happen if one of the followers is down
+ // as normally we keep removing from the log when its replicated to all.
+ context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getLastAppliedIndex(),
+ captureSnapshot.getLastAppliedTerm());
+
+ getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+ } else if(captureSnapshot.getReplicatedToAllIndex() != -1){
+ // clear the log based on replicatedToAllIndex
+ context.getReplicatedLog().snapshotPreCommit(captureSnapshot.getReplicatedToAllIndex(),
+ captureSnapshot.getReplicatedToAllTerm());
+
+ getCurrentBehavior().setReplicatedToAllIndex(captureSnapshot.getReplicatedToAllIndex());
+ } else {
+ // The replicatedToAllIndex was not found in the log
+ // This means that replicatedToAllIndex never moved beyond -1 or that it is already in the snapshot.
+ // In this scenario we may need to save the snapshot to the akka persistence
+ // snapshot for recovery but we do not need to do the replicated log trimming.
+ context.getReplicatedLog().snapshotPreCommit(replicatedLog.getSnapshotIndex(),
+ replicatedLog.getSnapshotTerm());
+ }
- context.getReplicatedLog().snapshotPreCommit(
- captureSnapshot.getLastAppliedIndex(),
- captureSnapshot.getLastAppliedTerm());
LOG.info("{}: Removed in-memory snapshotted entries, adjusted snaphsotIndex:{} " +
"and term:{}", persistenceId(), captureSnapshot.getLastAppliedIndex(),
}
captureSnapshot = null;
- hasSnapshotCaptureInitiated = false;
+ context.setSnapshotCaptureInitiated(false);
}
protected boolean hasFollowers(){
// FIXME: Maybe this should be done after the command is saved
journal.subList(adjustedIndex , journal.size()).clear();
- persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
+ persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>() {
- @Override public void apply(DeleteEntries param)
- throws Exception {
+ @Override
+ public void apply(DeleteEntries param)
+ throws Exception {
//FIXME : Doing nothing for now
dataSize = 0;
- for(ReplicatedLogEntry entry : journal){
+ for (ReplicatedLogEntry entry : journal) {
dataSize += entry.size();
}
}
appendAndPersist(replicatedLogEntry, null);
}
- @Override
- public int dataSize() {
- return dataSize;
- }
-
public void appendAndPersist(
final ReplicatedLogEntry replicatedLogEntry,
final Procedure<ReplicatedLogEntry> callback) {
long dataSizeForCheck = dataSize;
dataSizeSinceLastSnapshot += logEntrySize;
- long journalSize = lastIndex()+1;
+ long journalSize = lastIndex() + 1;
if(!hasFollowers()) {
// When we do not have followers we do not maintain an in-memory log
getRaftActorContext().getConfigParams().getSnapshotDataThresholdPercentage() / 100;
// when a snaphsot is being taken, captureSnapshot != null
- if (hasSnapshotCaptureInitiated == false &&
+ if (!context.isSnapshotCaptureInitiated() &&
( journalSize % context.getConfigParams().getSnapshotBatchCount() == 0 ||
dataSizeForCheck > dataThreshold)) {
dataSizeSinceLastSnapshot = 0;
- LOG.info("{}: Initiating Snapshot Capture..", persistenceId());
+ LOG.info("{}: Initiating Snapshot Capture, journalSize = {}, dataSizeForCheck = {}," +
+ " dataThreshold = {}", persistenceId(), journalSize, dataSizeForCheck, dataThreshold);
+
long lastAppliedIndex = -1;
long lastAppliedTerm = -1;
}
// send a CaptureSnapshot to self to make the expensive operation async.
- getSelf().tell(new CaptureSnapshot(
- lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm),
+ long replicatedToAllIndex = getCurrentBehavior().getReplicatedToAllIndex();
+ ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);
+ getSelf().tell(new CaptureSnapshot(lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1)),
null);
- hasSnapshotCaptureInitiated = true;
+ context.setSnapshotCaptureInitiated(true);
}
- if(callback != null){
+ if (callback != null){
callback.apply(replicatedLogEntry);
}
}
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
-import akka.event.LoggingAdapter;
-
import java.util.Map;
+import org.slf4j.Logger;
/**
* The RaftActorContext contains that portion of the RaftActors state that
*
* @param replicatedLog
*/
- public void setReplicatedLog(ReplicatedLog replicatedLog);
+ void setReplicatedLog(ReplicatedLog replicatedLog);
/**
* @return A representation of the log
*
* @return
*/
- LoggingAdapter getLogger();
+ Logger getLogger();
/**
* Get a mapping of peerId's to their addresses
*
* @param name
*/
- public void removePeer(String name);
+ void removePeer(String name);
/**
* Given a peerId return the corresponding actor
/**
* @return ConfigParams
*/
- public ConfigParams getConfigParams();
+ ConfigParams getConfigParams();
+
+ void setSnapshotCaptureInitiated(boolean snapshotCaptureInitiated);
+
+ boolean isSnapshotCaptureInitiated();
+
}
package org.opendaylight.controller.cluster.raft;
+import static com.google.common.base.Preconditions.checkState;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActorContext;
-import akka.event.LoggingAdapter;
-
import java.util.Map;
-
-import static com.google.common.base.Preconditions.checkState;
+import org.slf4j.Logger;
public class RaftActorContextImpl implements RaftActorContext {
private final Map<String, String> peerAddresses;
- private final LoggingAdapter LOG;
+ private final Logger LOG;
private final ConfigParams configParams;
+ private boolean snapshotCaptureInitiated;
+
public RaftActorContextImpl(ActorRef actor, UntypedActorContext context,
String id,
ElectionTerm termInformation, long commitIndex,
long lastApplied, ReplicatedLog replicatedLog,
Map<String, String> peerAddresses, ConfigParams configParams,
- LoggingAdapter logger) {
+ Logger logger) {
this.actor = actor;
this.context = context;
this.id = id;
return context.system();
}
- @Override public LoggingAdapter getLogger() {
+ @Override public Logger getLogger() {
return this.LOG;
}
return configParams;
}
+ @Override
+ public void setSnapshotCaptureInitiated(boolean snapshotCaptureInitiated) {
+ this.snapshotCaptureInitiated = snapshotCaptureInitiated;
+ }
+
+ @Override
+ public boolean isSnapshotCaptureInitiated() {
+ return snapshotCaptureInitiated;
+ }
+
@Override public void addToPeers(String name, String address) {
peerAddresses.put(name, address);
}
* sets snapshot term
* @param snapshotTerm
*/
- public void setSnapshotTerm(long snapshotTerm);
+ void setSnapshotTerm(long snapshotTerm);
/**
* Clears the journal entries with startIndex(inclusive) and endIndex (exclusive)
* @param startIndex
* @param endIndex
*/
- public void clear(int startIndex, int endIndex);
+ void clear(int startIndex, int endIndex);
/**
* Handles all the bookkeeping in order to perform a rollback in the
* @param snapshotCapturedIndex
* @param snapshotCapturedTerm
*/
- public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
+ void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
/**
* Sets the Replicated log to state after snapshot success.
*/
- public void snapshotCommit();
+ void snapshotCommit();
/**
* Restores the replicated log to a state in the event of a save snapshot failure
*/
- public void snapshotRollback();
+ void snapshotRollback();
/**
* Size of the data in the log (in bytes)
*/
- public int dataSize();
+ int dataSize();
+
}
public class Snapshot implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -8298574936724056236L;
+
private final byte[] state;
private final List<ReplicatedLogEntry> unAppliedEntries;
private final long lastIndex;
package org.opendaylight.controller.cluster.raft.base.messages;
import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-
import java.io.Serializable;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
public class ApplyState implements Serializable {
private static final long serialVersionUID = 1L;
private final ActorRef clientActor;
private final String identifier;
private final ReplicatedLogEntry replicatedLogEntry;
+ private final long startTime;
public ApplyState(ActorRef clientActor, String identifier,
ReplicatedLogEntry replicatedLogEntry) {
this.clientActor = clientActor;
this.identifier = identifier;
this.replicatedLogEntry = replicatedLogEntry;
+ this.startTime = System.nanoTime();
}
public ActorRef getClientActor() {
public ReplicatedLogEntry getReplicatedLogEntry() {
return replicatedLogEntry;
}
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ @Override
+ public String toString() {
+ return "ApplyState{" +
+ "identifier='" + identifier + '\'' +
+ ", replicatedLogEntry.index =" + replicatedLogEntry.getIndex() +
+ ", startTime=" + startTime +
+ '}';
+ }
}
private long lastIndex;
private long lastTerm;
private boolean installSnapshotInitiated;
+ private long replicatedToAllIndex;
+ private long replicatedToAllTerm;
public CaptureSnapshot(long lastIndex, long lastTerm,
- long lastAppliedIndex, long lastAppliedTerm) {
- this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, false);
+ long lastAppliedIndex, long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm) {
+ this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, replicatedToAllIndex, replicatedToAllTerm, false);
}
public CaptureSnapshot(long lastIndex, long lastTerm,long lastAppliedIndex,
- long lastAppliedTerm, boolean installSnapshotInitiated) {
+ long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm, boolean installSnapshotInitiated) {
this.lastIndex = lastIndex;
this.lastTerm = lastTerm;
this.lastAppliedIndex = lastAppliedIndex;
this.lastAppliedTerm = lastAppliedTerm;
this.installSnapshotInitiated = installSnapshotInitiated;
+ this.replicatedToAllIndex = replicatedToAllIndex;
+ this.replicatedToAllTerm = replicatedToAllTerm;
}
public long getLastAppliedIndex() {
public boolean isInstallSnapshotInitiated() {
return installSnapshotInitiated;
}
+
+ public long getReplicatedToAllIndex() {
+ return replicatedToAllIndex;
+ }
+
+ public long getReplicatedToAllTerm() {
+ return replicatedToAllTerm;
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import java.io.Serializable;
-
-/**
- * Message sent to commit an entry to the log
- */
-public class CommitEntry implements Serializable {
- private static final long serialVersionUID = 1L;
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import java.io.Serializable;
-
-/**
- * Message sent to Persist an entry into the transaction journal
- */
-public class PersistEntry implements Serializable {
- private static final long serialVersionUID = 1L;
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import java.io.Serializable;
-
-/**
- * This message is sent by a RaftActor to itself so that a subclass can process
- * it and use it to save it's state
- */
-public class SaveSnapshot implements Serializable {
- private static final long serialVersionUID = 1L;
-}
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
private Optional<ByteString> snapshot;
public AbstractLeader(RaftActorContext context) {
- super(context);
+ super(context, RaftState.Leader);
final Builder<String, FollowerLogInformation> ftlBuilder = ImmutableMap.builder();
for (String followerId : context.getPeerAddresses().keySet()) {
leaderId = context.getId();
- LOG.debug("{}: Election: Leader has following peers: {}", context.getId(), getFollowerIds());
+ LOG.debug("{}: Election: Leader has following peers: {}", logName(), getFollowerIds());
minReplicationCount = getMajorityVoteCount(getFollowerIds().size());
// Upon election: send initial empty AppendEntries RPCs
// (heartbeat) to each server; repeat during idle periods to
// prevent election timeouts (§5.2)
- scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
+ sendAppendEntries(0, false);
+
+ // It is important to schedule this heartbeat here
+ scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
}
/**
return followerToLog.keySet();
}
- private Optional<ByteString> getSnapshot() {
- return snapshot;
- }
-
@VisibleForTesting
void setSnapshot(Optional<ByteString> snapshot) {
this.snapshot = snapshot;
protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
- }
+ LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
return this;
}
protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
AppendEntriesReply appendEntriesReply) {
- if(! appendEntriesReply.isSuccess()) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntriesReply: {}", context.getId(), appendEntriesReply);
- }
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);
+ } else if(LOG.isDebugEnabled() && !appendEntriesReply.isSuccess()) {
+ LOG.debug("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);
}
// Update the FollowerLogInformation
followerToLog.get(followerId);
if(followerLogInformation == null){
- LOG.error("{}: handleAppendEntriesReply - unknown follower {}", context.getId(), followerId);
+ LOG.error("{}: handleAppendEntriesReply - unknown follower {}", logName(), followerId);
return this;
}
+ if(followerLogInformation.timeSinceLastActivity() >
+ context.getConfigParams().getElectionTimeOutInterval().toMillis()) {
+ LOG.error("{} : handleAppendEntriesReply delayed beyond election timeout, " +
+ "appendEntriesReply : {}, timeSinceLastActivity : {}, lastApplied : {}, commitIndex : {}",
+ logName(), appendEntriesReply, followerLogInformation.timeSinceLastActivity(),
+ context.getLastApplied(), context.getCommitIndex());
+ }
+
followerLogInformation.markFollowerActive();
if (appendEntriesReply.isSuccess()) {
// Apply the change to the state machine
if (context.getCommitIndex() > context.getLastApplied()) {
+ LOG.debug("{}: handleAppendEntriesReply: applying to log - commitIndex: {}, lastAppliedIndex: {}",
+ logName(), context.getCommitIndex(), context.getLastApplied());
+
applyLogToStateMachine(context.getCommitIndex());
}
+ if (!context.isSnapshotCaptureInitiated()) {
+ purgeInMemoryLog();
+ }
+
+ //Send the next log entry immediately, if possible, no need to wait for heartbeat to trigger that event
+ sendUpdatesToFollower(followerId, followerLogInformation, false, false);
return this;
}
+ private void purgeInMemoryLog() {
+ // Find the lowest index across followers which has been replicated to all
+ // (fall back to lastApplied if there are no followers, so that we keep clearing the log for a single node).
+ // We delete the in-memory log from that index onward, in order to minimize memory usage.
+ // We also share this info through AppendEntries with the followers so that they can delete their log entries as well.
+ long minReplicatedToAllIndex = followerToLog.isEmpty() ? context.getLastApplied() : Long.MAX_VALUE;
+ for (FollowerLogInformation info : followerToLog.values()) {
+ minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());
+ }
+
+ super.performSnapshotWithoutCapture(minReplicatedToAllIndex);
+ }
+
@Override
protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
final Iterator<ClientRequestTracker> it = trackerList.iterator();
return this;
}
- @Override
- public RaftState state() {
- return RaftState.Leader;
- }
+ protected void beforeSendHeartbeat(){}
@Override
public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+ LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+ logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
return switchBehavior(new Follower(context));
}
}
- try {
- if (message instanceof SendHeartBeat) {
- sendHeartBeat();
- return this;
-
- } else if(message instanceof InitiateInstallSnapshot) {
- installSnapshotIfNeeded();
+ if (message instanceof SendHeartBeat) {
+ beforeSendHeartbeat();
+ sendHeartBeat();
+ scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
+ return this;
- } else if(message instanceof SendInstallSnapshot) {
- // received from RaftActor
- setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
- sendInstallSnapshot();
+ } else if(message instanceof SendInstallSnapshot) {
+ // received from RaftActor
+ setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
+ sendInstallSnapshot();
- } else if (message instanceof Replicate) {
- replicate((Replicate) message);
+ } else if (message instanceof Replicate) {
+ replicate((Replicate) message);
- } else if (message instanceof InstallSnapshotReply){
- handleInstallSnapshotReply((InstallSnapshotReply) message);
+ } else if (message instanceof InstallSnapshotReply){
+ handleInstallSnapshotReply((InstallSnapshotReply) message);
- }
- } finally {
- scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
}
+
return super.handleMessage(sender, message);
}
private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
+ LOG.debug("{}: handleInstallSnapshotReply: {}", logName(), reply);
+
String followerId = reply.getFollowerId();
FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+
+ if (followerToSnapshot == null) {
+ LOG.error("{}: FollowerId {} in InstallSnapshotReply not known to Leader",
+ logName(), followerId);
+ return;
+ }
+
FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
followerLogInformation.markFollowerActive();
- if (followerToSnapshot != null &&
- followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
-
+ if (followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
+ boolean wasLastChunk = false;
if (reply.isSuccess()) {
if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
//this was the last chunk reply
if(LOG.isDebugEnabled()) {
LOG.debug("{}: InstallSnapshotReply received, " +
- "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
- context.getId(), reply.getChunkIndex(), followerId,
+ "last chunk received, Chunk: {}. Follower: {} Setting nextIndex: {}",
+ logName(), reply.getChunkIndex(), followerId,
context.getReplicatedLog().getSnapshotIndex() + 1
);
}
context.getReplicatedLog().getSnapshotIndex() + 1);
mapFollowerToSnapshot.remove(followerId);
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: followerToLog.get(followerId).getNextIndex()=" +
- context.getId(), followerToLog.get(followerId).getNextIndex());
- }
+ LOG.debug("{}: follower: {}, matchIndex set to {}, nextIndex set to {}",
+ logName(), followerId, followerLogInformation.getMatchIndex(),
+ followerLogInformation.getNextIndex());
if (mapFollowerToSnapshot.isEmpty()) {
// once there are no pending followers receiving snapshots
// we can remove snapshot from the memory
setSnapshot(Optional.<ByteString>absent());
}
+ wasLastChunk = true;
} else {
followerToSnapshot.markSendStatus(true);
}
} else {
LOG.info("{}: InstallSnapshotReply received sending snapshot chunk failed, Will retry, Chunk: {}",
- context.getId(), reply.getChunkIndex());
+ logName(), reply.getChunkIndex());
followerToSnapshot.markSendStatus(false);
}
+ if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ if(followerActor != null) {
+ sendSnapshotChunk(followerActor, followerId);
+ }
+ }
+
} else {
- LOG.error("{}: FollowerId in InstallSnapshotReply not known to Leader" +
- " or Chunk Index in InstallSnapshotReply not matching {} != {}",
- context.getId(), followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
- );
+ LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
+ logName(), reply.getChunkIndex(), followerId,
+ followerToSnapshot.getChunkIndex());
if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
// Since the Follower did not find this index to be valid we should reset the follower snapshot
private void replicate(Replicate replicate) {
long logIndex = replicate.getReplicatedLogEntry().getIndex();
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Replicate message {}", context.getId(), logIndex);
- }
+ LOG.debug("{}: Replicate message: identifier: {}, logIndex: {}", logName(),
+ replicate.getIdentifier(), logIndex);
// Create a tracker entry we will use this later to notify the
// client actor
context.setCommitIndex(logIndex);
applyLogToStateMachine(logIndex);
} else {
- sendAppendEntries();
+ sendAppendEntries(0, false);
}
}
- private void sendAppendEntries() {
+ private void sendAppendEntries(long timeSinceLastActivityInterval, boolean isHeartbeat) {
// Send an AppendEntries to all followers
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
final String followerId = e.getKey();
- ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ final FollowerLogInformation followerLogInformation = e.getValue();
+ // This check helps avoid sending a repeat message to the follower
+ if(!followerLogInformation.isFollowerActive() ||
+ followerLogInformation.timeSinceLastActivity() >= timeSinceLastActivityInterval) {
+ sendUpdatesToFollower(followerId, followerLogInformation, true, isHeartbeat);
+ }
+ }
+ }
- if (followerActor != null) {
- FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
- long followerNextIndex = followerLogInformation.getNextIndex();
- boolean isFollowerActive = followerLogInformation.isFollowerActive();
-
- if (mapFollowerToSnapshot.get(followerId) != null) {
- // if install snapshot is in process , then sent next chunk if possible
- if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
- sendSnapshotChunk(followerActor, followerId);
- } else {
- // we send a heartbeat even if we have not received a reply for the last chunk
- sendAppendEntriesToFollower(followerActor, followerNextIndex,
- Collections.<ReplicatedLogEntry>emptyList());
- }
+ /**
+ *
+ * This method checks if any update needs to be sent to the given follower. This includes appending log entries,
+ * sending the next snapshot chunk, and initiating a snapshot capture.
+ * @param followerId the id of the follower to update
+ * @param followerLogInformation the log state information tracked for the follower
+ * @param sendHeartbeat if true, send an empty AppendEntries even when there is nothing new to replicate
+ * @param isHeartbeat true if this update was triggered by the heartbeat timer (affects logging only)
+ */
- } else {
- long leaderLastIndex = context.getReplicatedLog().lastIndex();
- long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
- final List<ReplicatedLogEntry> entries;
-
- if (isFollowerActive &&
- context.getReplicatedLog().isPresent(followerNextIndex)) {
- // FIXME : Sending one entry at a time
- entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
-
- } else if (isFollowerActive && followerNextIndex >= 0 &&
- leaderLastIndex >= followerNextIndex ) {
- // if the followers next index is not present in the leaders log, and
- // if the follower is just not starting and if leader's index is more than followers index
- // then snapshot should be sent
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
- "follower-nextIndex: %s, leader-snapshot-index: %s, " +
- "leader-last-index: %s", context.getId(), followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
- }
- actor().tell(new InitiateInstallSnapshot(), actor());
-
- // we would want to sent AE as the capture snapshot might take time
- entries = Collections.<ReplicatedLogEntry>emptyList();
-
- } else {
- //we send an AppendEntries, even if the follower is inactive
- // in-order to update the followers timestamp, in case it becomes active again
- entries = Collections.<ReplicatedLogEntry>emptyList();
+ private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
+ boolean sendHeartbeat, boolean isHeartbeat) {
+
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ if (followerActor != null) {
+ long followerNextIndex = followerLogInformation.getNextIndex();
+ boolean isFollowerActive = followerLogInformation.isFollowerActive();
+
+ if (mapFollowerToSnapshot.get(followerId) != null) {
+ // if install snapshot is in process , then sent next chunk if possible
+ if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
+ sendSnapshotChunk(followerActor, followerId);
+ } else if(sendHeartbeat) {
+ // we send a heartbeat even if we have not received a reply for the last chunk
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ }
+ } else {
+ long leaderLastIndex = context.getReplicatedLog().lastIndex();
+ long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
+
+ if(!isHeartbeat || LOG.isTraceEnabled()) {
+ LOG.debug("{}: Checking sendAppendEntries for follower {}, leaderLastIndex: {}, leaderSnapShotIndex: {}",
+ logName(), followerId, leaderLastIndex, leaderSnapShotIndex);
+ }
+
+ if (isFollowerActive && context.getReplicatedLog().isPresent(followerNextIndex)) {
+
+ LOG.debug("{}: sendAppendEntries: {} is present for follower {}", logName(),
+ followerNextIndex, followerId);
+
+ // FIXME : Sending one entry at a time
+ final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
+
+ sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
+
+ } else if (isFollowerActive && followerNextIndex >= 0 &&
+ leaderLastIndex > followerNextIndex && !context.isSnapshotCaptureInitiated()) {
+ // if the followers next index is not present in the leaders log, and
+ // if the follower is just not starting and if leader's index is more than followers index
+ // then snapshot should be sent
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
+ "follower-nextIndex: %d, leader-snapshot-index: %d, " +
+ "leader-last-index: %d", logName(), followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
}
- sendAppendEntriesToFollower(followerActor, followerNextIndex, entries);
+ // Send heartbeat to follower whenever install snapshot is initiated.
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
+ initiateCaptureSnapshot(followerId, followerNextIndex);
+
+ } else if(sendHeartbeat) {
+ //we send an AppendEntries, even if the follower is inactive
+ // in-order to update the followers timestamp, in case it becomes active again
+ sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
+ Collections.<ReplicatedLogEntry>emptyList(), followerId);
}
+
}
}
}
private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
- List<ReplicatedLogEntry> entries) {
- followerActor.tell(
- new AppendEntries(currentTerm(), context.getId(),
- prevLogIndex(followerNextIndex),
- prevLogTerm(followerNextIndex), entries,
- context.getCommitIndex()).toSerializable(),
- actor()
- );
+ List<ReplicatedLogEntry> entries, String followerId) {
+ AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
+ prevLogIndex(followerNextIndex),
+ prevLogTerm(followerNextIndex), entries,
+ context.getCommitIndex(), super.getReplicatedToAllIndex());
+
+ if(!entries.isEmpty() || LOG.isTraceEnabled()) {
+ LOG.debug("{}: Sending AppendEntries to follower {}: {}", logName(), followerId,
+ appendEntries);
+ }
+
+ followerActor.tell(appendEntries.toSerializable(), actor());
}
/**
- * An installSnapshot is scheduled at a interval that is a multiple of
- * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
- * snapshots at every heartbeat.
- *
* Install Snapshot works as follows
- * 1. Leader sends a InitiateInstallSnapshot message to self
- * 2. Leader then initiates the capture snapshot by sending a CaptureSnapshot message to actor
- * 3. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
+ * 1. Leader initiates the capture snapshot by sending a CaptureSnapshot message to actor
+ * 2. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
* and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
- * 4. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
- * 5. On complete, Follower sends back a InstallSnapshotReply.
- * 6. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
+ * 3. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
+ * 4. On complete, Follower sends back a InstallSnapshotReply.
+ * 5. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
* and replenishes the memory by deleting the snapshot in Replicated log.
- *
+ * 6. If another follower requires a snapshot and a snapshot has been collected (via CaptureSnapshotReply)
+ * then send the existing snapshot in chunks to the follower.
+ * @param followerId
+ * @param followerNextIndex
*/
- private void installSnapshotIfNeeded() {
- for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
- final ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
-
- if (followerActor != null) {
- long nextIndex = e.getValue().getNextIndex();
-
- if (!context.getReplicatedLog().isPresent(nextIndex) &&
- context.getReplicatedLog().isInSnapshot(nextIndex)) {
- LOG.info("{}: {} follower needs a snapshot install", context.getId(), e.getKey());
- if (snapshot.isPresent()) {
- // if a snapshot is present in the memory, most likely another install is in progress
- // no need to capture snapshot
- sendSnapshotChunk(followerActor, e.getKey());
-
- } else {
- initiateCaptureSnapshot();
- //we just need 1 follower who would need snapshot to be installed.
- // when we have the snapshot captured, we would again check (in SendInstallSnapshot)
- // who needs an install and send to all who need
- break;
- }
+ private void initiateCaptureSnapshot(String followerId, long followerNextIndex) {
+ if (!context.getReplicatedLog().isPresent(followerNextIndex) &&
+ context.getReplicatedLog().isInSnapshot(followerNextIndex)) {
+ if (snapshot.isPresent()) {
+ // if a snapshot is present in the memory, most likely another install is in progress
+ // no need to capture snapshot.
+ // This could happen if another follower needs an install when one is going on.
+ final ActorSelection followerActor = context.getPeerActorSelection(followerId);
+ sendSnapshotChunk(followerActor, followerId);
+
+ } else if (!context.isSnapshotCaptureInitiated()) {
+
+ LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", logName(), getLeaderId());
+ ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
+ long lastAppliedIndex = -1;
+ long lastAppliedTerm = -1;
+
+ if (lastAppliedEntry != null) {
+ lastAppliedIndex = lastAppliedEntry.getIndex();
+ lastAppliedTerm = lastAppliedEntry.getTerm();
+ } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
+ lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
+ lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
}
+
+ boolean isInstallSnapshotInitiated = true;
+ long replicatedToAllIndex = super.getReplicatedToAllIndex();
+ ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);
+ actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
+ (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1),
+ isInstallSnapshotInitiated), actor());
+ context.setSnapshotCaptureInitiated(true);
}
}
}
- // on every install snapshot, we try to capture the snapshot.
- // Once a capture is going on, another one issued will get ignored by RaftActor.
- private void initiateCaptureSnapshot() {
- LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", context.getId(), getLeaderId());
- ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
- long lastAppliedIndex = -1;
- long lastAppliedTerm = -1;
-
- if (lastAppliedEntry != null) {
- lastAppliedIndex = lastAppliedEntry.getIndex();
- lastAppliedTerm = lastAppliedEntry.getTerm();
- } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
- lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
- lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
- }
-
- boolean isInstallSnapshotInitiated = true;
- actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
- lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
- actor());
- }
-
private void sendInstallSnapshot() {
+ LOG.debug("{}: sendInstallSnapshot", logName());
for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
try {
if (snapshot.isPresent()) {
+ ByteString nextSnapshotChunk = getNextSnapshotChunk(followerId, snapshot.get());
+
+ // Note: the previous call to getNextSnapshotChunk has the side-effect of adding
+ // followerId to the followerToSnapshot map.
+ FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+
followerActor.tell(
new InstallSnapshot(currentTerm(), context.getId(),
context.getReplicatedLog().getSnapshotIndex(),
context.getReplicatedLog().getSnapshotTerm(),
- getNextSnapshotChunk(followerId,snapshot.get()),
- mapFollowerToSnapshot.get(followerId).incrementChunkIndex(),
- mapFollowerToSnapshot.get(followerId).getTotalChunks(),
- Optional.of(mapFollowerToSnapshot.get(followerId).getLastChunkHashCode())
+ nextSnapshotChunk,
+ followerToSnapshot.incrementChunkIndex(),
+ followerToSnapshot.getTotalChunks(),
+ Optional.of(followerToSnapshot.getLastChunkHashCode())
).toSerializable(),
actor()
);
LOG.info("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
- context.getId(), followerActor.path(),
- mapFollowerToSnapshot.get(followerId).getChunkIndex(),
- mapFollowerToSnapshot.get(followerId).getTotalChunks());
+ logName(), followerActor.path(),
+ followerToSnapshot.getChunkIndex(),
+ followerToSnapshot.getTotalChunks());
}
} catch (IOException e) {
- LOG.error(e, "{}: InstallSnapshot failed for Leader.", context.getId());
+ LOG.error("{}: InstallSnapshot failed for Leader.", logName(), e);
}
}
mapFollowerToSnapshot.put(followerId, followerToSnapshot);
}
ByteString nextChunk = followerToSnapshot.getNextChunk();
- if (LOG.isDebugEnabled()) {
- LOG.debug("{}: Leader's snapshot nextChunk size:{}", context.getId(), nextChunk.size());
- }
+
+ LOG.debug("{}: next snapshot chunk size for follower {}: {}", logName(), followerId, nextChunk.size());
+
return nextChunk;
}
private void sendHeartBeat() {
if (!followerToLog.isEmpty()) {
- sendAppendEntries();
+ LOG.trace("{}: Sending heartbeat", logName());
+ sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis(), true);
}
}
((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
if(LOG.isDebugEnabled()) {
LOG.debug("{}: Snapshot {} bytes, total chunks to send:{}",
- context.getId(), size, totalChunks);
+ logName(), size, totalChunks);
}
replyReceivedForOffset = -1;
chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
}
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Next chunk: length={}, offset={},size={}", context.getId(),
+
+ LOG.debug("{}: Next chunk: length={}, offset={},size={}", logName(),
snapshotLength, start, size);
- }
+
ByteString substring = getSnapshotBytes().substring(start, start + size);
nextChunkHashCode = substring.hashCode();
return substring;
import akka.actor.ActorRef;
import akka.actor.Cancellable;
-import akka.event.LoggingAdapter;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.slf4j.Logger;
import scala.concurrent.duration.FiniteDuration;
/**
/**
*
*/
- protected final LoggingAdapter LOG;
+ protected final Logger LOG;
/**
*
*/
protected String leaderId = null;
+ private long replicatedToAllIndex = -1;
- protected AbstractRaftActorBehavior(RaftActorContext context) {
+ private final String logName;
+
+ private final RaftState state;
+
+ protected AbstractRaftActorBehavior(RaftActorContext context, RaftState state) {
this.context = context;
+ this.state = state;
this.LOG = context.getLogger();
+
+ logName = String.format("%s (%s)", context.getId(), state);
+ }
+
+ @Override
+ public RaftState state() {
+ return state;
+ }
+
+ public String logName() {
+ return logName;
+ }
+
+ @Override
+ public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+ this.replicatedToAllIndex = replicatedToAllIndex;
+ }
+
+ @Override
+ public long getReplicatedToAllIndex() {
+ return replicatedToAllIndex;
}
/**
if (appendEntries.getTerm() < currentTerm()) {
if(LOG.isDebugEnabled()) {
LOG.debug("{}: Cannot append entries because sender term {} is less than {}",
- context.getId(), appendEntries.getTerm(), currentTerm());
+ logName(), appendEntries.getTerm(), currentTerm());
}
sender.tell(
* @param requestVote
* @return
*/
- protected RaftActorBehavior requestVote(ActorRef sender,
- RequestVote requestVote) {
+ protected RaftActorBehavior requestVote(ActorRef sender, RequestVote requestVote) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Received {}", context.getId(), requestVote);
- }
+ LOG.debug("{}: In requestVote: {}", logName(), requestVote);
boolean grantVote = false;
}
}
- sender.tell(new RequestVoteReply(currentTerm(), grantVote), actor());
+ RequestVoteReply reply = new RequestVoteReply(currentTerm(), grantVote);
+
+ LOG.debug("{}: requestVote returning: {}", logName(), reply);
+
+ sender.tell(reply, actor());
return this;
}
} else {
//if one index is not present in the log, no point in looping
// around as the rest wont be present either
- LOG.warning(
+ LOG.warn(
"{}: Missing index {} from log. Cannot apply state. Ignoring {} to {}",
- context.getId(), i, i, index);
+ logName(), i, i, index);
break;
}
}
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Setting last applied to {}", context.getId(), newLastApplied);
+ LOG.debug("{}: Setting last applied to {}", logName(), newLastApplied);
}
context.setLastApplied(newLastApplied);
}
protected RaftActorBehavior switchBehavior(RaftActorBehavior behavior) {
- LOG.info("{} :- Switching from behavior {} to {}", context.getId(), this.state(), behavior.state());
+ LOG.info("{} :- Switching from behavior {} to {}", logName(), this.state(), behavior.state());
try {
close();
} catch (Exception e) {
- LOG.error(e, "{}: Failed to close behavior : {}", context.getId(), this.state());
+ LOG.error("{}: Failed to close behavior : {}", logName(), this.state(), e);
}
return behavior;
return numMajority;
}
+
+
+ /**
+ * Performs a snapshot with no capture on the replicated log.
+ * It clears the log from the supplied index or last-applied-1, whichever is smaller.
+ *
+ * @param snapshotCapturedIndex
+ */
+ protected void performSnapshotWithoutCapture(final long snapshotCapturedIndex) {
+ // we want to keep the lastApplied, as it's used while capturing snapshots
+ long lastApplied = context.getLastApplied();
+ long tempMin = Math.min(snapshotCapturedIndex, (lastApplied > -1 ? lastApplied - 1 : -1));
+
+ if (tempMin > -1 && context.getReplicatedLog().isPresent(tempMin)) {
+ LOG.debug("{}: fakeSnapshot purging log to {} for term {}", logName(), tempMin,
+ context.getTermInformation().getCurrentTerm());
+
+ //use the term of the temp-min, since we check for isPresent, entry will not be null
+ ReplicatedLogEntry entry = context.getReplicatedLog().get(tempMin);
+ context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
+ context.getReplicatedLog().snapshotCommit();
+ setReplicatedToAllIndex(tempMin);
+ }
+ }
+
}
private final Set<String> peers;
public Candidate(RaftActorContext context) {
- super(context);
+ super(context, RaftState.Candidate);
peers = context.getPeerAddresses().keySet();
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Election: Candidate has following peers: {}", context.getId(), peers);
+ LOG.debug("{}: Election: Candidate has following peers: {}", logName(), peers);
}
votesRequired = getMajorityVoteCount(peers.size());
AppendEntries appendEntries) {
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
+ LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
}
return this;
}
@Override protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
- RequestVoteReply requestVoteReply) {
+ RequestVoteReply requestVoteReply) {
+
+ LOG.debug("{}: handleRequestVoteReply: {}, current voteCount: {}", logName(), requestVoteReply,
+ voteCount);
if (requestVoteReply.isVoteGranted()) {
voteCount++;
return this;
}
- @Override public RaftState state() {
- return RaftState.Candidate;
- }
-
@Override
public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
RaftRPC rpc = (RaftRPC) message;
if(LOG.isDebugEnabled()) {
- LOG.debug("{}: RaftRPC message received {} my term is {}", context.getId(), rpc,
+ LOG.debug("{}: RaftRPC message received {}, my term is {}", logName(), rpc,
context.getTermInformation().getCurrentTerm());
}
}
if (message instanceof ElectionTimeout) {
+ LOG.debug("{}: Received ElectionTimeout", logName());
+
if (votesRequired == 0) {
// If there are no peers then we should be a Leader
// We wait for the election timeout to occur before declare
// Increment the election term and vote for self
long currentTerm = context.getTermInformation().getCurrentTerm();
- context.getTermInformation().updateAndPersist(currentTerm + 1,
- context.getId());
+ long newTerm = currentTerm + 1;
+ context.getTermInformation().updateAndPersist(newTerm, context.getId());
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Starting new term {}", context.getId(), (currentTerm + 1));
- }
+ LOG.debug("{}: Starting new term {}", logName(), newTerm);
// Request for a vote
// TODO: Retry request for vote if replies do not arrive in a reasonable
for (String peerId : peers) {
ActorSelection peerActor = context.getPeerActorSelection(peerId);
if(peerActor != null) {
- peerActor.tell(new RequestVote(
+ RequestVote requestVote = new RequestVote(
context.getTermInformation().getCurrentTerm(),
context.getId(),
context.getReplicatedLog().lastIndex(),
- context.getReplicatedLog().lastTerm()),
- context.getActor()
- );
- }
- }
+ context.getReplicatedLog().lastTerm());
+ LOG.debug("{}: Sending {} to peer {}", logName(), requestVote, peerId);
+ peerActor.tell(requestVote, context.getActor());
+ }
+ }
}
@Override public void close() throws Exception {
import akka.actor.ActorRef;
import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.ByteString;
import java.util.ArrayList;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
private SnapshotTracker snapshotTracker = null;
public Follower(RaftActorContext context) {
- super(context);
+ super(context, RaftState.Follower);
scheduleElection(electionDuration());
}
@Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
- }
+ int numLogEntries = appendEntries.getEntries() != null ? appendEntries.getEntries().size() : 0;
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
+ } else if(LOG.isDebugEnabled() && numLogEntries > 0) {
+ LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
}
// TODO : Refactor this method into a bunch of smaller methods
boolean outOfSync = true;
// First check if the logs are in sync or not
- if (lastIndex() == -1
- && appendEntries.getPrevLogIndex() != -1) {
+ long lastIndex = lastIndex();
+ if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {
// The follower's log is out of sync because the leader does have
// an entry at prevLogIndex and this follower has no entries in
// it's log.
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: The followers log is empty and the senders prevLogIndex is {}",
- context.getId(), appendEntries.getPrevLogIndex());
- }
-
- } else if (lastIndex() > -1
- && appendEntries.getPrevLogIndex() != -1
- && !prevEntryPresent) {
+ LOG.debug("{}: The followers log is empty and the senders prevLogIndex is {}",
+ logName(), appendEntries.getPrevLogIndex());
+ } else if (lastIndex > -1 && appendEntries.getPrevLogIndex() != -1 && !prevEntryPresent) {
// The follower's log is out of sync because the Leader's
// prevLogIndex entry was not found in it's log
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: The log is not empty but the prevLogIndex {} was not found in it",
- context.getId(), appendEntries.getPrevLogIndex());
- }
-
- } else if (lastIndex() > -1
- && prevEntryPresent
- && prevLogTerm != appendEntries.getPrevLogTerm()) {
+ LOG.debug("{}: The log is not empty but the prevLogIndex {} was not found in it",
+ logName(), appendEntries.getPrevLogIndex());
+ } else if (lastIndex > -1 && prevEntryPresent && prevLogTerm != appendEntries.getPrevLogTerm()) {
// The follower's log is out of sync because the Leader's
// prevLogIndex entry does exist in the follower's log but it has
// a different term in it
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "{}: Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}"
- , context.getId(), prevLogTerm
- , appendEntries.getPrevLogTerm());
- }
+ LOG.debug(
+ "{}: Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}",
+ logName(), prevLogTerm, appendEntries.getPrevLogTerm());
} else {
outOfSync = false;
}
if (outOfSync) {
// We found that the log was out of sync so just send a negative
// reply and return
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Follower ({}) is out-of-sync, " +
- "so sending negative reply, lastIndex():{}, lastTerm():{}",
- context.getId(), context.getId(), lastIndex(), lastTerm()
- );
- }
- sender.tell(
- new AppendEntriesReply(context.getId(), currentTerm(), false,
- lastIndex(), lastTerm()), actor()
- );
+
+ LOG.debug("{}: Follower is out-of-sync, so sending negative reply, lastIndex: {}, lastTerm: {}",
+ logName(), lastIndex, lastTerm());
+
+ sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
+ lastTerm()), actor());
return this;
}
- if (appendEntries.getEntries() != null
- && appendEntries.getEntries().size() > 0) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Number of entries to be appended = {}", context.getId(),
+ if (appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
+
+ LOG.debug("{}: Number of entries to be appended = {}", logName(),
appendEntries.getEntries().size());
- }
// 3. If an existing entry conflicts with a new one (same index
// but different terms), delete the existing entry and all that
break;
}
- if (newEntry.getTerm() == matchEntry
- .getTerm()) {
+ if (newEntry.getTerm() == matchEntry.getTerm()) {
continue;
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Removing entries from log starting at {}", context.getId(),
+ LOG.debug("{}: Removing entries from log starting at {}", logName(),
matchEntry.getIndex());
- }
// Entries do not match so remove all subsequent entries
- context.getReplicatedLog()
- .removeFromAndPersist(matchEntry.getIndex());
+ context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex());
break;
}
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: After cleanup entries to be added from = {}", context.getId(),
- (addEntriesFrom + lastIndex()));
- }
+ lastIndex = lastIndex();
+ LOG.debug("{}: After cleanup entries to be added from = {}", logName(),
+ (addEntriesFrom + lastIndex));
// 4. Append any new entries not already in the log
- for (int i = addEntriesFrom;
- i < appendEntries.getEntries().size(); i++) {
+ for (int i = addEntriesFrom; i < appendEntries.getEntries().size(); i++) {
+ ReplicatedLogEntry entry = appendEntries.getEntries().get(i);
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Append entry to log {}", context.getId(),
- appendEntries.getEntries().get(i).getData());
- }
- context.getReplicatedLog().appendAndPersist(appendEntries.getEntries().get(i));
- }
+ LOG.debug("{}: Append entry to log {}", logName(), entry.getData());
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Log size is now {}", context.getId(), context.getReplicatedLog().size());
+ context.getReplicatedLog().appendAndPersist(entry);
}
- }
+ LOG.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());
+ }
// 5. If leaderCommit > commitIndex, set commitIndex =
// min(leaderCommit, index of last new entry)
+ lastIndex = lastIndex();
long prevCommitIndex = context.getCommitIndex();
- context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(),
- context.getReplicatedLog().lastIndex()));
+ context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
if (prevCommitIndex != context.getCommitIndex()) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Commit index set to {}", context.getId(), context.getCommitIndex());
- }
+ LOG.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
}
// If commitIndex > lastApplied: increment lastApplied, apply
// log[lastApplied] to state machine (§5.3)
// check if there are any entries to be applied. last-applied can be equal to last-index
if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
- context.getLastApplied() < lastIndex()) {
+ context.getLastApplied() < lastIndex) {
if(LOG.isDebugEnabled()) {
LOG.debug("{}: applyLogToStateMachine, " +
- "appendEntries.getLeaderCommit():{}," +
- "context.getLastApplied():{}, lastIndex():{}", context.getId(),
- appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()
- );
+ "appendEntries.getLeaderCommit(): {}," +
+ "context.getLastApplied(): {}, lastIndex(): {}", logName(),
+ appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
}
applyLogToStateMachine(appendEntries.getLeaderCommit());
}
- sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), true,
- lastIndex(), lastTerm()), actor());
+ AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
+ lastIndex, lastTerm());
+
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: handleAppendEntries returning : {}", logName(), reply);
+ } else if(LOG.isDebugEnabled() && numLogEntries > 0) {
+ LOG.debug("{}: handleAppendEntries returning : {}", logName(), reply);
+ }
+
+ sender.tell(reply, actor());
+
+ if (!context.isSnapshotCaptureInitiated()) {
+ super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
+ }
return this;
}
return this;
}
- @Override public RaftState state() {
- return RaftState.Follower;
- }
-
@Override public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Object message = fromSerializableMessage(originalMessage);
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+ LOG.debug("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
+ logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
}
}
if (message instanceof ElectionTimeout) {
+ LOG.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
return switchBehavior(new Candidate(context));
} else if (message instanceof InstallSnapshot) {
private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: InstallSnapshot received by follower " +
- "datasize:{} , Chunk:{}/{}", context.getId(), installSnapshot.getData().size(),
- installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()
- );
- }
+
+ LOG.debug("{}: InstallSnapshot received from leader {}, datasize: {} , Chunk: {}/{}",
+ logName(), installSnapshot.getLeaderId(), installSnapshot.getData().size(),
+ installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
if(snapshotTracker == null){
snapshotTracker = new SnapshotTracker(LOG, installSnapshot.getTotalChunks());
}
- sender.tell(new InstallSnapshotReply(
- currentTerm(), context.getId(), installSnapshot.getChunkIndex(),
- true), actor());
+ InstallSnapshotReply reply = new InstallSnapshotReply(
+ currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);
+
+ LOG.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);
+
+ sender.tell(reply, actor());
} catch (SnapshotTracker.InvalidChunkException e) {
+ LOG.debug("{}: Exception in InstallSnapshot of follower", logName(), e);
sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
-1, false), actor());
snapshotTracker = null;
} catch (Exception e){
- LOG.error(e, "{}: Exception in InstallSnapshot of follower", context.getId());
+ LOG.error("{}: Exception in InstallSnapshot of follower", logName(), e);
+
//send reply with success as false. The chunk will be sent again on failure
sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
installSnapshot.getChunkIndex(), false), actor());
}
}
- @Override public void close() throws Exception {
+ @Override
+ public void close() throws Exception {
stopElection();
}
@VisibleForTesting
- ByteString getSnapshotChunksCollected(){
- return snapshotTracker != null ? snapshotTracker.getCollectedChunks() : ByteString.EMPTY;
+ SnapshotTracker getSnapshotTracker(){
+ return snapshotTracker;
}
-
-
}
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
-import akka.actor.Cancellable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.IsolatedLeaderCheck;
-import scala.concurrent.duration.FiniteDuration;
/**
* The behavior of a RaftActor when it is in the Leader state
* set commitIndex = N (§5.3, §5.4).
*/
public class Leader extends AbstractLeader {
- private Cancellable installSnapshotSchedule = null;
- private Cancellable isolatedLeaderCheckSchedule = null;
+ private static final IsolatedLeaderCheck ISOLATED_LEADER_CHECK = new IsolatedLeaderCheck();
+ private final Stopwatch isolatedLeaderCheck;
public Leader(RaftActorContext context) {
super(context);
-
- scheduleInstallSnapshotCheck(context.getConfigParams().getIsolatedCheckInterval());
-
- scheduleIsolatedLeaderCheck(
- new FiniteDuration(context.getConfigParams().getHeartBeatInterval().length() * 10,
- context.getConfigParams().getHeartBeatInterval().unit()));
+ isolatedLeaderCheck = Stopwatch.createStarted();
}
@Override public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
if (originalMessage instanceof IsolatedLeaderCheck) {
if (isLeaderIsolated()) {
- LOG.info("{}: At least {} followers need to be active, Switching {} from Leader to IsolatedLeader",
+ LOG.warn("{}: At least {} followers need to be active, Switching {} from Leader to IsolatedLeader",
context.getId(), minIsolatedLeaderPeerCount, leaderId);
+
return switchBehavior(new IsolatedLeader(context));
}
}
return super.handleMessage(sender, originalMessage);
}
- protected void stopInstallSnapshotSchedule() {
- if (installSnapshotSchedule != null && !installSnapshotSchedule.isCancelled()) {
- installSnapshotSchedule.cancel();
- }
- }
-
- protected void scheduleInstallSnapshotCheck(FiniteDuration interval) {
- if (getFollowerIds().isEmpty()) {
- // Optimization - do not bother scheduling a heartbeat as there are
- // no followers
- return;
- }
-
- stopInstallSnapshotSchedule();
-
- // Schedule a message to send append entries to followers that can
- // accept an append entries with some data in it
- installSnapshotSchedule =
- context.getActorSystem().scheduler().scheduleOnce(
- interval,
- context.getActor(), new InitiateInstallSnapshot(),
- context.getActorSystem().dispatcher(), context.getActor());
- }
-
- protected void stopIsolatedLeaderCheckSchedule() {
- if (isolatedLeaderCheckSchedule != null && !isolatedLeaderCheckSchedule.isCancelled()) {
- isolatedLeaderCheckSchedule.cancel();
+ @Override
+ protected void beforeSendHeartbeat(){
+ if(isolatedLeaderCheck.elapsed(TimeUnit.MILLISECONDS) > context.getConfigParams().getIsolatedCheckIntervalInMillis()){
+ context.getActor().tell(ISOLATED_LEADER_CHECK, context.getActor());
+ isolatedLeaderCheck.reset().start();
}
- }
- protected void scheduleIsolatedLeaderCheck(FiniteDuration isolatedCheckInterval) {
- isolatedLeaderCheckSchedule = context.getActorSystem().scheduler().schedule(isolatedCheckInterval, isolatedCheckInterval,
- context.getActor(), new IsolatedLeaderCheck(),
- context.getActorSystem().dispatcher(), context.getActor());
}
@Override
public void close() throws Exception {
- stopInstallSnapshotSchedule();
- stopIsolatedLeaderCheckSchedule();
super.close();
}
* @return
*/
String getLeaderId();
+
+ /**
+ * Sets the index of the log entry that has been replicated to all nodes.
+ * @param replicatedToAllIndex
+ */
+ void setReplicatedToAllIndex(long replicatedToAllIndex);
+
+ /**
+ * Returns the index of the log entry that has been replicated to all nodes.
+ * @return
+ */
+ long getReplicatedToAllIndex();
}
package org.opendaylight.controller.cluster.raft.behaviors;
-import akka.event.LoggingAdapter;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
+import org.slf4j.Logger;
/**
* SnapshotTracker does house keeping for a snapshot that is being installed in chunks on the Follower
*/
public class SnapshotTracker {
- private final LoggingAdapter LOG;
+ private final Logger LOG;
private final int totalChunks;
private ByteString collectedChunks = ByteString.EMPTY;
private int lastChunkIndex = AbstractLeader.FIRST_CHUNK_INDEX - 1;
private boolean sealed = false;
private int lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
- SnapshotTracker(LoggingAdapter LOG, int totalChunks){
+ SnapshotTracker(Logger LOG, int totalChunks){
this.LOG = LOG;
this.totalChunks = totalChunks;
}
}
public static class InvalidChunkException extends Exception {
+ private static final long serialVersionUID = 1L;
+
InvalidChunkException(String message){
super(message);
}
// leader's commitIndex
private final long leaderCommit;
+ // index which has been replicated successfully to all followers, -1 if none
+ private final long replicatedToAllIndex;
+
public AppendEntries(long term, String leaderId, long prevLogIndex,
- long prevLogTerm, List<ReplicatedLogEntry> entries, long leaderCommit) {
+ long prevLogTerm, List<ReplicatedLogEntry> entries, long leaderCommit, long replicatedToAllIndex) {
super(term);
this.leaderId = leaderId;
this.prevLogIndex = prevLogIndex;
this.prevLogTerm = prevLogTerm;
this.entries = entries;
this.leaderCommit = leaderCommit;
+ this.replicatedToAllIndex = replicatedToAllIndex;
}
private void writeObject(ObjectOutputStream out) throws IOException {
return leaderCommit;
}
+ public long getReplicatedToAllIndex() {
+ return replicatedToAllIndex;
+ }
+
+
@Override
public String toString() {
- final StringBuilder sb =
- new StringBuilder("AppendEntries{");
- sb.append("term=").append(getTerm());
- sb.append("leaderId='").append(leaderId).append('\'');
- sb.append(", prevLogIndex=").append(prevLogIndex);
- sb.append(", prevLogTerm=").append(prevLogTerm);
- sb.append(", entries=").append(entries);
- sb.append(", leaderCommit=").append(leaderCommit);
- sb.append('}');
- return sb.toString();
+ StringBuilder builder = new StringBuilder();
+ builder.append("AppendEntries [term=").append(term).append(", leaderId=").append(leaderId)
+ .append(", prevLogIndex=").append(prevLogIndex).append(", prevLogTerm=").append(prevLogTerm)
+ .append(", entries=").append(entries).append(", leaderCommit=").append(leaderCommit)
+ .append(", replicatedToAllIndex=").append(replicatedToAllIndex).append("]");
+ return builder.toString();
}
public <T extends Object> Object toSerializable() {
from.getPrevLogIndex(),
from.getPrevLogTerm(),
logEntryList,
- from.getLeaderCommit());
+ from.getLeaderCommit(), -1);
return to;
}
* Reply for the AppendEntriesRpc message
*/
public class AppendEntriesReply extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -7487547356392536683L;
// true if follower contained entry matching
// prevLogIndex and prevLogTerm
this.logLastTerm = logLastTerm;
}
+ @Override
public long getTerm() {
return term;
}
return followerId;
}
- @Override public String toString() {
- final StringBuilder sb =
- new StringBuilder("AppendEntriesReply{");
- sb.append("term=").append(term);
- sb.append(", success=").append(success);
- sb.append(", logLastIndex=").append(logLastIndex);
- sb.append(", logLastTerm=").append(logLastTerm);
- sb.append(", followerId='").append(followerId).append('\'');
- sb.append('}');
- return sb.toString();
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("AppendEntriesReply [term=").append(term).append(", success=").append(success)
+ .append(", logLastIndex=").append(logLastIndex).append(", logLastTerm=").append(logLastTerm)
+ .append(", followerId=").append(followerId).append("]");
+ return builder.toString();
}
}
public <T extends Object> Object toSerializable(){
InstallSnapshotMessages.InstallSnapshot.Builder builder = InstallSnapshotMessages.InstallSnapshot.newBuilder()
+ .setTerm(this.getTerm())
.setLeaderId(this.getLeaderId())
.setChunkIndex(this.getChunkIndex())
.setData(this.getData())
return installSnapshot;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("InstallSnapshot [term=").append(term).append(", leaderId=").append(leaderId)
+ .append(", lastIncludedIndex=").append(lastIncludedIndex).append(", lastIncludedTerm=")
+ .append(lastIncludedTerm).append(", data=").append(data).append(", chunkIndex=").append(chunkIndex)
+ .append(", totalChunks=").append(totalChunks).append(", lastChunkHashCode=").append(lastChunkHashCode)
+ .append("]");
+ return builder.toString();
+ }
}
package org.opendaylight.controller.cluster.raft.messages;
public class InstallSnapshotReply extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 642227896390779503L;
// The followerId - this will be used to figure out which follower is
// responding
private final String followerId;
private final int chunkIndex;
- private boolean success;
+ private final boolean success;
public InstallSnapshotReply(long term, String followerId, int chunkIndex,
boolean success) {
public boolean isSuccess() {
return success;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("InstallSnapshotReply [term=").append(term).append(", followerId=").append(followerId)
+ .append(", chunkIndex=").append(chunkIndex).append(", success=").append(success).append("]");
+ return builder.toString();
+ }
}
* Invoked by candidates to gather votes (§5.2).
*/
public class RequestVote extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -6967509186297108657L;
// candidate requesting vote
private String candidateId;
public RequestVote() {
}
+ @Override
public long getTerm() {
return term;
}
this.lastLogTerm = lastLogTerm;
}
- @Override public String toString() {
- final StringBuilder sb =
- new StringBuilder("RequestVote{");
- sb.append("term='").append(getTerm()).append('\'');
- sb.append("candidateId='").append(candidateId).append('\'');
- sb.append(", lastLogIndex=").append(lastLogIndex);
- sb.append(", lastLogTerm=").append(lastLogTerm);
- sb.append('}');
- return sb.toString();
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("RequestVote [term=").append(term).append(", candidateId=").append(candidateId)
+ .append(", lastLogIndex=").append(lastLogIndex).append(", lastLogTerm=").append(lastLogTerm)
+ .append("]");
+ return builder.toString();
}
}
package org.opendaylight.controller.cluster.raft.messages;
public class RequestVoteReply extends AbstractRaftRPC {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 8427899326488775660L;
// true means candidate received vot
private final boolean voteGranted;
this.voteGranted = voteGranted;
}
+ @Override
public long getTerm() {
return term;
}
public boolean isVoteGranted() {
return voteGranted;
}
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("RequestVoteReply [term=").append(term).append(", voteGranted=").append(voteGranted).append("]");
+ return builder.toString();
+ }
}
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+
/**
*
*/
}
+ @Test
+ public void testSnapshotPreCommit() {
+ //add 4 more entries
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("E")));
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("F")));
+ replicatedLogImpl.append(new MockReplicatedLogEntry(3, 6, new MockPayload("G")));
+ replicatedLogImpl.append(new MockReplicatedLogEntry(3, 7, new MockPayload("H")));
+
+ //sending negative values should not cause any changes
+ replicatedLogImpl.snapshotPreCommit(-1, -1);
+ assertEquals(8, replicatedLogImpl.size());
+ assertEquals(-1, replicatedLogImpl.getSnapshotIndex());
+
+ replicatedLogImpl.snapshotPreCommit(4, 3);
+ assertEquals(3, replicatedLogImpl.size());
+ assertEquals(4, replicatedLogImpl.getSnapshotIndex());
+
+ replicatedLogImpl.snapshotPreCommit(6, 3);
+ assertEquals(1, replicatedLogImpl.size());
+ assertEquals(6, replicatedLogImpl.getSnapshotIndex());
+
+ replicatedLogImpl.snapshotPreCommit(7, 3);
+ assertEquals(0, replicatedLogImpl.size());
+ assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+
+ //running it again on an empty list should not throw exception
+ replicatedLogImpl.snapshotPreCommit(7, 3);
+ assertEquals(0, replicatedLogImpl.size());
+ assertEquals(7, replicatedLogImpl.getSnapshotIndex());
+
+ }
+
+ @Test
+ public void testIsPresent() {
+ assertTrue(replicatedLogImpl.isPresent(0));
+ assertTrue(replicatedLogImpl.isPresent(1));
+ assertTrue(replicatedLogImpl.isPresent(2));
+ assertTrue(replicatedLogImpl.isPresent(3));
+
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 4, new MockPayload("D")));
+ replicatedLogImpl.snapshotPreCommit(3, 2); //snapshot on 3
+ replicatedLogImpl.snapshotCommit();
+
+ assertFalse(replicatedLogImpl.isPresent(0));
+ assertFalse(replicatedLogImpl.isPresent(1));
+ assertFalse(replicatedLogImpl.isPresent(2));
+ assertFalse(replicatedLogImpl.isPresent(3));
+ assertTrue(replicatedLogImpl.isPresent(4));
+
+ replicatedLogImpl.snapshotPreCommit(4, 2); //snapshot on 4
+ replicatedLogImpl.snapshotCommit();
+ assertFalse(replicatedLogImpl.isPresent(4));
+
+ replicatedLogImpl.append(new MockReplicatedLogEntry(2, 5, new MockPayload("D")));
+ assertTrue(replicatedLogImpl.isPresent(5));
+ }
+
// create a snapshot for test
public Map<Long, String> takeSnapshot(final int numEntries) {
Map<Long, String> map = new HashMap<>(numEntries);
// hence getting the actual elapsed time and do a match.
// if the sleep has spilled over, then return the test gracefully
private long sleepWithElaspsedTimeReturned(long millis) {
- Stopwatch stopwatch = new Stopwatch();
- stopwatch.start();
+ Stopwatch stopwatch = Stopwatch.createStarted();
Uninterruptibles.sleepUninterruptibly(millis, TimeUnit.MILLISECONDS);
stopwatch.stop();
return stopwatch.elapsed(TimeUnit.MILLISECONDS);
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import com.google.common.base.Preconditions;
import com.google.protobuf.GeneratedMessage;
import java.io.Serializable;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.test.MockPayloadMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class MockRaftActorContext implements RaftActorContext {
private ReplicatedLog replicatedLog;
private Map<String, String> peerAddresses = new HashMap<>();
private ConfigParams configParams;
+ private boolean snapshotCaptureInitiated;
public MockRaftActorContext(){
electionTerm = null;
* Identifier of the actor whose election term information this is
*/
private final String id = id1;
- private long currentTerm = 0;
+ private long currentTerm = 1;
private String votedFor = "";
@Override
public void initReplicatedLog(){
this.replicatedLog = new SimpleReplicatedLog();
- this.replicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("")));
+ // Seed the mock log with two entries at indices 0 and 1, both stamped with
+ // the context's current term, so tests start from log indexing that matches
+ // real Raft numbering (previously a single entry at index 1 with term 1).
+ long term = getTermInformation().getCurrentTerm();
+ this.replicatedLog.append(new MockReplicatedLogEntry(term, 0, new MockPayload("1")));
+ this.replicatedLog.append(new MockReplicatedLogEntry(term, 1, new MockPayload("2")));
}
@Override public ActorRef actorOf(Props props) {
}
@Override
+ // FIXME: Many tests manipulate the replicated log by replacing it via this
+ // setter. That is only safe when the underlying actor is NOT a RaftActor (or a
+ // subclass): RaftActor creates its own replicatedLog field internally and sets
+ // it on the RaftActorContext, so a log installed here would be ignored.
+ // In that case the only correct way to manipulate the log is to fetch it from
+ // the RaftActor (or its RaftActorContext) and modify the entries in place.
+ // A simple assertion demonstrating the mismatch (it would fail):
+ // ReplicatedLog log = new ReplicatedLogImpl();
+ // raftActor.underlyingActor().getRaftActorContext().setReplicatedLog(log);
+ // assertEquals(log, raftActor.underlyingActor().getReplicatedLog())
public void setReplicatedLog(ReplicatedLog replicatedLog) {
this.replicatedLog = replicatedLog;
}
return this.system;
}
- @Override public LoggingAdapter getLogger() {
- return Logging.getLogger(system, this);
+ // Switched from Akka's LoggingAdapter (tied to the actor system) to a plain
+ // SLF4J logger keyed on this class.
+ @Override public Logger getLogger() {
+ return LoggerFactory.getLogger(getClass());
}
@Override public Map<String, String> getPeerAddresses() {
return configParams;
}
+ @Override
+ // Records whether a snapshot capture is in progress; simple mock stand-in for
+ // the real context's flag (stored in the snapshotCaptureInitiated field).
+ public void setSnapshotCaptureInitiated(boolean snapshotCaptureInitiated) {
+ this.snapshotCaptureInitiated = snapshotCaptureInitiated;
+ }
+
+ @Override
+ // Returns the flag recorded by setSnapshotCaptureInitiated().
+ public boolean isSnapshotCaptureInitiated() {
+ return snapshotCaptureInitiated;
+ }
+
public void setConfigParams(ConfigParams configParams) {
this.configParams = configParams;
}
public String toString() {
return value;
}
+
+ @Override
+ // Hash derived solely from 'value' (standard generated form), consistent with
+ // equals() below which also compares only 'value'.
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((value == null) ? 0 : value.hashCode());
+ return result;
+ }
+
+ @Override
+ // Equality based solely on 'value'; uses exact-class comparison (getClass()),
+ // not instanceof, so subclasses never compare equal to MockPayload.
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ MockPayload other = (MockPayload) obj;
+ // Null-safe comparison of the payload string.
+ if (value == null) {
+ if (other.value != null) {
+ return false;
+ }
+ } else if (!value.equals(other.value)) {
+ return false;
+ }
+ return true;
+ }
}
public static class MockReplicatedLogEntry implements ReplicatedLogEntry, Serializable {
public int size() {
return getData().size();
}
+
+ @Override
+ // Hash over (data, index, term); the long fields are folded to int via the
+ // standard xor-shift idiom. Consistent with equals() below.
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((data == null) ? 0 : data.hashCode());
+ result = prime * result + (int) (index ^ (index >>> 32));
+ result = prime * result + (int) (term ^ (term >>> 32));
+ return result;
+ }
+
+ @Override
+ // Equality over (data, index, term); exact-class comparison, so subclasses
+ // never compare equal to MockReplicatedLogEntry.
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ MockReplicatedLogEntry other = (MockReplicatedLogEntry) obj;
+ // Null-safe comparison of the payload.
+ if (data == null) {
+ if (other.data != null) {
+ return false;
+ }
+ } else if (!data.equals(other.data)) {
+ return false;
+ }
+ if (index != other.index) {
+ return false;
+ }
+ if (term != other.term) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ // Debug-friendly rendering of (term, index, data) for test failure messages.
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("MockReplicatedLogEntry [term=").append(term).append(", index=").append(index)
+ .append(", data=").append(data).append("]");
+ return builder.toString();
+ }
}
public static class MockReplicatedLogBuilder {
package org.opendaylight.controller.cluster.raft;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeoutException;
import org.junit.After;
import org.junit.Assert;
+import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
public class RaftActorTest extends AbstractActorTest {
+ private TestActorFactory factory;
+
+ @Before
+ public void setUp(){
+ // Fresh TestActorFactory per test; generates unique actor ids (see
+ // generateActorId usages) and is closed again in tearDown().
+ factory = new TestActorFactory(getSystem());
+ }
@After
- public void tearDown() {
+ public void tearDown() throws Exception {
+ // Close the per-test factory first (presumably stops the actors it created
+ // -- see TestActorFactory), then clear shared journal/snapshot state.
+ factory.close();
MockAkkaJournal.clearJournal();
MockSnapshotStore.setMockSnapshot(null);
}
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
private final List<Object> state;
private ActorRef roleChangeNotifier;
+ private final CountDownLatch initializeBehaviorComplete = new CountDownLatch(1);
public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
private static final long serialVersionUID = 1L;
}
}
- public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider) {
+ public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
+ DataPersistenceProvider dataPersistenceProvider) {
super(id, peerAddresses, config);
state = new ArrayList<>();
this.delegate = mock(RaftActor.class);
}
}
+ // Blocks (up to 5 seconds) until initializeBehavior() has run, i.e. until the
+ // initializeBehaviorComplete latch is released; fails the test on timeout.
+ public void waitForInitializeBehaviorComplete() {
+ try {
+ assertEquals("Behavior initialized", true, initializeBehaviorComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
public List<Object> getState() {
return state;
}
recoveryComplete.countDown();
}
+ @Override
+ protected void initializeBehavior() {
+ super.initializeBehavior();
+ // Release the latch so tests can synchronize on behavior initialization
+ // via waitForInitializeBehaviorComplete().
+ initializeBehaviorComplete.countDown();
+ }
+
@Override
protected void applyRecoverySnapshot(byte[] bytes) {
delegate.applyRecoverySnapshot(bytes);
@Test
public void testRaftActorRecovery() throws Exception {
new JavaTestKit(getSystem()) {{
- String persistenceId = "follower10";
+ String persistenceId = factory.generateActorId("follower-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
// Set the heartbeat interval high to essentially disable election otherwise the test
// log entry.
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- ActorRef followerActor = getSystem().actorOf(MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
+ ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
watch(followerActor);
int lastAppliedDuringSnapshotCapture = 3;
int lastIndexDuringSnapshotCapture = 4;
- // 4 messages as part of snapshot, which are applied to state
- ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
+ // 4 messages as part of snapshot, which are applied to state
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1 ,
+ snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1,
lastAppliedDuringSnapshotCapture, 1);
MockSnapshotStore.setMockSnapshot(snapshot);
MockSnapshotStore.setPersistenceId(persistenceId);
unwatch(followerActor);
//reinstate the actor
- TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
- MockRaftActor.props(persistenceId, Collections.<String,String>emptyMap(),
+ TestActorRef<MockRaftActor> ref = factory.createTestActor(
+ MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
Optional.<ConfigParams>of(config)));
ref.underlyingActor().waitForRecoveryComplete();
public void testHandleRecoveryWhenDataPersistenceRecoveryApplicable() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testHandleRecoveryWhenDataPersistenceRecoveryApplicable";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
// Wait for akka's recovery to complete so it doesn't interfere.
mockRaftActor.waitForRecoveryComplete();
- ByteString snapshotBytes = fromObject(Arrays.asList(
+ ByteString snapshotBytes = fromObject(Arrays.asList(
new MockRaftActorContext.MockPayload("A"),
new MockRaftActorContext.MockPayload("B"),
new MockRaftActorContext.MockPayload("C"),
new MockRaftActorContext.MockPayload("D")));
Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- Lists.<ReplicatedLogEntry>newArrayList(), 3, 1 ,3, 1);
+ Lists.<ReplicatedLogEntry>newArrayList(), 3, 1, 3, 1);
mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}};
}
public void testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), new DataPersistenceProviderMonitor()), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), new DataPersistenceProviderMonitor()), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
// Wait for akka's recovery to complete so it doesn't interfere.
mockRaftActor.waitForRecoveryComplete();
- ByteString snapshotBytes = fromObject(Arrays.asList(
+ ByteString snapshotBytes = fromObject(Arrays.asList(
new MockRaftActorContext.MockPayload("A"),
new MockRaftActorContext.MockPayload("B"),
new MockRaftActorContext.MockPayload("C"),
new MockRaftActorContext.MockPayload("D")));
Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- Lists.<ReplicatedLogEntry>newArrayList(), 3, 1 ,3, 1);
+ Lists.<ReplicatedLogEntry>newArrayList(), 3, 1, 3, 1);
mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
-
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
}};
}
public void testUpdatingElectionTermCallsDataPersistence() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testUpdatingElectionTermCallsDataPersistence";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
mockRaftActor.getRaftActorContext().getTermInformation().updateAndPersist(10, "foobar");
assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
-
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
public void testAddingReplicatedLogEntryCallsDataPersistence() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testAddingReplicatedLogEntryCallsDataPersistence";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(10, 10, mock(Payload.class));
mockRaftActor.getRaftActorContext().getReplicatedLog().appendAndPersist(logEntry);
verify(dataPersistenceProvider).persist(eq(logEntry), any(Procedure.class));
-
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
public void testRemovingReplicatedLogEntryCallsDataPersistence() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testRemovingReplicatedLogEntryCallsDataPersistence";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
mockRaftActor.getReplicatedLog().appendAndPersist(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
mockRaftActor.getRaftActorContext().getReplicatedLog().removeFromAndPersist(0);
verify(dataPersistenceProvider, times(2)).persist(anyObject(), any(Procedure.class));
-
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
public void testApplyLogEntriesCallsDataPersistence() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testApplyLogEntriesCallsDataPersistence";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
mockRaftActor.onReceiveCommand(new ApplyLogEntries(10));
verify(dataPersistenceProvider, times(1)).persist(anyObject(), any(Procedure.class));
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
+
};
}
public void testCaptureSnapshotReplyCallsDataPersistence() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testCaptureSnapshotReplyCallsDataPersistence";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(),
- MockRaftActor.props(persistenceId,Collections.<String,String>emptyMap(),
- Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, Collections.<String, String>emptyMap(),
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- ByteString snapshotBytes = fromObject(Arrays.asList(
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
new MockRaftActorContext.MockPayload("A"),
new MockRaftActorContext.MockPayload("B"),
new MockRaftActorContext.MockPayload("C"),
new MockRaftActorContext.MockPayload("D")));
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1,1,-1,1));
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1,-1, 1, -1, 1));
RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
verify(dataPersistenceProvider).saveSnapshot(anyObject());
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
public void testSaveSnapshotSuccessCallsDataPersistence() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testSaveSnapshotSuccessCallsDataPersistence";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,0, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,1, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,2, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,3, mock(Payload.class)));
- mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,4, mock(Payload.class)));
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 1, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 2, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 3, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1, 4, mock(Payload.class)));
ByteString snapshotBytes = fromObject(Arrays.asList(
new MockRaftActorContext.MockPayload("A"),
RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
mockRaftActor.setCurrentBehavior(new Follower(raftActorContext));
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, 2, 1));
+ long replicatedToAllIndex = 1;
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, 2, 1, replicatedToAllIndex, 1));
verify(mockRaftActor.delegate).createSnapshot();
verify(dataPersistenceProvider).deleteMessages(100);
- assertEquals(2, mockRaftActor.getReplicatedLog().size());
+ assertEquals(3, mockRaftActor.getReplicatedLog().size());
+ assertEquals(1, mockRaftActor.getCurrentBehavior().getReplicatedToAllIndex());
+ assertNotNull(mockRaftActor.getReplicatedLog().get(2));
assertNotNull(mockRaftActor.getReplicatedLog().get(3));
assertNotNull(mockRaftActor.getReplicatedLog().get(4));
// Index 2 will not be in the log because it was removed due to snapshotting
- assertNull(mockRaftActor.getReplicatedLog().get(2));
-
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
+ assertNull(mockRaftActor.getReplicatedLog().get(1));
+ assertNull(mockRaftActor.getReplicatedLog().get(0));
}
};
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testApplyState";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
ReplicatedLogEntry entry = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
new MockRaftActorContext.MockPayload("F"));
verify(mockRaftActor.delegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
public void testApplySnapshot() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testApplySnapshot";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
ReplicatedLog oldReplicatedLog = mockRaftActor.getReplicatedLog();
- oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,0,mock(Payload.class)));
- oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,1,mock(Payload.class)));
+ oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
+ oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1, 1, mock(Payload.class)));
oldReplicatedLog.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
- mock(Payload.class)));
+ new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
+ mock(Payload.class)));
ByteString snapshotBytes = fromObject(Arrays.asList(
- new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
Snapshot snapshot = mock(Snapshot.class);
verify(mockRaftActor.delegate).applySnapshot(eq(snapshot.getState()));
assertTrue("The replicatedLog should have changed",
- oldReplicatedLog != mockRaftActor.getReplicatedLog());
+ oldReplicatedLog != mockRaftActor.getReplicatedLog());
assertEquals("lastApplied should be same as in the snapshot",
- (Long) 3L, mockRaftActor.getLastApplied());
+ (Long) 3L, mockRaftActor.getLastApplied());
assertEquals(0, mockRaftActor.getReplicatedLog().size());
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
public void testSaveSnapshotFailure() throws Exception {
new JavaTestKit(getSystem()) {
{
- String persistenceId = "testSaveSnapshotFailure";
+ String persistenceId = factory.generateActorId("leader-");
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
- ByteString snapshotBytes = fromObject(Arrays.asList(
+ mockRaftActor.waitForInitializeBehaviorComplete();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
new MockRaftActorContext.MockPayload("A"),
new MockRaftActorContext.MockPayload("B"),
new MockRaftActorContext.MockPayload("C"),
mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
- mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1,1,-1,1));
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, -1, 1, -1, 1));
mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
assertEquals("Snapshot index should not have advanced because save snapshot failed", -1,
mockRaftActor.getReplicatedLog().getSnapshotIndex());
- mockActorRef.tell(PoisonPill.getInstance(), getRef());
-
}
};
}
@Test
public void testRaftRoleChangeNotifier() throws Exception {
new JavaTestKit(getSystem()) {{
- ActorRef notifierActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ ActorRef notifierActor = factory.createActor(Props.create(MessageCollectorActor.class));
+ MessageCollectorActor.waitUntilReady(notifierActor);
+
DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
- String id = "testRaftRoleChangeNotifier";
+ long heartBeatInterval = 100;
+ config.setHeartBeatInterval(FiniteDuration.create(heartBeatInterval, TimeUnit.MILLISECONDS));
+ config.setElectionTimeoutFactor(1);
- TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(id,
- Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), notifierActor), id);
+ String persistenceId = factory.generateActorId("notifier-");
- // sleeping for a minimum of 2 seconds, if it spans more its fine.
- Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
+ factory.createTestActor(MockRaftActor.props(persistenceId,
+ Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), notifierActor), persistenceId);
+
+ List<RoleChanged> matches = null;
+ for(int i = 0; i < 5000 / heartBeatInterval; i++) {
+ matches = MessageCollectorActor.getAllMatching(notifierActor, RoleChanged.class);
+ assertNotNull(matches);
+ if(matches.size() == 3) {
+ break;
+ }
+ Uninterruptibles.sleepUninterruptibly(heartBeatInterval, TimeUnit.MILLISECONDS);
+ }
- List<Object> matches = MessageCollectorActor.getAllMatching(notifierActor, RoleChanged.class);
- assertNotNull(matches);
assertEquals(3, matches.size());
// check if the notifier got a role change from null to Follower
- RoleChanged raftRoleChanged = (RoleChanged) matches.get(0);
- assertEquals(id, raftRoleChanged.getMemberId());
+ RoleChanged raftRoleChanged = matches.get(0);
+ assertEquals(persistenceId, raftRoleChanged.getMemberId());
assertNull(raftRoleChanged.getOldRole());
assertEquals(RaftState.Follower.name(), raftRoleChanged.getNewRole());
// check if the notifier got a role change from Follower to Candidate
- raftRoleChanged = (RoleChanged) matches.get(1);
- assertEquals(id, raftRoleChanged.getMemberId());
+ raftRoleChanged = matches.get(1);
+ assertEquals(persistenceId, raftRoleChanged.getMemberId());
assertEquals(RaftState.Follower.name(), raftRoleChanged.getOldRole());
assertEquals(RaftState.Candidate.name(), raftRoleChanged.getNewRole());
// check if the notifier got a role change from Candidate to Leader
- raftRoleChanged = (RoleChanged) matches.get(2);
- assertEquals(id, raftRoleChanged.getMemberId());
+ raftRoleChanged = matches.get(2);
+ assertEquals(persistenceId, raftRoleChanged.getMemberId());
assertEquals(RaftState.Candidate.name(), raftRoleChanged.getOldRole());
assertEquals(RaftState.Leader.name(), raftRoleChanged.getNewRole());
}};
}
+ @Test
+ public void testFakeSnapshotsForLeaderWithInRealSnapshots() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = factory.generateActorId("leader-");
+ String follower1Id = factory.generateActorId("follower-");
+
+ ActorRef followerActor1 =
+ factory.createActor(Props.create(MessageCollectorActor.class));
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(follower1Id, followerActor1.path().toString());
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+
+ leaderActor.getRaftActorContext().setCommitIndex(4);
+ leaderActor.getRaftActorContext().setLastApplied(4);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+
+ // create 8 entries in the log - 0 to 4 are applied and will get picked up as part of the capture snapshot
+
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
+ leaderActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(0, 8, 1).build());
+
+ assertEquals(8, leaderActor.getReplicatedLog().size());
+
+ leaderActor.onReceiveCommand(new CaptureSnapshot(6, 1, 4, 1, 4, 1));
+
+ leaderActor.getRaftActorContext().setSnapshotCaptureInitiated(true);
+ verify(leaderActor.delegate).createSnapshot();
+
+ assertEquals(8, leaderActor.getReplicatedLog().size());
+
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+ //fake snapshot on index 5
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 5, 1));
+
+ assertEquals(8, leaderActor.getReplicatedLog().size());
+
+ //fake snapshot on index 6
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 6, 1));
+ assertEquals(8, leaderActor.getReplicatedLog().size());
+
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ assertEquals(8, leaderActor.getReplicatedLog().size());
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("foo-0"),
+ new MockRaftActorContext.MockPayload("foo-1"),
+ new MockRaftActorContext.MockPayload("foo-2"),
+ new MockRaftActorContext.MockPayload("foo-3"),
+ new MockRaftActorContext.MockPayload("foo-4")));
+ leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+
+ // capture snapshot reply should remove the snapshotted entries only
+ assertEquals(3, leaderActor.getReplicatedLog().size());
+ assertEquals(7, leaderActor.getReplicatedLog().lastIndex());
+
+ // add another non-replicated entry
+ leaderActor.getReplicatedLog().append(
+ new ReplicatedLogImplEntry(8, 1, new MockRaftActorContext.MockPayload("foo-8")));
+
+ //fake snapshot on index 7; since lastApplied = 7, we would keep the last applied entry
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 7, 1));
+ assertEquals(2, leaderActor.getReplicatedLog().size());
+ assertEquals(8, leaderActor.getReplicatedLog().lastIndex());
+
+ }
+ };
+ }
+
+ @Test
+ public void testFakeSnapshotsForFollowerWithInRealSnapshots() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = factory.generateActorId("follower-");
+ String leaderId = factory.generateActorId("leader-");
+
+
+ ActorRef leaderActor1 =
+ factory.createActor(Props.create(MessageCollectorActor.class));
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(leaderId, leaderActor1.path().toString());
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor followerActor = mockActorRef.underlyingActor();
+ followerActor.getRaftActorContext().setCommitIndex(4);
+ followerActor.getRaftActorContext().setLastApplied(4);
+ followerActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ followerActor.waitForInitializeBehaviorComplete();
+
+
+ Follower follower = new Follower(followerActor.getRaftActorContext());
+ followerActor.setCurrentBehavior(follower);
+ assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
+
+ // create 6 entries in the log - 0 to 4 are applied and will get picked up as part of the capture snapshot
+ MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
+ followerActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(0, 6, 1).build());
+
+ // log has indices 0-5
+ assertEquals(6, followerActor.getReplicatedLog().size());
+
+ //snapshot on 4
+ followerActor.onReceiveCommand(new CaptureSnapshot(5, 1, 4, 1, 4, 1));
+
+ followerActor.getRaftActorContext().setSnapshotCaptureInitiated(true);
+ verify(followerActor.delegate).createSnapshot();
+
+ assertEquals(6, followerActor.getReplicatedLog().size());
+
+ //fake snapshot on index 6
+ List<ReplicatedLogEntry> entries =
+ Arrays.asList(
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
+ new MockRaftActorContext.MockPayload("foo-6"))
+ );
+ followerActor.onReceiveCommand(new AppendEntries(1, leaderId, 5, 1, entries, 5, 5));
+ assertEquals(7, followerActor.getReplicatedLog().size());
+
+ //fake snapshot on index 7
+ assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
+
+ entries =
+ Arrays.asList(
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
+ new MockRaftActorContext.MockPayload("foo-7"))
+ );
+ followerActor.onReceiveCommand(new AppendEntries(1, leaderId, 6, 1, entries, 6, 6));
+ assertEquals(8, followerActor.getReplicatedLog().size());
+
+ assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
+
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("foo-0"),
+ new MockRaftActorContext.MockPayload("foo-1"),
+ new MockRaftActorContext.MockPayload("foo-2"),
+ new MockRaftActorContext.MockPayload("foo-3"),
+ new MockRaftActorContext.MockPayload("foo-4")));
+ followerActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ assertFalse(followerActor.getRaftActorContext().isSnapshotCaptureInitiated());
+
+ // capture snapshot reply should remove the snapshotted entries only till replicatedToAllIndex
+ assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
+ assertEquals(7, followerActor.getReplicatedLog().lastIndex());
+
+ entries =
+ Arrays.asList(
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(1, 8,
+ new MockRaftActorContext.MockPayload("foo-7"))
+ );
+ // send an additional entry 8 with leaderCommit = 7
+ followerActor.onReceiveCommand(new AppendEntries(1, leaderId, 7, 1, entries, 7, 7));
+
+ // 7 and 8 remain, as lastApplied is 7
+ assertEquals(2, followerActor.getReplicatedLog().size());
+
+ }
+ };
+ }
+
+ @Test
+ public void testFakeSnapshotsForLeaderWithInInitiateSnapshots() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = factory.generateActorId("leader-");
+ String follower1Id = factory.generateActorId("follower-");
+ String follower2Id = factory.generateActorId("follower-");
+
+ ActorRef followerActor1 =
+ factory.createActor(Props.create(MessageCollectorActor.class), follower1Id);
+ ActorRef followerActor2 =
+ factory.createActor(Props.create(MessageCollectorActor.class), follower2Id);
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(follower1Id, followerActor1.path().toString());
+ peerAddresses.put(follower2Id, followerActor2.path().toString());
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(9);
+ leaderActor.getRaftActorContext().setLastApplied(9);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // create 5 entries in the log
+ MockRaftActorContext.MockReplicatedLogBuilder logBuilder = new MockRaftActorContext.MockReplicatedLogBuilder();
+ leaderActor.getRaftActorContext().setReplicatedLog(logBuilder.createEntries(5, 10, 1).build());
+
+ //set the snapshot index to 4; entries 0 to 4 are snapshotted
+ leaderActor.getRaftActorContext().getReplicatedLog().setSnapshotIndex(4);
+ //setting replicatedToAllIndex = 9, for the log to clear
+ leader.setReplicatedToAllIndex(9);
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 9, 1));
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // set the 2nd follower nextIndex to 1 which has been snapshotted
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 0, 1));
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // simulate a real snapshot
+ leaderActor.onReceiveCommand(new SendHeartBeat());
+ assertEquals(5, leaderActor.getReplicatedLog().size());
+ assertEquals(String.format("expected to be Leader but was %s. Current Leader = %s ",
+ leaderActor.getCurrentBehavior().state(), leaderActor.getLeaderId())
+ , RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+
+ //reply from a slow follower does not initiate a fake snapshot
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 9, 1));
+ assertEquals("Fake snapshot should not happen when Initiate is in progress", 5, leaderActor.getReplicatedLog().size());
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("foo-0"),
+ new MockRaftActorContext.MockPayload("foo-1"),
+ new MockRaftActorContext.MockPayload("foo-2"),
+ new MockRaftActorContext.MockPayload("foo-3"),
+ new MockRaftActorContext.MockPayload("foo-4")));
+ leaderActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes.toByteArray()));
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+
+ assertEquals("Real snapshot didn't clear the log till replicatedToAllIndex", 0, leaderActor.getReplicatedLog().size());
+
+ //a reply from a slow follower arriving after the snapshot should not raise errors
+ leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 5, 1));
+ assertEquals(0, leaderActor.getReplicatedLog().size());
+ }
+ };
+ }
+
+
+ private static class NonPersistentProvider implements DataPersistenceProvider {
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ try {
+ procedure.apply(o);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+
+ }
+ }
+
+ @Test
+ public void testRealSnapshotWhenReplicatedToAllIndexMinusOne() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ String persistenceId = factory.generateActorId("leader-");
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setSnapshotBatchCount(5);
+
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(3);
+ leaderActor.getRaftActorContext().setLastApplied(3);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+ for(int i=0;i< 4;i++) {
+ leaderActor.getReplicatedLog()
+ .append(new MockRaftActorContext.MockReplicatedLogEntry(1, i,
+ new MockRaftActorContext.MockPayload("A")));
+ }
+
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // Persist another entry (this will cause a CaptureSnapshot to be triggered)
+ leaderActor.persistData(mockActorRef, "x", new MockRaftActorContext.MockPayload("duh"));
+
+ // Now send a CaptureSnapshotReply
+ mockActorRef.tell(new CaptureSnapshotReply(fromObject("foo").toByteArray()), mockActorRef);
+
+ // Trimming log in this scenario is a no-op
+ assertEquals(-1, leaderActor.getReplicatedLog().getSnapshotIndex());
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertEquals(-1, leader.getReplicatedToAllIndex());
+
+ }};
+ }
+
+ @Test
+ public void testRealSnapshotWhenReplicatedToAllIndexNotInReplicatedLog() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ String persistenceId = factory.generateActorId("leader-");
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ config.setSnapshotBatchCount(5);
+
+ DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+
+ TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
+ MockRaftActor.props(persistenceId, peerAddresses,
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor leaderActor = mockActorRef.underlyingActor();
+ leaderActor.getRaftActorContext().setCommitIndex(3);
+ leaderActor.getRaftActorContext().setLastApplied(3);
+ leaderActor.getRaftActorContext().getTermInformation().update(1, persistenceId);
+ leaderActor.getReplicatedLog().setSnapshotIndex(3);
+
+ leaderActor.waitForInitializeBehaviorComplete();
+ Leader leader = new Leader(leaderActor.getRaftActorContext());
+ leaderActor.setCurrentBehavior(leader);
+ leader.setReplicatedToAllIndex(3);
+ assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
+
+ // Persist another entry (this will cause a CaptureSnapshot to be triggered)
+ leaderActor.persistData(mockActorRef, "x", new MockRaftActorContext.MockPayload("duh"));
+
+ // Now send a CaptureSnapshotReply
+ mockActorRef.tell(new CaptureSnapshotReply(fromObject("foo").toByteArray()), mockActorRef);
+
+ // Trimming log in this scenario is a no-op
+ assertEquals(3, leaderActor.getReplicatedLog().getSnapshotIndex());
+ assertFalse(leaderActor.getRaftActorContext().isSnapshotCaptureInitiated());
+ assertEquals(3, leader.getReplicatedToAllIndex());
+
+ }};
+ }
+
private ByteString fromObject(Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
}
}
}
+
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft;
+
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+import akka.actor.Actor;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import java.util.LinkedList;
+import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * TestActorFactory provides methods to create both normal and test actors and to kill them when the factory is closed.
+ * The ideal usage for TestActorFactory is with try-with-resources. <br/>
+ * For example: <br/>
+ * <pre>
+ * try (TestActorFactory factory = new TestActorFactory(getSystem())){
+ * factory.createActor(props);
+ * factory.createTestActor(props);
+ * factory.generateActorId("leader-");
+ * }
+ * </pre>
+ */
+public class TestActorFactory implements AutoCloseable {
+ private final ActorSystem system;
+ List<ActorRef> createdActors = new LinkedList<>();
+ Logger LOG = LoggerFactory.getLogger(getClass());
+ private static int actorCount = 1;
+
+ public TestActorFactory(ActorSystem system){
+ this.system = system;
+ }
+
+ /**
+ * Create a normal actor with an auto-generated name
+ *
+ * @param props
+ * @return
+ */
+ public ActorRef createActor(Props props){
+ ActorRef actorRef = system.actorOf(props);
+ createdActors.add(actorRef);
+ return actorRef;
+ }
+
+ /**
+ * Create a normal actor with the passed in name
+ * @param props
+ * @param actorId name of actor
+ * @return
+ */
+ public ActorRef createActor(Props props, String actorId){
+ ActorRef actorRef = system.actorOf(props, actorId);
+ createdActors.add(actorRef);
+ return actorRef;
+ }
+
+ /**
+ * Create a test actor with the passed in name
+ * @param props
+ * @param actorId
+ * @param <T>
+ * @return
+ */
+ public <T extends Actor> TestActorRef<T> createTestActor(Props props, String actorId){
+ TestActorRef<T> actorRef = TestActorRef.create(system, props, actorId);
+ createdActors.add(actorRef);
+ return actorRef;
+ }
+
+ /**
+ * Create a test actor with an auto-generated name
+ * @param props
+ * @param <T>
+ * @return
+ */
+ public <T extends Actor> TestActorRef<T> createTestActor(Props props){
+ TestActorRef<T> actorRef = TestActorRef.create(system, props);
+ createdActors.add(actorRef);
+ return actorRef;
+ }
+
+ /**
+ * Generate a friendly but unique actor id/name
+ * @param prefix
+ * @return
+ */
+ public String generateActorId(String prefix){
+ return prefix + actorCount++;
+ }
+
+ @Override
+ public void close() {
+ new JavaTestKit(system) {{
+ for(ActorRef actor : createdActors) {
+ watch(actor);
+ LOG.info("Killing actor {}", actor);
+ actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ expectTerminated(duration("5 seconds"), actor);
+ }
+ }};
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertTrue;
+import akka.actor.ActorRef;
+import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import scala.concurrent.duration.FiniteDuration;
+
+public abstract class AbstractLeaderTest extends AbstractRaftActorBehaviorTest{
+
+ /**
+ * When we removed scheduling of heartbeat in the AbstractLeader constructor we ended up with a situation where
+ * if no follower responded to an initial AppendEntries, heartbeats would not be sent to it. This test verifies
+ * that regardless of whether followers respond or not we schedule heartbeats.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testLeaderSchedulesHeartbeatsEvenWhenNoFollowersRespondToInitialAppendEntries() throws Exception {
+ logStart("testLeaderSchedulesHeartbeatsEvenWhenNoFollowersRespondToInitialAppendEntries");
+ new JavaTestKit(getSystem()) {{
+ String leaderActorId = actorFactory.generateActorId("leader");
+ String follower1ActorId = actorFactory.generateActorId("follower");
+ String follower2ActorId = actorFactory.generateActorId("follower");
+
+ TestActorRef<ForwardMessageToBehaviorActor> leaderActor =
+ actorFactory.createTestActor(ForwardMessageToBehaviorActor.props(), leaderActorId);
+ ActorRef follower1Actor = actorFactory.createActor(MessageCollectorActor.props(), follower1ActorId);
+ ActorRef follower2Actor = actorFactory.createActor(MessageCollectorActor.props(), follower2ActorId);
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext(leaderActorId, getSystem(), leaderActor);
+
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(200, TimeUnit.MILLISECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
+
+ leaderActorContext.setConfigParams(configParams);
+
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(1,5,1).build());
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(follower1ActorId,
+ follower1Actor.path().toString());
+ peerAddresses.put(follower2ActorId,
+ follower2Actor.path().toString());
+
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ RaftActorBehavior leader = createBehavior(leaderActorContext);
+
+ leaderActor.underlyingActor().setBehavior(leader);
+
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+
+ List<SendHeartBeat> allMessages = MessageCollectorActor.getAllMatching(leaderActor, SendHeartBeat.class);
+
+ // Need more than 1 heartbeat to be delivered because we waited for 1 second with heartbeat interval 200ms
+ assertTrue(String.format("%s messages is less than expected", allMessages.size()),
+ allMessages.size() > 1);
+
+ }};
+ }
+
+}
package org.opendaylight.controller.cluster.raft.behaviors;
+import static org.junit.Assert.assertEquals;
import akka.actor.ActorRef;
import akka.actor.Props;
-import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import com.google.protobuf.ByteString;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.junit.After;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.AbstractActorTest;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.TestActorFactory;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.slf4j.LoggerFactory;
-import java.util.ArrayList;
-import java.util.List;
+public abstract class AbstractRaftActorBehaviorTest extends AbstractActorTest {
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+ protected final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-public abstract class AbstractRaftActorBehaviorTest extends AbstractActorTest {
+ private final TestActorRef<MessageCollectorActor> behaviorActor = actorFactory.createTestActor(
+ Props.create(MessageCollectorActor.class), actorFactory.generateActorId("behavior"));
+
+ RaftActorBehavior behavior;
- private final ActorRef behaviorActor = getSystem().actorOf(Props.create(
- DoNothingActor.class));
+ @After
+ public void tearDown() throws Exception {
+ if(behavior != null) {
+ behavior.close();
+ }
+
+ actorFactory.close();
+ }
/**
* This test checks that when a new Raft RPC message is received with a newer
*/
@Test
public void testHandleRaftRPCWithNewerTerm() throws Exception {
- new JavaTestKit(getSystem()) {{
+ RaftActorContext actorContext = createActorContext();
- assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+ assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, behaviorActor,
createAppendEntriesWithNewerTerm());
- assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+ assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, behaviorActor,
createAppendEntriesReplyWithNewerTerm());
- assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+ assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, behaviorActor,
createRequestVoteWithNewerTerm());
- assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(getTestActor(),
+ assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, behaviorActor,
createRequestVoteReplyWithNewerTerm());
-
-
- }};
}
* @throws Exception
*/
@Test
- public void testHandleAppendEntriesSenderTermLessThanReceiverTerm()
- throws Exception {
- new JavaTestKit(getSystem()) {{
-
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
+ public void testHandleAppendEntriesSenderTermLessThanReceiverTerm() throws Exception {
+ MockRaftActorContext context = createActorContext();
// First set the receivers term to a high number (1000)
context.getTermInformation().update(1000, "test");
- AppendEntries appendEntries =
- new AppendEntries(100, "leader-1", 0, 0, null, 101);
+ AppendEntries appendEntries = new AppendEntries(100, "leader-1", 0, 0, null, 101, -1);
- RaftActorBehavior behavior = createBehavior(context);
+ behavior = createBehavior(context);
// Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior expected = behavior.handleMessage(behaviorActor, "unknown");
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
+ RaftActorBehavior raftBehavior = behavior.handleMessage(behaviorActor, appendEntries);
- assertEquals(expected, raftBehavior);
+ assertEquals("Raft state", expected.state(), raftBehavior.state());
// Also expect an AppendEntriesReply to be sent where success is false
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
- "AppendEntriesReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(false, out);
-
-
- }};
- }
+ AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(
+ behaviorActor, AppendEntriesReply.class);
- @Test
- public void testHandleAppendEntriesAddSameEntryToLog(){
- new JavaTestKit(getSystem()) {
- {
+ assertEquals("isSuccess", false, reply.isSuccess());
+ }
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
- // First set the receivers term to lower number
- context.getTermInformation().update(2, "test");
+ @Test
+ public void testHandleAppendEntriesAddSameEntryToLog() throws Exception {
+ MockRaftActorContext context = createActorContext();
- // Prepare the receivers log
- MockRaftActorContext.SimpleReplicatedLog log =
- new MockRaftActorContext.SimpleReplicatedLog();
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 0, new MockRaftActorContext.MockPayload("zero")));
+ context.getTermInformation().update(2, "test");
- context.setReplicatedLog(log);
+ // Prepare the receivers log
+ MockRaftActorContext.MockPayload payload = new MockRaftActorContext.MockPayload("zero");
+ setLastLogEntry(context, 2, 0, payload);
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 0, new MockRaftActorContext.MockPayload("zero")));
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ entries.add(new MockRaftActorContext.MockReplicatedLogEntry(2, 0, payload));
- AppendEntries appendEntries =
- new AppendEntries(2, "leader-1", -1, 1, entries, 0);
+ AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 2, -1);
- RaftActorBehavior behavior = createBehavior(context);
+ behavior = createBehavior(context);
- if (AbstractRaftActorBehaviorTest.this instanceof CandidateTest) {
- // Resetting the Candidates term to make sure it will match
- // the term sent by AppendEntries. If this was not done then
- // the test will fail because the Candidate will assume that
- // the message was sent to it from a lower term peer and will
- // thus respond with a failure
- context.getTermInformation().update(2, "test");
- }
+ if (behavior instanceof Candidate) {
+ // Resetting the Candidates term to make sure it will match
+ // the term sent by AppendEntries. If this was not done then
+ // the test will fail because the Candidate will assume that
+ // the message was sent to it from a lower term peer and will
+ // thus respond with a failure
+ context.getTermInformation().update(2, "test");
+ }
- // Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
+ // Send an unknown message so that the state of the RaftActor remains unchanged
+ RaftActorBehavior expected = behavior.handleMessage(behaviorActor, "unknown");
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
+ RaftActorBehavior raftBehavior = behavior.handleMessage(behaviorActor, appendEntries);
- assertEquals(expected, raftBehavior);
+ assertEquals("Raft state", expected.state(), raftBehavior.state());
- assertEquals(1, log.size());
+ assertEquals("ReplicatedLog size", 1, context.getReplicatedLog().size());
+ handleAppendEntriesAddSameEntryToLogReply(behaviorActor);
+ }
- }};
+ protected void handleAppendEntriesAddSameEntryToLogReply(TestActorRef<MessageCollectorActor> replyActor)
+ throws Exception {
+ AppendEntriesReply reply = MessageCollectorActor.getFirstMatching(replyActor, AppendEntriesReply.class);
+ Assert.assertNull("Expected no AppendEntriesReply", reply);
}
/**
* This test verifies that when a RequestVote is received by the RaftActor
- * with a term which is greater than the RaftActors' currentTerm and the
- * senders' log is more upto date than the receiver that the receiver grants
- * the vote to the sender
+ * with the senders' log is more up to date than the receiver that the receiver grants
+ * the vote to the sender.
*/
@Test
- public void testHandleRequestVoteWhenSenderTermGreaterThanCurrentTermAndSenderLogMoreUpToDate() {
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- RaftActorBehavior behavior = createBehavior(
- createActorContext(behaviorActor));
-
- RaftActorBehavior raftBehavior = behavior.handleMessage(getTestActor(),
- new RequestVote(1000, "test", 10000, 999));
-
- if(!(behavior instanceof Follower)){
- assertTrue(raftBehavior instanceof Follower);
- } else {
-
- final Boolean out =
- new ExpectMsg<Boolean>(duration("1 seconds"),
- "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply =
- (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(true, out);
- }
- }
- };
- }};
+ public void testHandleRequestVoteWhenSenderLogMoreUpToDate() {
+ MockRaftActorContext context = createActorContext();
+
+ behavior = createBehavior(context);
+
+ context.getTermInformation().update(1, "test");
+
+ behavior.handleMessage(behaviorActor, new RequestVote(context.getTermInformation().getCurrentTerm(),
+ "test", 10000, 999));
+
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(behaviorActor,
+ RequestVoteReply.class);
+ assertEquals("isVoteGranted", true, reply.isVoteGranted());
}
/**
* log then the receiving RaftActor will not grant the vote to the sender
*/
@Test
- public void testHandleRequestVoteWhenSenderTermGreaterThanCurrentTermButSenderLogLessUptoDate() {
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- RaftActorContext actorContext =
- createActorContext(behaviorActor);
-
- MockRaftActorContext.SimpleReplicatedLog
- log = new MockRaftActorContext.SimpleReplicatedLog();
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(20000,
- 1000000, new MockRaftActorContext.MockPayload("")));
-
- ((MockRaftActorContext) actorContext).setReplicatedLog(log);
-
- RaftActorBehavior behavior = createBehavior(actorContext);
-
- RaftActorBehavior raftBehavior = behavior.handleMessage(getTestActor(),
- new RequestVote(1000, "test", 10000, 999));
-
- if(!(behavior instanceof Follower)){
- assertTrue(raftBehavior instanceof Follower);
- } else {
- final Boolean out =
- new ExpectMsg<Boolean>(duration("1 seconds"),
- "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply =
- (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(false, out);
- }
- }
- };
- }};
+ public void testHandleRequestVoteWhenSenderLogLessUptoDate() {
+ MockRaftActorContext context = createActorContext();
+
+ behavior = createBehavior(context);
+
+ context.getTermInformation().update(1, "test");
+
+ int index = 2000;
+ setLastLogEntry(context, context.getTermInformation().getCurrentTerm(), index,
+ new MockRaftActorContext.MockPayload(""));
+
+ behavior.handleMessage(behaviorActor, new RequestVote(
+ context.getTermInformation().getCurrentTerm(), "test",
+ index - 1, context.getTermInformation().getCurrentTerm()));
+
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(behaviorActor,
+ RequestVoteReply.class);
+ assertEquals("isVoteGranted", false, reply.isVoteGranted());
}
*/
@Test
public void testHandleRequestVoteWhenSenderTermLessThanCurrentTerm() {
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- RaftActorContext context =
- createActorContext(behaviorActor);
-
- context.getTermInformation().update(1000, null);
-
- RaftActorBehavior follower = createBehavior(context);
-
- follower.handleMessage(getTestActor(),
- new RequestVote(999, "test", 10000, 999));
-
- final Boolean out =
- new ExpectMsg<Boolean>(duration("1 seconds"),
- "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply =
- (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(false, out);
- }
- };
- }};
+ RaftActorContext context = createActorContext();
+
+ context.getTermInformation().update(1000, null);
+
+ behavior = createBehavior(context);
+
+ behavior.handleMessage(behaviorActor, new RequestVote(999, "test", 10000, 999));
+
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(behaviorActor,
+ RequestVoteReply.class);
+ assertEquals("isVoteGranted", false, reply.isVoteGranted());
}
- protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(
- ActorRef actorRef, RaftRPC rpc) {
+ @Test
+ public void testPerformSnapshot() {
+ MockRaftActorContext context = new MockRaftActorContext("test", getSystem(), behaviorActor);
+ AbstractRaftActorBehavior abstractBehavior = (AbstractRaftActorBehavior) createBehavior(context);
+ if (abstractBehavior instanceof Candidate) {
+ return;
+ }
+
+ context.getTermInformation().update(1, "test");
+
+ //log has 1 entry with replicatedToAllIndex = 0 - does not purge anything; replicatedToAllIndex stays -1
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 1, 1).build());
+ context.setLastApplied(0);
+ abstractBehavior.performSnapshotWithoutCapture(0);
+ assertEquals(-1, abstractBehavior.getReplicatedToAllIndex());
+ assertEquals(1, context.getReplicatedLog().size());
+
+ //2 entries, lastApplied still 0, no purging.
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1).build());
+ context.setLastApplied(0);
+ abstractBehavior.performSnapshotWithoutCapture(0);
+ assertEquals(-1, abstractBehavior.getReplicatedToAllIndex());
+ assertEquals(2, context.getReplicatedLog().size());
+
+ //2 entries, lastApplied = 1 - entry at index 0 is purged; replicatedToAllIndex becomes 0.
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1).build());
+ context.setLastApplied(1);
+ abstractBehavior.performSnapshotWithoutCapture(0);
+ assertEquals(0, abstractBehavior.getReplicatedToAllIndex());
+ assertEquals(1, context.getReplicatedLog().size());
+
+ //5 entries, lastApplied = 2 and replicatedToAllIndex = 3, but since the lastApplied entry must be kept, only indices 0 and 1 are purged
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 5, 1).build());
+ context.setLastApplied(2);
+ abstractBehavior.performSnapshotWithoutCapture(3);
+ assertEquals(1, abstractBehavior.getReplicatedToAllIndex());
+ assertEquals(3, context.getReplicatedLog().size());
+
+ // scenario where lastApplied > replicatedToAllIndex (because of a slow follower)
+ context.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ context.setLastApplied(2);
+ abstractBehavior.performSnapshotWithoutCapture(1);
+ assertEquals(1, abstractBehavior.getReplicatedToAllIndex());
+ assertEquals(1, context.getReplicatedLog().size());
+ }
+
+
+ protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(RaftActorContext actorContext,
+ ActorRef actorRef, RaftRPC rpc) throws Exception {
- RaftActorContext actorContext = createActorContext();
Payload p = new MockRaftActorContext.MockPayload("");
- setLastLogEntry(
- (MockRaftActorContext) actorContext, 0, 0, p);
+ setLastLogEntry((MockRaftActorContext) actorContext, 1, 0, p);
+ actorContext.getTermInformation().update(1, "test");
+
+ RaftActorBehavior origBehavior = createBehavior(actorContext);
+ RaftActorBehavior raftBehavior = origBehavior.handleMessage(actorRef, rpc);
- RaftActorBehavior raftBehavior = createBehavior(actorContext)
- .handleMessage(actorRef, rpc);
+ assertEquals("New raft state", RaftState.Follower, raftBehavior.state());
+ assertEquals("New election term", rpc.getTerm(), actorContext.getTermInformation().getCurrentTerm());
- assertTrue(raftBehavior instanceof Follower);
+ origBehavior.close();
+ raftBehavior.close();
}
protected MockRaftActorContext.SimpleReplicatedLog setLastLogEntry(
new MockRaftActorContext.MockReplicatedLogEntry(term, index, data));
}
- protected MockRaftActorContext.SimpleReplicatedLog setLastLogEntry(
- MockRaftActorContext actorContext, ReplicatedLogEntry logEntry) {
- MockRaftActorContext.SimpleReplicatedLog
- log = new MockRaftActorContext.SimpleReplicatedLog();
+ protected MockRaftActorContext.SimpleReplicatedLog setLastLogEntry(MockRaftActorContext actorContext,
+ ReplicatedLogEntry logEntry) {
+ MockRaftActorContext.SimpleReplicatedLog log = new MockRaftActorContext.SimpleReplicatedLog();
log.append(logEntry);
actorContext.setReplicatedLog(log);
return createBehavior(createActorContext());
}
- protected RaftActorContext createActorContext() {
+ protected MockRaftActorContext createActorContext() {
return new MockRaftActorContext();
}
- protected RaftActorContext createActorContext(ActorRef actor) {
+ protected MockRaftActorContext createActorContext(ActorRef actor) {
return new MockRaftActorContext("test", getSystem(), actor);
}
protected AppendEntries createAppendEntriesWithNewerTerm() {
- return new AppendEntries(100, "leader-1", 0, 0, null, 1);
+ return new AppendEntries(100, "leader-1", 0, 0, null, 1, -1);
}
protected AppendEntriesReply createAppendEntriesReplyWithNewerTerm() {
protected Object fromSerializableMessage(Object serializable){
return SerializationUtils.fromSerializable(serializable);
}
+
+ protected ByteString toByteString(Map<String, String> state) {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ try(ObjectOutputStream oos = new ObjectOutputStream(bos)) {
+ oos.writeObject(state);
+ return ByteString.copyFrom(bos.toByteArray());
+ } catch (IOException e) {
+ throw new AssertionError("IOException occurred converting Map to Bytestring", e);
+ }
+ }
+
+ protected void logStart(String name) {
+ LoggerFactory.getLogger(LeaderTest.class).info("Starting " + name);
+ }
}
package org.opendaylight.controller.cluster.raft.behaviors;
+import static org.junit.Assert.assertEquals;
import akka.actor.ActorRef;
import akka.actor.Props;
-import akka.testkit.JavaTestKit;
-import org.junit.Assert;
+import akka.testkit.TestActorRef;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import static org.junit.Assert.assertEquals;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
public class CandidateTest extends AbstractRaftActorBehaviorTest {
- private final ActorRef candidateActor = getSystem().actorOf(Props.create(
- DoNothingActor.class));
+ private final TestActorRef<MessageCollectorActor> candidateActor = actorFactory.createTestActor(
+ Props.create(MessageCollectorActor.class), actorFactory.generateActorId("candidate"));
- private final ActorRef peerActor1 = getSystem().actorOf(Props.create(
- DoNothingActor.class));
+ private TestActorRef<MessageCollectorActor>[] peerActors;
- private final ActorRef peerActor2 = getSystem().actorOf(Props.create(
- DoNothingActor.class));
-
- private final ActorRef peerActor3 = getSystem().actorOf(Props.create(
- DoNothingActor.class));
-
- private final ActorRef peerActor4 = getSystem().actorOf(Props.create(
- DoNothingActor.class));
-
- private final Map<String, String> onePeer = new HashMap<>();
- private final Map<String, String> twoPeers = new HashMap<>();
- private final Map<String, String> fourPeers = new HashMap<>();
+ private RaftActorBehavior candidate;
@Before
public void setUp(){
- onePeer.put(peerActor1.path().toString(),
- peerActor1.path().toString());
-
- twoPeers.put(peerActor1.path().toString(),
- peerActor1.path().toString());
- twoPeers.put(peerActor2.path().toString(),
- peerActor2.path().toString());
-
- fourPeers.put(peerActor1.path().toString(),
- peerActor1.path().toString());
- fourPeers.put(peerActor2.path().toString(),
- peerActor2.path().toString());
- fourPeers.put(peerActor3.path().toString(),
- peerActor3.path().toString());
- fourPeers.put(peerActor4.path().toString(),
- peerActor3.path().toString());
+ }
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ if(candidate != null) {
+ candidate.close();
+ }
+ super.tearDown();
}
@Test
RaftActorContext raftActorContext = createActorContext();
long expectedTerm = raftActorContext.getTermInformation().getCurrentTerm();
- new Candidate(raftActorContext);
+ candidate = new Candidate(raftActorContext);
- assertEquals(expectedTerm+1, raftActorContext.getTermInformation().getCurrentTerm());
- assertEquals(raftActorContext.getId(), raftActorContext.getTermInformation().getVotedFor());
+ assertEquals("getCurrentTerm", expectedTerm+1, raftActorContext.getTermInformation().getCurrentTerm());
+ assertEquals("getVotedFor", raftActorContext.getId(), raftActorContext.getTermInformation().getVotedFor());
}
@Test
public void testThatAnElectionTimeoutIsTriggered(){
- new JavaTestKit(getSystem()) {{
-
- new Within(DefaultConfigParamsImpl.HEART_BEAT_INTERVAL.$times(6)) {
- protected void run() {
-
- Candidate candidate = new Candidate(createActorContext(getTestActor()));
-
- final Boolean out = new ExpectMsg<Boolean>(DefaultConfigParamsImpl.HEART_BEAT_INTERVAL.$times(6), "ElectionTimeout") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof ElectionTimeout) {
- return true;
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(true, out);
- }
- };
- }};
+ MockRaftActorContext actorContext = createActorContext();
+ candidate = new Candidate(actorContext);
+
+ MessageCollectorActor.expectFirstMatching(candidateActor, ElectionTimeout.class,
+ actorContext.getConfigParams().getElectionTimeOutInterval().$times(6).toMillis());
}
@Test
public void testHandleElectionTimeoutWhenThereAreZeroPeers(){
RaftActorContext raftActorContext = createActorContext();
- Candidate candidate =
- new Candidate(raftActorContext);
+ candidate = new Candidate(raftActorContext);
- RaftActorBehavior raftBehavior =
+ RaftActorBehavior newBehavior =
candidate.handleMessage(candidateActor, new ElectionTimeout());
- Assert.assertTrue(raftBehavior instanceof Leader);
+ assertEquals("Behavior", RaftState.Leader, newBehavior.state());
}
@Test
- public void testHandleElectionTimeoutWhenThereAreTwoNodesInCluster(){
- MockRaftActorContext raftActorContext =
- (MockRaftActorContext) createActorContext();
- raftActorContext.setPeerAddresses(onePeer);
- Candidate candidate =
- new Candidate(raftActorContext);
-
- RaftActorBehavior raftBehavior =
- candidate.handleMessage(candidateActor, new ElectionTimeout());
+ public void testHandleElectionTimeoutWhenThereAreTwoNodeCluster(){
+ MockRaftActorContext raftActorContext = createActorContext();
+ raftActorContext.setPeerAddresses(setupPeers(1));
+ candidate = new Candidate(raftActorContext);
- Assert.assertTrue(raftBehavior instanceof Candidate);
+ candidate = candidate.handleMessage(candidateActor, new ElectionTimeout());
+
+ assertEquals("Behavior", RaftState.Candidate, candidate.state());
}
@Test
- public void testBecomeLeaderOnReceivingMajorityVotesInThreeNodesInCluster(){
- MockRaftActorContext raftActorContext =
- (MockRaftActorContext) createActorContext();
- raftActorContext.setPeerAddresses(twoPeers);
- Candidate candidate =
- new Candidate(raftActorContext);
-
- RaftActorBehavior behaviorOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
+ public void testBecomeLeaderOnReceivingMajorityVotesInThreeNodeCluster(){
+ MockRaftActorContext raftActorContext = createActorContext();
+ raftActorContext.setPeerAddresses(setupPeers(2));
+ candidate = new Candidate(raftActorContext);
- Assert.assertTrue(behaviorOnFirstVote instanceof Leader);
+ candidate = candidate.handleMessage(peerActors[0], new RequestVoteReply(1, true));
+ assertEquals("Behavior", RaftState.Leader, candidate.state());
}
@Test
- public void testBecomeLeaderOnReceivingMajorityVotesInFiveNodesInCluster(){
- MockRaftActorContext raftActorContext =
- (MockRaftActorContext) createActorContext();
- raftActorContext.setPeerAddresses(fourPeers);
- Candidate candidate =
- new Candidate(raftActorContext);
+ public void testBecomeLeaderOnReceivingMajorityVotesInFiveNodeCluster(){
+ MockRaftActorContext raftActorContext = createActorContext();
+ raftActorContext.setPeerAddresses(setupPeers(4));
+ candidate = new Candidate(raftActorContext);
- RaftActorBehavior behaviorOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
+ // First peer denies the vote.
+ candidate = candidate.handleMessage(peerActors[0], new RequestVoteReply(1, false));
- RaftActorBehavior behaviorOnSecondVote = candidate.handleMessage(peerActor2, new RequestVoteReply(0, true));
+ assertEquals("Behavior", RaftState.Candidate, candidate.state());
- Assert.assertTrue(behaviorOnFirstVote instanceof Candidate);
- Assert.assertTrue(behaviorOnSecondVote instanceof Leader);
+ candidate = candidate.handleMessage(peerActors[1], new RequestVoteReply(1, true));
- }
+ assertEquals("Behavior", RaftState.Candidate, candidate.state());
- @Test
- public void testResponseToAppendEntriesWithLowerTerm(){
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- Candidate candidate = new Candidate(createActorContext(getTestActor()));
-
- candidate.handleMessage(getTestActor(), new AppendEntries(0, "test", 0,0,Collections.<ReplicatedLogEntry>emptyList(), 0));
-
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "AppendEntriesResponse") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(false, out);
- }
- };
- }};
+ candidate = candidate.handleMessage(peerActors[2], new RequestVoteReply(1, true));
+
+ assertEquals("Behavior", RaftState.Leader, candidate.state());
}
@Test
- public void testResponseToRequestVoteWithLowerTerm(){
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- Candidate candidate = new Candidate(createActorContext(getTestActor()));
-
- candidate.handleMessage(getTestActor(), new RequestVote(0, "test", 0, 0));
-
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "AppendEntriesResponse") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply = (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(false, out);
- }
- };
- }};
+ public void testResponseToHandleAppendEntriesWithLowerTerm() {
+ candidate = new Candidate(createActorContext());
+
+ setupPeers(1);
+ candidate.handleMessage(peerActors[0], new AppendEntries(1, "test", 0, 0,
+ Collections.<ReplicatedLogEntry>emptyList(), 0, -1));
+
+ AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(
+ peerActors[0], AppendEntriesReply.class);
+ assertEquals("isSuccess", false, reply.isSuccess());
+ assertEquals("getTerm", 2, reply.getTerm());
}
@Test
- public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNull(){
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- RaftActorContext context = createActorContext(getTestActor());
-
- context.getTermInformation().update(1000, null);
-
- // Once a candidate is created it will immediately increment the current term so after
- // construction the currentTerm should be 1001
- RaftActorBehavior follower = createBehavior(context);
-
- follower.handleMessage(getTestActor(), new RequestVote(1001, "test", 10000, 999));
-
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply = (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(true, out);
- }
- };
- }};
+ public void testResponseToRequestVoteWithLowerTerm() {
+ candidate = new Candidate(createActorContext());
+
+ setupPeers(1);
+ candidate.handleMessage(peerActors[0], new RequestVote(1, "test", 0, 0));
+
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(
+ peerActors[0], RequestVoteReply.class);
+ assertEquals("isVoteGranted", false, reply.isVoteGranted());
+ assertEquals("getTerm", 2, reply.getTerm());
}
@Test
- public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNotTheSameAsCandidateId(){
- new JavaTestKit(getSystem()) {{
+ public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForMatches() {
+ MockRaftActorContext context = createActorContext();
+ context.getTermInformation().update(1000, null);
+
+ // Once a candidate is created it will immediately increment the current term so after
+ // construction the currentTerm should be 1001
+ candidate = new Candidate(context);
- new Within(duration("1 seconds")) {
- protected void run() {
+ setupPeers(1);
+ candidate.handleMessage(peerActors[0], new RequestVote(1001, context.getId(), 10000, 999));
- RaftActorContext context = createActorContext(getTestActor());
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(
+ peerActors[0], RequestVoteReply.class);
+ assertEquals("isVoteGranted", true, reply.isVoteGranted());
+ assertEquals("getTerm", 1001, reply.getTerm());
+ }
- context.getTermInformation().update(1000, "test");
+ @Test
+ public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForDoesNotMatch() {
+ MockRaftActorContext context = createActorContext();
+ context.getTermInformation().update(1000, null);
- RaftActorBehavior follower = createBehavior(context);
+ // Once a candidate is created it will immediately increment the current term so after
+ // construction the currentTerm should be 1001
+ candidate = new Candidate(context);
- follower.handleMessage(getTestActor(), new RequestVote(1001, "candidate", 10000, 999));
+ setupPeers(1);
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply = (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
+ // RequestVote candidate ID ("candidate2") does not match this candidate's votedFor
+ // (it votes for itself)
+ candidate.handleMessage(peerActors[0], new RequestVote(1001, "candidate2", 10000, 999));
- assertEquals(false, out);
- }
- };
- }};
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(
+ peerActors[0], RequestVoteReply.class);
+ assertEquals("isVoteGranted", false, reply.isVoteGranted());
+ assertEquals("getTerm", 1001, reply.getTerm());
}
- @Override protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
+ @Override
+ protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
return new Candidate(actorContext);
}
- @Override protected RaftActorContext createActorContext() {
- return new MockRaftActorContext("test", getSystem(), candidateActor);
+ @Override protected MockRaftActorContext createActorContext() {
+ return new MockRaftActorContext("candidate", getSystem(), candidateActor);
}
+ private Map<String, String> setupPeers(int count) {
+ Map<String, String> peerMap = new HashMap<>();
+ peerActors = new TestActorRef[count];
+ for(int i = 0; i < count; i++) {
+ peerActors[i] = actorFactory.createTestActor(Props.create(MessageCollectorActor.class),
+ actorFactory.generateActorId("peer"));
+ peerMap.put("peer" + (i+1), peerActors[i].path().toString());
+ }
+ return peerMap;
+ }
+
+ @Override
+ protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(RaftActorContext actorContext,
+ ActorRef actorRef, RaftRPC rpc) throws Exception {
+ super.assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, actorRef, rpc);
+ assertEquals("New votedFor", null, actorContext.getTermInformation().getVotedFor());
+ }
}
package org.opendaylight.controller.cluster.raft.behaviors;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.Props;
-import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
import com.google.protobuf.ByteString;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
-import java.util.Map;
+import org.junit.After;
+import org.junit.Assert;
import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
public class FollowerTest extends AbstractRaftActorBehaviorTest {
- private final ActorRef followerActor = getSystem().actorOf(Props.create(
- DoNothingActor.class));
+ private final TestActorRef<MessageCollectorActor> followerActor = actorFactory.createTestActor(
+ Props.create(MessageCollectorActor.class), actorFactory.generateActorId("follower"));
+ private final TestActorRef<MessageCollectorActor> leaderActor = actorFactory.createTestActor(
+ Props.create(MessageCollectorActor.class), actorFactory.generateActorId("leader"));
- @Override protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
+ private RaftActorBehavior follower;
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ if(follower != null) {
+ follower.close();
+ }
+
+ super.tearDown();
+ }
+
+ @Override
+ protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
return new Follower(actorContext);
}
- @Override protected RaftActorContext createActorContext() {
+ @Override
+ protected MockRaftActorContext createActorContext() {
return createActorContext(followerActor);
}
- protected RaftActorContext createActorContext(ActorRef actorRef){
- return new MockRaftActorContext("test", getSystem(), actorRef);
+ @Override
+ protected MockRaftActorContext createActorContext(ActorRef actorRef){
+ return new MockRaftActorContext("follower", getSystem(), actorRef);
}
@Test
public void testThatAnElectionTimeoutIsTriggered(){
- new JavaTestKit(getSystem()) {{
-
- new Within(DefaultConfigParamsImpl.HEART_BEAT_INTERVAL.$times(6)) {
- protected void run() {
-
- Follower follower = new Follower(createActorContext(getTestActor()));
-
- final Boolean out = new ExpectMsg<Boolean>(DefaultConfigParamsImpl.HEART_BEAT_INTERVAL.$times(6), "ElectionTimeout") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof ElectionTimeout) {
- return true;
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(true, out);
- }
- };
- }};
+ MockRaftActorContext actorContext = createActorContext();
+ follower = new Follower(actorContext);
+
+ MessageCollectorActor.expectFirstMatching(followerActor, ElectionTimeout.class,
+ actorContext.getConfigParams().getElectionTimeOutInterval().$times(6).toMillis());
}
@Test
public void testHandleElectionTimeout(){
- RaftActorContext raftActorContext = createActorContext();
- Follower follower =
- new Follower(raftActorContext);
+ logStart("testHandleElectionTimeout");
- RaftActorBehavior raftBehavior =
- follower.handleMessage(followerActor, new ElectionTimeout());
+ follower = new Follower(createActorContext());
+
+ RaftActorBehavior raftBehavior = follower.handleMessage(followerActor, new ElectionTimeout());
assertTrue(raftBehavior instanceof Candidate);
}
@Test
public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNull(){
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- RaftActorContext context = createActorContext(getTestActor());
+ logStart("testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNull");
- context.getTermInformation().update(1000, null);
+ RaftActorContext context = createActorContext();
+ long term = 1000;
+ context.getTermInformation().update(term, null);
- RaftActorBehavior follower = createBehavior(context);
+ follower = createBehavior(context);
- follower.handleMessage(getTestActor(), new RequestVote(1000, "test", 10000, 999));
+ follower.handleMessage(leaderActor, new RequestVote(term, "test", 10000, 999));
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply = (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, RequestVoteReply.class);
- assertEquals(true, out);
- }
- };
- }};
+ assertEquals("isVoteGranted", true, reply.isVoteGranted());
+ assertEquals("getTerm", term, reply.getTerm());
}
@Test
public void testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNotTheSameAsCandidateId(){
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- RaftActorContext context = createActorContext(getTestActor());
+ logStart("testHandleRequestVoteWhenSenderTermEqualToCurrentTermAndVotedForIsNotTheSameAsCandidateId");
- context.getTermInformation().update(1000, "test");
+ RaftActorContext context = createActorContext();
+ long term = 1000;
+ context.getTermInformation().update(term, "test");
- RaftActorBehavior follower = createBehavior(context);
+ follower = createBehavior(context);
- follower.handleMessage(getTestActor(), new RequestVote(1000, "candidate", 10000, 999));
+ follower.handleMessage(leaderActor, new RequestVote(term, "candidate", 10000, 999));
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "RequestVoteReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof RequestVoteReply) {
- RequestVoteReply reply = (RequestVoteReply) in;
- return reply.isVoteGranted();
- } else {
- throw noMatch();
- }
- }
- }.get();
+ RequestVoteReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, RequestVoteReply.class);
- assertEquals(false, out);
- }
- };
- }};
+ assertEquals("isVoteGranted", false, reply.isVoteGranted());
}
/**
*/
@Test
public void testHandleAppendEntriesWithNewerCommitIndex() throws Exception {
- new JavaTestKit(getSystem()) {{
+ logStart("testHandleAppendEntriesWithNewerCommitIndex");
- RaftActorContext context =
- createActorContext();
+ MockRaftActorContext context = createActorContext();
- context.setLastApplied(100);
- setLastLogEntry((MockRaftActorContext) context, 1, 100,
+ context.setLastApplied(100);
+ setLastLogEntry(context, 1, 100,
new MockRaftActorContext.MockPayload(""));
- ((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
+ context.getReplicatedLog().setSnapshotIndex(99);
- List<ReplicatedLogEntry> entries =
- Arrays.asList(
- (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
- new MockRaftActorContext.MockPayload("foo"))
- );
+ List<ReplicatedLogEntry> entries = Arrays.<ReplicatedLogEntry>asList(
+ newReplicatedLogEntry(2, 101, "foo"));
- // The new commitIndex is 101
- AppendEntries appendEntries =
- new AppendEntries(2, "leader-1", 100, 1, entries, 101);
+ // The new commitIndex is 101
+ AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100);
- RaftActorBehavior raftBehavior =
- createBehavior(context).handleMessage(getRef(), appendEntries);
+ follower = createBehavior(context);
+ follower.handleMessage(leaderActor, appendEntries);
- assertEquals(101L, context.getLastApplied());
-
- }};
+ assertEquals("getLastApplied", 101L, context.getLastApplied());
}
/**
* @throws Exception
*/
@Test
- public void testHandleAppendEntriesSenderPrevLogTermNotSameAsReceiverPrevLogTerm()
- throws Exception {
- new JavaTestKit(getSystem()) {{
-
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
-
- // First set the receivers term to lower number
- context.getTermInformation().update(95, "test");
-
- // Set the last log entry term for the receiver to be greater than
- // what we will be sending as the prevLogTerm in AppendEntries
- MockRaftActorContext.SimpleReplicatedLog mockReplicatedLog =
- setLastLogEntry(context, 20, 0, new MockRaftActorContext.MockPayload(""));
-
- // AppendEntries is now sent with a bigger term
- // this will set the receivers term to be the same as the sender's term
- AppendEntries appendEntries =
- new AppendEntries(100, "leader-1", 0, 0, null, 101);
+ public void testHandleAppendEntriesSenderPrevLogTermNotSameAsReceiverPrevLogTerm() {
+ logStart("testHandleAppendEntriesSenderPrevLogTermNotSameAsReceiverPrevLogTerm");
- RaftActorBehavior behavior = createBehavior(context);
+ MockRaftActorContext context = createActorContext();
- // Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
+ // First set the receivers term to lower number
+ context.getTermInformation().update(95, "test");
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
+ // AppendEntries is now sent with a bigger term
+ // this will set the receivers term to be the same as the sender's term
+ AppendEntries appendEntries = new AppendEntries(100, "leader", 0, 0, null, 101, -1);
- assertEquals(expected, raftBehavior);
+ follower = createBehavior(context);
- // Also expect an AppendEntriesReply to be sent where success is false
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
- "AppendEntriesReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
+ RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- assertEquals(false, out);
+ Assert.assertSame(follower, newBehavior);
+ AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor,
+ AppendEntriesReply.class);
- }};
+ assertEquals("isSuccess", false, reply.isSuccess());
}
-
-
/**
* This test verifies that when a new AppendEntries message is received with
* new entries and the logs of the sender and receiver match that the new
* @throws Exception
*/
@Test
- public void testHandleAppendEntriesAddNewEntries() throws Exception {
- new JavaTestKit(getSystem()) {{
-
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
-
- // First set the receivers term to lower number
- context.getTermInformation().update(1, "test");
-
- // Prepare the receivers log
- MockRaftActorContext.SimpleReplicatedLog log =
- new MockRaftActorContext.SimpleReplicatedLog();
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 0, new MockRaftActorContext.MockPayload("zero")));
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 1, new MockRaftActorContext.MockPayload("one")));
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 2, new MockRaftActorContext.MockPayload("two")));
-
- context.setReplicatedLog(log);
-
- // Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 3, new MockRaftActorContext.MockPayload("three")));
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("four")));
-
- // Send appendEntries with the same term as was set on the receiver
- // before the new behavior was created (1 in this case)
- // This will not work for a Candidate because as soon as a Candidate
- // is created it increments the term
- AppendEntries appendEntries =
- new AppendEntries(1, "leader-1", 2, 1, entries, 4);
-
- RaftActorBehavior behavior = createBehavior(context);
-
- // Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
-
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
-
- assertEquals(expected, raftBehavior);
- assertEquals(5, log.last().getIndex() + 1);
- assertNotNull(log.get(3));
- assertNotNull(log.get(4));
-
- // Also expect an AppendEntriesReply to be sent where success is false
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
- "AppendEntriesReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(true, out);
-
-
- }};
- }
+ public void testHandleAppendEntriesAddNewEntries() {
+ logStart("testHandleAppendEntriesAddNewEntries");
+
+ MockRaftActorContext context = createActorContext();
+
+ // First set the receivers term to lower number
+ context.getTermInformation().update(1, "test");
+ // Prepare the receivers log
+ MockRaftActorContext.SimpleReplicatedLog log = new MockRaftActorContext.SimpleReplicatedLog();
+ log.append(newReplicatedLogEntry(1, 0, "zero"));
+ log.append(newReplicatedLogEntry(1, 1, "one"));
+ log.append(newReplicatedLogEntry(1, 2, "two"));
+ context.setReplicatedLog(log);
+
+ // Prepare the entries to be sent with AppendEntries
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ entries.add(newReplicatedLogEntry(1, 3, "three"));
+ entries.add(newReplicatedLogEntry(1, 4, "four"));
+
+ // Send appendEntries with the same term as was set on the receiver
+ // before the new behavior was created (1 in this case)
+ // This will not work for a Candidate because as soon as a Candidate
+ // is created it increments the term
+ AppendEntries appendEntries = new AppendEntries(1, "leader-1", 2, 1, entries, 4, -1);
+
+ follower = createBehavior(context);
+
+ RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
+
+ Assert.assertSame(follower, newBehavior);
+
+ assertEquals("Next index", 5, log.last().getIndex() + 1);
+ assertEquals("Entry 3", entries.get(0), log.get(3));
+ assertEquals("Entry 4", entries.get(1), log.get(4));
+
+ expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 4);
+ }
/**
* This test verifies that when a new AppendEntries message is received with
* new entries and the logs of the sender and receiver are out-of-sync that
* the log is first corrected by removing the out of sync entries from the
* log and then adding in the new entries sent with the AppendEntries message
- *
- * @throws Exception
*/
@Test
- public void testHandleAppendEntriesCorrectReceiverLogEntries()
- throws Exception {
- new JavaTestKit(getSystem()) {{
-
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
-
- // First set the receivers term to lower number
- context.getTermInformation().update(2, "test");
-
- // Prepare the receivers log
- MockRaftActorContext.SimpleReplicatedLog log =
- new MockRaftActorContext.SimpleReplicatedLog();
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 0, new MockRaftActorContext.MockPayload("zero")));
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 1, new MockRaftActorContext.MockPayload("one")));
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 2, new MockRaftActorContext.MockPayload("two")));
-
- context.setReplicatedLog(log);
-
- // Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(2, 2, new MockRaftActorContext.MockPayload("two-1")));
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(2, 3, new MockRaftActorContext.MockPayload("three")));
-
- // Send appendEntries with the same term as was set on the receiver
- // before the new behavior was created (1 in this case)
- // This will not work for a Candidate because as soon as a Candidate
- // is created it increments the term
- AppendEntries appendEntries =
- new AppendEntries(2, "leader-1", 1, 1, entries, 3);
-
- RaftActorBehavior behavior = createBehavior(context);
-
- // Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
-
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
-
- assertEquals(expected, raftBehavior);
-
- // The entry at index 2 will be found out-of-sync with the leader
- // and will be removed
- // Then the two new entries will be added to the log
- // Thus making the log to have 4 entries
- assertEquals(4, log.last().getIndex() + 1);
- assertNotNull(log.get(2));
-
- assertEquals("one", log.get(1).getData().toString());
-
- // Check that the entry at index 2 has the new data
- assertEquals("two-1", log.get(2).getData().toString());
-
- assertEquals("three", log.get(3).getData().toString());
-
- assertNotNull(log.get(3));
-
- // Also expect an AppendEntriesReply to be sent where success is false
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
- "AppendEntriesReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
-
- assertEquals(true, out);
-
-
- }};
+ public void testHandleAppendEntriesCorrectReceiverLogEntries() {
+ logStart("testHandleAppendEntriesCorrectReceiverLogEntries");
+
+ MockRaftActorContext context = createActorContext();
+
+ // First set the receivers term to lower number
+ context.getTermInformation().update(1, "test");
+
+ // Prepare the receivers log
+ MockRaftActorContext.SimpleReplicatedLog log = new MockRaftActorContext.SimpleReplicatedLog();
+ log.append(newReplicatedLogEntry(1, 0, "zero"));
+ log.append(newReplicatedLogEntry(1, 1, "one"));
+ log.append(newReplicatedLogEntry(1, 2, "two"));
+
+ context.setReplicatedLog(log);
+
+ // Prepare the entries to be sent with AppendEntries
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ entries.add(newReplicatedLogEntry(2, 2, "two-1"));
+ entries.add(newReplicatedLogEntry(2, 3, "three"));
+
+ // Send appendEntries with the same term as was set on the receiver
+ // before the new behavior was created (1 in this case)
+ // This will not work for a Candidate because as soon as a Candidate
+ // is created it increments the term
+ AppendEntries appendEntries = new AppendEntries(2, "leader", 1, 1, entries, 3, -1);
+
+ follower = createBehavior(context);
+
+ RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
+
+ Assert.assertSame(follower, newBehavior);
+
+ // The entry at index 2 will be found out-of-sync with the leader
+ // and will be removed
+ // Then the two new entries will be added to the log
+ // Thus making the log to have 4 entries
+ assertEquals("Next index", 4, log.last().getIndex() + 1);
+ //assertEquals("Entry 2", entries.get(0), log.get(2));
+
+ assertEquals("Entry 1 data", "one", log.get(1).getData().toString());
+
+ // Check that the entry at index 2 has the new data
+ assertEquals("Entry 2", entries.get(0), log.get(2));
+
+ assertEquals("Entry 3", entries.get(1), log.get(3));
+
+ expectAndVerifyAppendEntriesReply(2, true, context.getId(), 2, 3);
}
@Test
public void testHandleAppendEntriesPreviousLogEntryMissing(){
- new JavaTestKit(getSystem()) {{
+ logStart("testHandleAppendEntriesPreviousLogEntryMissing");
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
+ MockRaftActorContext context = createActorContext();
- // Prepare the receivers log
- MockRaftActorContext.SimpleReplicatedLog log =
- new MockRaftActorContext.SimpleReplicatedLog();
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 0, new MockRaftActorContext.MockPayload("zero")));
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 1, new MockRaftActorContext.MockPayload("one")));
- log.append(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 2, new MockRaftActorContext.MockPayload("two")));
+ // Prepare the receivers log
+ MockRaftActorContext.SimpleReplicatedLog log = new MockRaftActorContext.SimpleReplicatedLog();
+ log.append(newReplicatedLogEntry(1, 0, "zero"));
+ log.append(newReplicatedLogEntry(1, 1, "one"));
+ log.append(newReplicatedLogEntry(1, 2, "two"));
- context.setReplicatedLog(log);
+ context.setReplicatedLog(log);
- // Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("two-1")));
+ // Prepare the entries to be sent with AppendEntries
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ entries.add(newReplicatedLogEntry(1, 4, "four"));
- AppendEntries appendEntries =
- new AppendEntries(1, "leader-1", 3, 1, entries, 4);
+ AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, -1);
- RaftActorBehavior behavior = createBehavior(context);
+ follower = createBehavior(context);
- // Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
+ Assert.assertSame(follower, newBehavior);
- assertEquals(expected, raftBehavior);
+ expectAndVerifyAppendEntriesReply(1, false, context.getId(), 1, 2);
+ }
- // Also expect an AppendEntriesReply to be sent where success is false
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
- "AppendEntriesReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
+ @Test
+ public void testHandleAppendEntriesWithExistingLogEntry() {
+ logStart("testHandleAppendEntriesWithExistingLogEntry");
- assertEquals(false, out);
+ MockRaftActorContext context = createActorContext();
- }};
+ context.getTermInformation().update(1, "test");
- }
+ // Prepare the receivers log
+ MockRaftActorContext.SimpleReplicatedLog log = new MockRaftActorContext.SimpleReplicatedLog();
+ log.append(newReplicatedLogEntry(1, 0, "zero"));
+ log.append(newReplicatedLogEntry(1, 1, "one"));
- @Test
- public void testHandleAppendAfterInstallingSnapshot(){
- new JavaTestKit(getSystem()) {{
+ context.setReplicatedLog(log);
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext();
+ // Send the last entry again.
+ List<ReplicatedLogEntry> entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"));
+ follower = createBehavior(context);
- // Prepare the receivers log
- MockRaftActorContext.SimpleReplicatedLog log =
- new MockRaftActorContext.SimpleReplicatedLog();
+ follower.handleMessage(leaderActor, new AppendEntries(1, "leader", 0, 1, entries, 1, -1));
- // Set up a log as if it has been snapshotted
- log.setSnapshotIndex(3);
- log.setSnapshotTerm(1);
+ assertEquals("Next index", 2, log.last().getIndex() + 1);
+ assertEquals("Entry 1", entries.get(0), log.get(1));
- context.setReplicatedLog(log);
+ expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 1);
- // Prepare the entries to be sent with AppendEntries
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- entries.add(
- new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("two-1")));
+ // Send the last entry again and also a new one.
- AppendEntries appendEntries =
- new AppendEntries(1, "leader-1", 3, 1, entries, 4);
+ entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
- RaftActorBehavior behavior = createBehavior(context);
+ leaderActor.underlyingActor().clear();
+ follower.handleMessage(leaderActor, new AppendEntries(1, "leader", 0, 1, entries, 2, -1));
- // Send an unknown message so that the state of the RaftActor remains unchanged
- RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
+ assertEquals("Next index", 3, log.last().getIndex() + 1);
+ assertEquals("Entry 1", entries.get(0), log.get(1));
+ assertEquals("Entry 2", entries.get(1), log.get(2));
- RaftActorBehavior raftBehavior =
- behavior.handleMessage(getRef(), appendEntries);
+ expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 2);
+ }
- assertEquals(expected, raftBehavior);
+ @Test
+ public void testHandleAppendAfterInstallingSnapshot(){
+ logStart("testHandleAppendAfterInstallingSnapshot");
- // Also expect an AppendEntriesReply to be sent where success is false
- final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
- "AppendEntriesReply") {
- // do not put code outside this method, will run afterwards
- protected Boolean match(Object in) {
- if (in instanceof AppendEntriesReply) {
- AppendEntriesReply reply = (AppendEntriesReply) in;
- return reply.isSuccess();
- } else {
- throw noMatch();
- }
- }
- }.get();
+ MockRaftActorContext context = createActorContext();
- assertEquals(true, out);
+ // Prepare the receivers log
+ MockRaftActorContext.SimpleReplicatedLog log = new MockRaftActorContext.SimpleReplicatedLog();
- }};
+ // Set up a log as if it has been snapshotted
+ log.setSnapshotIndex(3);
+ log.setSnapshotTerm(1);
+ context.setReplicatedLog(log);
+
+ // Prepare the entries to be sent with AppendEntries
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ entries.add(newReplicatedLogEntry(1, 4, "four"));
+
+ AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, 3);
+
+ follower = createBehavior(context);
+
+ RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
+
+ Assert.assertSame(follower, newBehavior);
+
+ expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 4);
}
*/
@Test
public void testHandleInstallSnapshot() throws Exception {
- JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {{
-
- ActorRef leaderActor = getSystem().actorOf(Props.create(
- MessageCollectorActor.class));
-
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext(getRef());
-
- Follower follower = (Follower)createBehavior(context);
-
- HashMap<String, String> followerSnapshot = new HashMap<>();
- followerSnapshot.put("1", "A");
- followerSnapshot.put("2", "B");
- followerSnapshot.put("3", "C");
-
- ByteString bsSnapshot = toByteString(followerSnapshot);
- ByteString chunkData = ByteString.EMPTY;
- int offset = 0;
- int snapshotLength = bsSnapshot.size();
- int i = 1;
- int chunkIndex = 1;
-
- do {
- chunkData = getNextChunk(bsSnapshot, offset);
- final InstallSnapshot installSnapshot =
- new InstallSnapshot(1, "leader-1", i, 1,
- chunkData, chunkIndex, 3);
- follower.handleMessage(leaderActor, installSnapshot);
- offset = offset + 50;
- i++;
- chunkIndex++;
- } while ((offset+50) < snapshotLength);
-
- final InstallSnapshot installSnapshot3 = new InstallSnapshot(1, "leader-1", 3, 1, chunkData, chunkIndex, 3);
- follower.handleMessage(leaderActor, installSnapshot3);
-
- String[] matches = new ReceiveWhile<String>(String.class, duration("2 seconds")) {
- @Override
- protected String match(Object o) throws Exception {
- if (o instanceof ApplySnapshot) {
- ApplySnapshot as = (ApplySnapshot)o;
- if (as.getSnapshot().getLastIndex() != installSnapshot3.getLastIncludedIndex()) {
- return "applySnapshot-lastIndex-mismatch";
- }
- if (as.getSnapshot().getLastAppliedTerm() != installSnapshot3.getLastIncludedTerm()) {
- return "applySnapshot-lastAppliedTerm-mismatch";
- }
- if (as.getSnapshot().getLastAppliedIndex() != installSnapshot3.getLastIncludedIndex()) {
- return "applySnapshot-lastAppliedIndex-mismatch";
- }
- if (as.getSnapshot().getLastTerm() != installSnapshot3.getLastIncludedTerm()) {
- return "applySnapshot-lastTerm-mismatch";
- }
- return "applySnapshot";
- }
-
- return "ignoreCase";
- }
- }.get();
-
- // Verify that after a snapshot is successfully applied the collected snapshot chunks is reset to empty
- assertEquals(ByteString.EMPTY, follower.getSnapshotChunksCollected());
-
- String applySnapshotMatch = "";
- for (String reply: matches) {
- if (reply.startsWith("applySnapshot")) {
- applySnapshotMatch = reply;
- }
- }
-
- assertEquals("applySnapshot", applySnapshotMatch);
-
- Object messages = executeLocalOperation(leaderActor, "get-all-messages");
-
- assertNotNull(messages);
- assertTrue(messages instanceof List);
- List<Object> listMessages = (List<Object>) messages;
-
- int installSnapshotReplyReceivedCount = 0;
- for (Object message: listMessages) {
- if (message instanceof InstallSnapshotReply) {
- ++installSnapshotReplyReceivedCount;
- }
- }
+ logStart("testHandleInstallSnapshot");
+
+ MockRaftActorContext context = createActorContext();
+
+ follower = createBehavior(context);
+
+ HashMap<String, String> followerSnapshot = new HashMap<>();
+ followerSnapshot.put("1", "A");
+ followerSnapshot.put("2", "B");
+ followerSnapshot.put("3", "C");
+
+ ByteString bsSnapshot = toByteString(followerSnapshot);
+ int offset = 0;
+ int snapshotLength = bsSnapshot.size();
+ int chunkSize = 50;
+ int totalChunks = (snapshotLength / chunkSize) + ((snapshotLength % chunkSize) > 0 ? 1 : 0);
+ int lastIncludedIndex = 1;
+ int chunkIndex = 1;
+ InstallSnapshot lastInstallSnapshot = null;
+
+ for(int i = 0; i < totalChunks; i++) {
+ ByteString chunkData = getNextChunk(bsSnapshot, offset, chunkSize);
+ lastInstallSnapshot = new InstallSnapshot(1, "leader", lastIncludedIndex, 1,
+ chunkData, chunkIndex, totalChunks);
+ follower.handleMessage(leaderActor, lastInstallSnapshot);
+ offset = offset + 50;
+ lastIncludedIndex++;
+ chunkIndex++;
+ }
- assertEquals(3, installSnapshotReplyReceivedCount);
+ ApplySnapshot applySnapshot = MessageCollectorActor.expectFirstMatching(followerActor,
+ ApplySnapshot.class);
+ Snapshot snapshot = applySnapshot.getSnapshot();
+ assertEquals("getLastIndex", lastInstallSnapshot.getLastIncludedIndex(), snapshot.getLastIndex());
+ assertEquals("getLastIncludedTerm", lastInstallSnapshot.getLastIncludedTerm(),
+ snapshot.getLastAppliedTerm());
+ assertEquals("getLastAppliedIndex", lastInstallSnapshot.getLastIncludedIndex(),
+ snapshot.getLastAppliedIndex());
+ assertEquals("getLastTerm", lastInstallSnapshot.getLastIncludedTerm(), snapshot.getLastTerm());
+ Assert.assertArrayEquals("getState", bsSnapshot.toByteArray(), snapshot.getState());
+
+ List<InstallSnapshotReply> replies = MessageCollectorActor.getAllMatching(
+ leaderActor, InstallSnapshotReply.class);
+ assertEquals("InstallSnapshotReply count", totalChunks, replies.size());
+
+ chunkIndex = 1;
+ for(InstallSnapshotReply reply: replies) {
+ assertEquals("getChunkIndex", chunkIndex++, reply.getChunkIndex());
+ assertEquals("getTerm", 1, reply.getTerm());
+ assertEquals("isSuccess", true, reply.isSuccess());
+ assertEquals("getFollowerId", context.getId(), reply.getFollowerId());
+ }
- }};
+ Assert.assertNull("Expected null SnapshotTracker", ((Follower)follower).getSnapshotTracker());
}
@Test
public void testHandleOutOfSequenceInstallSnapshot() throws Exception {
- JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {
- {
-
- ActorRef leaderActor = getSystem().actorOf(Props.create(
- MessageCollectorActor.class));
-
- MockRaftActorContext context = (MockRaftActorContext)
- createActorContext(getRef());
+ logStart("testHandleOutOfSequenceInstallSnapshot");
- Follower follower = (Follower) createBehavior(context);
+ MockRaftActorContext context = createActorContext();
- HashMap<String, String> followerSnapshot = new HashMap<>();
- followerSnapshot.put("1", "A");
- followerSnapshot.put("2", "B");
- followerSnapshot.put("3", "C");
+ follower = createBehavior(context);
- ByteString bsSnapshot = toByteString(followerSnapshot);
+ HashMap<String, String> followerSnapshot = new HashMap<>();
+ followerSnapshot.put("1", "A");
+ followerSnapshot.put("2", "B");
+ followerSnapshot.put("3", "C");
- final InstallSnapshot installSnapshot = new InstallSnapshot(1, "leader-1", 3, 1, getNextChunk(bsSnapshot, 10), 3, 3);
- follower.handleMessage(leaderActor, installSnapshot);
+ ByteString bsSnapshot = toByteString(followerSnapshot);
- Object messages = executeLocalOperation(leaderActor, "get-all-messages");
+ InstallSnapshot installSnapshot = new InstallSnapshot(1, "leader", 3, 1,
+ getNextChunk(bsSnapshot, 10, 50), 3, 3);
+ follower.handleMessage(leaderActor, installSnapshot);
- assertNotNull(messages);
- assertTrue(messages instanceof List);
- List<Object> listMessages = (List<Object>) messages;
+ InstallSnapshotReply reply = MessageCollectorActor.expectFirstMatching(leaderActor,
+ InstallSnapshotReply.class);
- int installSnapshotReplyReceivedCount = 0;
- for (Object message: listMessages) {
- if (message instanceof InstallSnapshotReply) {
- ++installSnapshotReplyReceivedCount;
- }
- }
+ assertEquals("isSuccess", false, reply.isSuccess());
+ assertEquals("getChunkIndex", -1, reply.getChunkIndex());
+ assertEquals("getTerm", 1, reply.getTerm());
+ assertEquals("getFollowerId", context.getId(), reply.getFollowerId());
- assertEquals(1, installSnapshotReplyReceivedCount);
- InstallSnapshotReply reply = (InstallSnapshotReply) listMessages.get(0);
- assertEquals(false, reply.isSuccess());
- assertEquals(-1, reply.getChunkIndex());
- assertEquals(ByteString.EMPTY, follower.getSnapshotChunksCollected());
-
-
- }};
+ Assert.assertNull("Expected null SnapshotTracker", ((Follower)follower).getSnapshotTracker());
}
- public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
- return MessageCollectorActor.getAllMessages(actor);
- }
-
- public ByteString getNextChunk (ByteString bs, int offset){
+ public ByteString getNextChunk (ByteString bs, int offset, int chunkSize){
int snapshotLength = bs.size();
int start = offset;
- int size = 50;
- if (50 > snapshotLength) {
+ int size = chunkSize;
+ if (chunkSize > snapshotLength) {
size = snapshotLength;
} else {
- if ((start + 50) > snapshotLength) {
+ if ((start + chunkSize) > snapshotLength) {
size = snapshotLength - start;
}
}
return bs.substring(start, start + size);
}
- private ByteString toByteString(Map<String, String> state) {
- ByteArrayOutputStream b = null;
- ObjectOutputStream o = null;
- try {
- try {
- b = new ByteArrayOutputStream();
- o = new ObjectOutputStream(b);
- o.writeObject(state);
- byte[] snapshotBytes = b.toByteArray();
- return ByteString.copyFrom(snapshotBytes);
- } finally {
- if (o != null) {
- o.flush();
- o.close();
- }
- if (b != null) {
- b.close();
- }
- }
- } catch (IOException e) {
- org.junit.Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
- }
- return null;
+ private void expectAndVerifyAppendEntriesReply(int expTerm, boolean expSuccess,
+ String expFollowerId, long expLogLastTerm, long expLogLastIndex) {
+
+ AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor,
+ AppendEntriesReply.class);
+
+ assertEquals("isSuccess", expSuccess, reply.isSuccess());
+ assertEquals("getTerm", expTerm, reply.getTerm());
+ assertEquals("getFollowerId", expFollowerId, reply.getFollowerId());
+ assertEquals("getLogLastTerm", expLogLastTerm, reply.getLogLastTerm());
+ assertEquals("getLogLastIndex", expLogLastIndex, reply.getLogLastIndex());
+ }
+
+ private ReplicatedLogEntry newReplicatedLogEntry(long term, long index, String data) {
+ return new MockRaftActorContext.MockReplicatedLogEntry(term, index,
+ new MockRaftActorContext.MockPayload(data));
+ }
+
+ @Override
+ protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(RaftActorContext actorContext,
+ ActorRef actorRef, RaftRPC rpc) throws Exception {
+ super.assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, actorRef, rpc);
+
+ String expVotedFor = RequestVote.class.isInstance(rpc) ? ((RequestVote)rpc).getCandidateId() : null;
+ assertEquals("New votedFor", expVotedFor, actorContext.getTermInformation().getVotedFor());
+ }
+
+ @Override
+ protected void handleAppendEntriesAddSameEntryToLogReply(TestActorRef<MessageCollectorActor> replyActor)
+ throws Exception {
+ AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(replyActor, AppendEntriesReply.class);
+ assertEquals("isSuccess", true, reply.isSuccess());
}
}
*/
package org.opendaylight.controller.cluster.raft.behaviors;
+import static org.junit.Assert.assertEquals;
import akka.actor.ActorRef;
import akka.actor.Props;
-import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
import java.util.HashMap;
import java.util.Map;
+import org.junit.After;
import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+
+public class IsolatedLeaderTest extends AbstractLeaderTest {
+
+ private final TestActorRef<MessageCollectorActor> leaderActor = actorFactory.createTestActor(
+ Props.create(MessageCollectorActor.class), actorFactory.generateActorId("leader"));
-public class IsolatedLeaderTest extends AbstractRaftActorBehaviorTest {
+ private final TestActorRef<MessageCollectorActor> senderActor = actorFactory.createTestActor(
+ Props.create(MessageCollectorActor.class), actorFactory.generateActorId("sender"));
- private ActorRef leaderActor =
- getSystem().actorOf(Props.create(DoNothingActor.class));
+ private AbstractLeader isolatedLeader;
- private ActorRef senderActor =
- getSystem().actorOf(Props.create(DoNothingActor.class));
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ if(isolatedLeader != null) {
+ isolatedLeader.close();
+ }
+
+ super.tearDown();
+ }
@Override
- protected RaftActorBehavior createBehavior(
- RaftActorContext actorContext) {
- return new Leader(actorContext);
+ protected RaftActorBehavior createBehavior(RaftActorContext actorContext) {
+ return new IsolatedLeader(actorContext);
}
@Override
- protected RaftActorContext createActorContext() {
+ protected MockRaftActorContext createActorContext() {
return createActorContext(leaderActor);
}
+ @Override
+ protected MockRaftActorContext createActorContext(ActorRef actor) {
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setElectionTimeoutFactor(100000);
+ MockRaftActorContext context = new MockRaftActorContext("isolated-leader", getSystem(), actor);
+ context.setConfigParams(configParams);
+ return context;
+ }
@Test
- public void testHandleMessageWithThreeMembers() {
- new JavaTestKit(getSystem()) {{
- String followerAddress1 = "akka://test/user/$a";
- String followerAddress2 = "akka://test/user/$b";
-
- MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put("follower-1", followerAddress1);
- peerAddresses.put("follower-2", followerAddress2);
- leaderActorContext.setPeerAddresses(peerAddresses);
-
- IsolatedLeader isolatedLeader = new IsolatedLeader(leaderActorContext);
- assertTrue(isolatedLeader.state() == RaftState.IsolatedLeader);
-
- // in a 3 node cluster, even if 1 follower is returns a reply, the isolatedLeader is not isolated
- RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
+ public void testHandleMessageWithThreeMembers() throws Exception {
+ String followerAddress1 = "akka://test/user/$a";
+ String followerAddress2 = "akka://test/user/$b";
+
+ MockRaftActorContext leaderActorContext = createActorContext();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerAddress1);
+ peerAddresses.put("follower-2", followerAddress2);
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ isolatedLeader = new IsolatedLeader(leaderActorContext);
+ assertEquals("Raft state", RaftState.IsolatedLeader, isolatedLeader.state());
+
+ // in a 3 node cluster, even if 1 follower returns a reply, the isolatedLeader is not isolated
+ RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
new AppendEntriesReply("follower-1", isolatedLeader.lastTerm() - 1, true,
- isolatedLeader.lastIndex() - 1, isolatedLeader.lastTerm() - 1));
+ isolatedLeader.lastIndex() - 1, isolatedLeader.lastTerm() - 1));
+
+ assertEquals("Raft state", RaftState.Leader, behavior.state());
- assertEquals(RaftState.Leader, behavior.state());
+ isolatedLeader.close();
+ isolatedLeader = (AbstractLeader) behavior;
- behavior = isolatedLeader.handleMessage(senderActor,
+ behavior = isolatedLeader.handleMessage(senderActor,
new AppendEntriesReply("follower-2", isolatedLeader.lastTerm() - 1, true,
- isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
- assertEquals(RaftState.Leader, behavior.state());
- }};
+ assertEquals("Raft state", RaftState.Leader, behavior.state());
}
@Test
- public void testHandleMessageWithFiveMembers() {
- new JavaTestKit(getSystem()) {{
-
- String followerAddress1 = "akka://test/user/$a";
- String followerAddress2 = "akka://test/user/$b";
- String followerAddress3 = "akka://test/user/$c";
- String followerAddress4 = "akka://test/user/$d";
-
- MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put("follower-1", followerAddress1);
- peerAddresses.put("follower-2", followerAddress2);
- peerAddresses.put("follower-3", followerAddress3);
- peerAddresses.put("follower-4", followerAddress4);
- leaderActorContext.setPeerAddresses(peerAddresses);
-
- IsolatedLeader isolatedLeader = new IsolatedLeader(leaderActorContext);
- assertEquals(RaftState.IsolatedLeader, isolatedLeader.state());
-
- // in a 5 member cluster, atleast 2 followers need to be active and return a reply
- RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
+ public void testHandleMessageWithFiveMembers() throws Exception {
+ String followerAddress1 = "akka://test/user/$a";
+ String followerAddress2 = "akka://test/user/$b";
+ String followerAddress3 = "akka://test/user/$c";
+ String followerAddress4 = "akka://test/user/$d";
+
+ MockRaftActorContext leaderActorContext = createActorContext();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerAddress1);
+ peerAddresses.put("follower-2", followerAddress2);
+ peerAddresses.put("follower-3", followerAddress3);
+ peerAddresses.put("follower-4", followerAddress4);
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ isolatedLeader = new IsolatedLeader(leaderActorContext);
+ assertEquals("Raft state", RaftState.IsolatedLeader, isolatedLeader.state());
+
+ // in a 5 member cluster, at least 2 followers need to be active and return a reply
+ RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
new AppendEntriesReply("follower-1", isolatedLeader.lastTerm() - 1, true,
- isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
- assertEquals(RaftState.IsolatedLeader, behavior.state());
+ assertEquals("Raft state", RaftState.IsolatedLeader, behavior.state());
- behavior = isolatedLeader.handleMessage(senderActor,
+ behavior = isolatedLeader.handleMessage(senderActor,
new AppendEntriesReply("follower-2", isolatedLeader.lastTerm() - 1, true,
- isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+
+ assertEquals("Raft state", RaftState.Leader, behavior.state());
- assertEquals(RaftState.Leader, behavior.state());
+ isolatedLeader.close();
+ isolatedLeader = (AbstractLeader) behavior;
- behavior = isolatedLeader.handleMessage(senderActor,
+ behavior = isolatedLeader.handleMessage(senderActor,
new AppendEntriesReply("follower-3", isolatedLeader.lastTerm() - 1, true,
- isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
- assertEquals(RaftState.Leader, behavior.state());
- }};
+ assertEquals("Raft state", RaftState.Leader, behavior.state());
}
@Test
- public void testHandleMessageFromAnotherLeader() {
- new JavaTestKit(getSystem()) {{
- String followerAddress1 = "akka://test/user/$a";
- String followerAddress2 = "akka://test/user/$b";
-
- MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put("follower-1", followerAddress1);
- peerAddresses.put("follower-2", followerAddress2);
- leaderActorContext.setPeerAddresses(peerAddresses);
-
- IsolatedLeader isolatedLeader = new IsolatedLeader(leaderActorContext);
- assertTrue(isolatedLeader.state() == RaftState.IsolatedLeader);
-
- // if an append-entries reply is received by the isolated-leader, and that reply
- // has a term > than its own term, then IsolatedLeader switches to Follower
- // bowing itself to another leader in the cluster
- RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
+ public void testHandleMessageFromAnotherLeader() throws Exception {
+ String followerAddress1 = "akka://test/user/$a";
+ String followerAddress2 = "akka://test/user/$b";
+
+ MockRaftActorContext leaderActorContext = createActorContext();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerAddress1);
+ peerAddresses.put("follower-2", followerAddress2);
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ isolatedLeader = new IsolatedLeader(leaderActorContext);
+ assertEquals("Raft state", RaftState.IsolatedLeader, isolatedLeader.state());
+
+ // if an append-entries reply is received by the isolated-leader, and that reply
+ // has a term > than its own term, then IsolatedLeader switches to Follower
+ // bowing itself to another leader in the cluster
+ RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
new AppendEntriesReply("follower-1", isolatedLeader.lastTerm() + 1, true,
- isolatedLeader.lastIndex() + 1, isolatedLeader.lastTerm() + 1));
+ isolatedLeader.lastIndex() + 1, isolatedLeader.lastTerm() + 1));
- assertEquals(RaftState.Follower, behavior.state());
- }};
+ assertEquals("Raft state", RaftState.Follower, behavior.state());
+ behavior.close();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.dispatch.Dispatchers;
+import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.junit.After;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.SimpleReplicatedLog;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.impl.SimpleLogger;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * Tests various leader election scenarios.
+ *
+ * @author Thomas Pantelis
+ */
+public class LeaderElectionScenariosTest {
+
+ private static final int HEARTBEAT_INTERVAL = 50;
+
+ public static class MemberActor extends MessageCollectorActor {
+
+ volatile RaftActorBehavior behavior;
+ Map<Class<?>, CountDownLatch> messagesReceivedLatches = new ConcurrentHashMap<>();
+ Map<Class<?>, Boolean> dropMessagesToBehavior = new ConcurrentHashMap<>();
+ CountDownLatch behaviorStateChangeLatch;
+
+ public static Props props() {
+ return Props.create(MemberActor.class).withDispatcher(Dispatchers.DefaultDispatcherId());
+ }
+
+ @Override
+ public void onReceive(Object message) throws Exception {
+ // Ignore scheduled SendHeartBeat messages.
+ if(message instanceof SendHeartBeat) {
+ return;
+ }
+
+ try {
+ if(behavior != null && !dropMessagesToBehavior.containsKey(message.getClass())) {
+ RaftActorBehavior oldBehavior = behavior;
+ behavior = behavior.handleMessage(getSender(), message);
+ if(behavior != oldBehavior && behaviorStateChangeLatch != null) {
+ behaviorStateChangeLatch.countDown();
+ }
+ }
+ } finally {
+ super.onReceive(message);
+
+ CountDownLatch latch = messagesReceivedLatches.get(message.getClass());
+ if(latch != null) {
+ latch.countDown();
+ }
+ }
+ }
+
+ void expectBehaviorStateChange() {
+ behaviorStateChangeLatch = new CountDownLatch(1);
+ }
+
+ void waitForBehaviorStateChange() {
+ assertTrue("Expected behavior state change",
+ Uninterruptibles.awaitUninterruptibly(behaviorStateChangeLatch, 5, TimeUnit.SECONDS));
+ }
+
+ void expectMessageClass(Class<?> expClass, int expCount) {
+ messagesReceivedLatches.put(expClass, new CountDownLatch(expCount));
+ }
+
+ void waitForExpectedMessages(Class<?> expClass) {
+ CountDownLatch latch = messagesReceivedLatches.get(expClass);
+ assertNotNull("No messages received for " + expClass, latch);
+ assertTrue("Missing messages of type " + expClass,
+ Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
+ }
+
+ void dropMessagesToBehavior(Class<?> msgClass) {
+ dropMessagesToBehavior(msgClass, 1);
+ }
+
+ void dropMessagesToBehavior(Class<?> msgClass, int expCount) {
+ expectMessageClass(msgClass, expCount);
+ dropMessagesToBehavior.put(msgClass, Boolean.TRUE);
+ }
+
+ void clearDropMessagesToBehavior() {
+ dropMessagesToBehavior.clear();
+ }
+
+ @Override
+ public void clear() {
+ behaviorStateChangeLatch = null;
+ clearDropMessagesToBehavior();
+ messagesReceivedLatches.clear();
+ super.clear();
+ }
+
+ void forwardCapturedMessageToBehavior(Class<?> msgClass, ActorRef sender) throws Exception {
+ Object message = getFirstMatching(getSelf(), msgClass);
+ assertNotNull("Message of type " + msgClass + " not received", message);
+ getSelf().tell(message, sender);
+ }
+
+ void forwardCapturedMessagesToBehavior(Class<?> msgClass, ActorRef sender) throws Exception {
+ for(Object m: getAllMatching(getSelf(), msgClass)) {
+ getSelf().tell(m, sender);
+ }
+ }
+
+ <T> T getCapturedMessage(Class<T> msgClass) throws Exception {
+ Object message = getFirstMatching(getSelf(), msgClass);
+ assertNotNull("Message of type " + msgClass + " not received", message);
+ return (T) message;
+ }
+ }
+
+ static {
+ System.setProperty(SimpleLogger.LOG_KEY_PREFIX + MockRaftActorContext.class.getName(), "trace");
+ }
+
+ private final Logger testLog = LoggerFactory.getLogger(MockRaftActorContext.class);
+ private final ActorSystem system = ActorSystem.create("test");
+
+ @After
+ public void tearDown() {
+ JavaTestKit.shutdownActorSystem(system);
+ }
+
+ private DefaultConfigParamsImpl newConfigParams() {
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS));
+ configParams.setElectionTimeoutFactor(100000);
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+ return configParams;
+ }
+
+ private MockRaftActorContext newRaftActorContext(String id, ActorRef actor,
+ Map<String, String> peerAddresses) {
+ MockRaftActorContext context = new MockRaftActorContext(id, system, actor);
+ context.setPeerAddresses(peerAddresses);
+ context.getTermInformation().updateAndPersist(1, "");
+ return context;
+ }
+
+ private void verifyBehaviorState(String name, TestActorRef<MemberActor> actor, RaftState expState) {
+ assertEquals(name + " behavior state", expState, actor.underlyingActor().behavior.state());
+ }
+
+ private void initializeLeaderBehavior(TestActorRef<MemberActor> actor, RaftActorContext context,
+ int numActiveFollowers) throws Exception {
+ // Leader sends immediate heartbeats - we don't care about them so ignore them.
+
+ actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, numActiveFollowers);
+ Leader leader = new Leader(context);
+ actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
+ actor.underlyingActor().behavior = leader;
+
+ actor.underlyingActor().forwardCapturedMessagesToBehavior(AppendEntriesReply.class, ActorRef.noSender());
+ actor.underlyingActor().clear();
+ }
+
+ private TestActorRef<MemberActor> newMemberActor(String name) throws Exception {
+ TestActorRef<MemberActor> actor = TestActorRef.create(system, MemberActor.props(), name);
+ MessageCollectorActor.waitUntilReady(actor);
+ return actor;
+ }
+
+ private void sendHeartbeat(TestActorRef<MemberActor> leaderActor) {
+ Uninterruptibles.sleepUninterruptibly(HEARTBEAT_INTERVAL, TimeUnit.MILLISECONDS);
+ leaderActor.underlyingActor().behavior.handleMessage(leaderActor, new SendHeartBeat());
+ }
+
+ @Test
+ public void testDelayedMessagesScenario() throws Exception {
+ testLog.info("Starting testDelayedMessagesScenario");
+
+ TestActorRef<MemberActor> member1Actor = newMemberActor("member1");
+ TestActorRef<MemberActor> member2Actor = newMemberActor("member2");
+ TestActorRef<MemberActor> member3Actor = newMemberActor("member3");
+
+ // Create member 2's behavior initially as Follower
+
+ MockRaftActorContext member2Context = newRaftActorContext("member2", member2Actor,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1Actor.path().toString()).
+ put("member3", member3Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
+ member2Context.setConfigParams(member2ConfigParams);
+
+ Follower member2Behavior = new Follower(member2Context);
+ member2Actor.underlyingActor().behavior = member2Behavior;
+
+ // Create member 3's behavior initially as Follower
+
+ MockRaftActorContext member3Context = newRaftActorContext("member3", member3Actor,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1Actor.path().toString()).
+ put("member2", member2Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
+ member3Context.setConfigParams(member3ConfigParams);
+
+ Follower member3Behavior = new Follower(member3Context);
+ member3Actor.underlyingActor().behavior = member3Behavior;
+
+ // Create member 1's behavior initially as Leader
+
+ MockRaftActorContext member1Context = newRaftActorContext("member1", member1Actor,
+ ImmutableMap.<String,String>builder().
+ put("member2", member2Actor.path().toString()).
+ put("member3", member3Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
+ member1Context.setConfigParams(member1ConfigParams);
+
+ initializeLeaderBehavior(member1Actor, member1Context, 2);
+
+ member2Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().clear();
+
+ // Send ElectionTimeout to member 2 to simulate missing heartbeat from the Leader. member 2
+ // should switch to Candidate and send out RequestVote messages. Set member 1 and 3 actors
+ // to capture RequestVote but not to forward to the behavior just yet as we want to
+ // control the order of RequestVote messages to member 1 and 3.
+
+ member1Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
+
+ member2Actor.underlyingActor().expectBehaviorStateChange();
+
+ member3Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
+
+ member2Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ member2Actor.underlyingActor().waitForBehaviorStateChange();
+ verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
+
+ assertEquals("member 1 election term", 1, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 1, member3Context.getTermInformation().getCurrentTerm());
+
+ // At this point member 1 and 3 actors have captured the RequestVote messages. First
+ // forward the RequestVote message to member 1's behavior. Since the RequestVote term
+ // is greater than member 1's term, member 1 should switch to Follower without replying
+ // to RequestVote and update its term to 2.
+
+ member1Actor.underlyingActor().clearDropMessagesToBehavior();
+ member1Actor.underlyingActor().expectBehaviorStateChange();
+ member1Actor.underlyingActor().forwardCapturedMessageToBehavior(RequestVote.class, member2Actor);
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ member1Actor.underlyingActor().waitForBehaviorStateChange();
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+
+ // Now forward member 3's captured RequestVote message to its behavior. Since member 3 is
+ // already a Follower, it should update its term to 2 and send a RequestVoteReply back to
+ // member 2 granting the vote b/c the RequestVote's term, lastLogTerm, and lastLogIndex
+ // should satisfy the criteria for granting the vote. However, we'll delay sending the
+ // RequestVoteReply to member 2's behavior to simulate network latency.
+
+ member2Actor.underlyingActor().dropMessagesToBehavior(RequestVoteReply.class);
+
+ member3Actor.underlyingActor().clearDropMessagesToBehavior();
+ member3Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+ member3Actor.underlyingActor().forwardCapturedMessageToBehavior(RequestVote.class, member2Actor);
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
+
+ // Send ElectionTimeout to member 3 to simulate missing heartbeat from a Leader. member 3
+ // should switch to Candidate and send out RequestVote messages. member 1 should grant the
+ // vote and send a reply. After receiving the RequestVoteReply, member 3 should switch to leader.
+
+ member2Actor.underlyingActor().expectBehaviorStateChange();
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
+ member3Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 2);
+
+ member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member3Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ // member 2 should've switched to Follower as member 3's RequestVote term (3) was greater
+ // than member 2's term (2).
+
+ member2Actor.underlyingActor().waitForBehaviorStateChange();
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+
+ // The switch to leader should cause an immediate AppendEntries heartbeat from member 3.
+
+ member3Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ // Now forward the original delayed RequestVoteReply from member 3 to member 2 that granted
+ // the vote. Since member 2 is now a Follower, the RequestVoteReply should be ignored.
+
+ member2Actor.underlyingActor().clearDropMessagesToBehavior();
+ member2Actor.underlyingActor().forwardCapturedMessageToBehavior(RequestVoteReply.class, member3Actor);
+
+ member2Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("testDelayedMessagesScenario done");
+ }
+
+ @Test
+ public void testPartitionedLeadersScenario() throws Exception {
+ testLog.info("Starting testPartitionedLeadersScenario");
+
+ TestActorRef<MemberActor> member1Actor = newMemberActor("member1");
+ TestActorRef<MemberActor> member2Actor = newMemberActor("member2");
+ TestActorRef<MemberActor> member3Actor = newMemberActor("member3");
+
+ // Create member 2's behavior initially as Follower
+
+ MockRaftActorContext member2Context = newRaftActorContext("member2", member2Actor,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1Actor.path().toString()).
+ put("member3", member3Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
+ member2Context.setConfigParams(member2ConfigParams);
+
+ Follower member2Behavior = new Follower(member2Context);
+ member2Actor.underlyingActor().behavior = member2Behavior;
+
+ // Create member 3's behavior initially as Follower
+
+ MockRaftActorContext member3Context = newRaftActorContext("member3", member3Actor,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1Actor.path().toString()).
+ put("member2", member2Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
+ member3Context.setConfigParams(member3ConfigParams);
+
+ Follower member3Behavior = new Follower(member3Context);
+ member3Actor.underlyingActor().behavior = member3Behavior;
+
+ // Create member 1's behavior initially as Leader
+
+ MockRaftActorContext member1Context = newRaftActorContext("member1", member1Actor,
+ ImmutableMap.<String,String>builder().
+ put("member2", member2Actor.path().toString()).
+ put("member3", member3Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
+ member1Context.setConfigParams(member1ConfigParams);
+
+ initializeLeaderBehavior(member1Actor, member1Context, 2);
+
+ member2Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().clear();
+
+ // Send ElectionTimeout to member 2 to simulate no heartbeat from the Leader (member 1).
+ // member 2 should switch to Candidate, start new term 2 and send out RequestVote messages.
+ // member 1 will switch to Follower b/c its term is less than the RequestVote term, also it
+ // won't send back a reply. member 3 will drop the message (ie won't forward it to its behavior) to
+ // simulate loss of network connectivity between member 2 and 3.
+
+ member1Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+
+ member2Actor.underlyingActor().expectBehaviorStateChange();
+
+ member3Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
+
+ member2Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ // member 1 should switch to Follower as the RequestVote term is greater than its term. It
+ // won't send back a RequestVoteReply in this case.
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+
+ // member 2 should switch to Candidate since member 1 didn't reply.
+
+ member2Actor.underlyingActor().waitForBehaviorStateChange();
+ verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
+
+ assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 1, member3Context.getTermInformation().getCurrentTerm());
+
+ // Send ElectionTimeout to member 3 to simulate no heartbeat from the Leader (member 1).
+ // member 2 should switch to Candidate and send out RequestVote messages. member 1 will reply and
+ // grant the vote but member 2 will drop the message to simulate loss of network connectivity.
+
+ member1Actor.underlyingActor().clear();
+ member1Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+ member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.underlyingActor().clear();
+ member2Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
+ member2Actor.underlyingActor().dropMessagesToBehavior(AppendEntries.class);
+
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
+ member3Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
+
+ member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member3Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ // when member 3 switches to Leader it will immediately send out heartbeat AppendEntries to
+ // the followers. Wait for AppendEntries to member 1 and its AppendEntriesReply. The
+ // AppendEntries message to member 2 is dropped.
+
+ member1Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
+ member2Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Candidate);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ assertEquals("member 1 election term", 2, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 2, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
+
+ // member 2 is partitioned from the Leader (member 3) and hasn't received any messages. It
+ // would get another ElectionTimeout so simulate that. member 1 should send back a reply
+ // granting the vote. Messages (RequestVote and AppendEntries) from member 2 to member 3
+ // are dropped to simulate loss of network connectivity. Note member 2 will increment its
+ // election term to 3.
+
+ member1Actor.underlyingActor().clear();
+ member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.underlyingActor().clear();
+ member2Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
+ member2Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
+
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().dropMessagesToBehavior(AppendEntries.class);
+ member3Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class);
+
+ member2Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member2Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
+
+ requestVoteReply = member2Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member2Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ member1Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
+ member2Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
+
+ // We end up with 2 partitioned leaders both leading member 1. The term for member 1 and 2
+ // is 3 and member 3's term is 2.
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Leader);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 2, member3Context.getTermInformation().getCurrentTerm());
+
+ // Re-establish connectivity between member 2 and 3, ie stop dropping messages between
+ // the 2. Send heartbeats (AppendEntries) from member 3. Both member 1 and 2 should send back
+ // an unsuccessful AppendEntriesReply b/c their term (3) is greater than member 3's term (2).
+ // This should cause member 3 to switch to Follower.
+
+ RaftActorBehavior savedMember1Behavior = member1Actor.underlyingActor().behavior;
+ RaftActorBehavior savedMember2Behavior = member2Actor.underlyingActor().behavior;
+ RaftActorBehavior savedMember3Behavior = member3Actor.underlyingActor().behavior;
+ long savedMember3Term = member3Context.getTermInformation().getCurrentTerm();
+ String savedMember3VoterFor = member3Context.getTermInformation().getVotedFor();
+
+ member1Actor.underlyingActor().clear();
+ member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.underlyingActor().clear();
+ member2Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
+
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
+
+ sendHeartbeat(member3Actor);
+
+ member3Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
+
+ AppendEntriesReply appendEntriesReply = member3Actor.underlyingActor().
+ getCapturedMessage(AppendEntriesReply.class);
+ assertEquals("isSuccess", false, appendEntriesReply.isSuccess());
+ assertEquals("getTerm", 3, appendEntriesReply.getTerm());
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ // Revert back to the partitioned leaders state to test the other sequence where member 2
+ // sends heartbeats first before member 3. member 1 should return a successful
+ // AppendEntriesReply b/c his term matches member 2's. member 3 should switch to Follower
+ // as his term is less than member 2's.
+
+ member1Actor.underlyingActor().behavior = savedMember1Behavior;
+ member2Actor.underlyingActor().behavior = savedMember2Behavior;
+ member3Actor.underlyingActor().behavior = savedMember3Behavior;
+
+ member3Context.getTermInformation().update(savedMember3Term, savedMember3VoterFor);
+
+ member1Actor.underlyingActor().clear();
+ member1Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
+
+ member2Actor.underlyingActor().clear();
+ member2Actor.underlyingActor().expectMessageClass(AppendEntriesReply.class, 1);
+
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().expectMessageClass(AppendEntries.class, 1);
+
+ sendHeartbeat(member2Actor);
+
+ member1Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(AppendEntries.class);
+
+ member2Actor.underlyingActor().waitForExpectedMessages(AppendEntriesReply.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Leader);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", 3, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("testPartitionedLeadersScenario done");
+ }
+
+ @Test
+ public void testPartitionedCandidateOnStartupScenario() throws Exception {
+ testLog.info("Starting testPartitionedCandidateOnStartupScenario");
+
+ TestActorRef<MemberActor> member1Actor = newMemberActor("member1") ;
+ TestActorRef<MemberActor> member2Actor = newMemberActor("member2");
+ TestActorRef<MemberActor> member3Actor = newMemberActor("member3");
+
+ // Create member 2's behavior as Follower.
+
+ MockRaftActorContext member2Context = newRaftActorContext("member2", member2Actor,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1Actor.path().toString()).
+ put("member3", member3Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member2ConfigParams = newConfigParams();
+ member2Context.setConfigParams(member2ConfigParams);
+
+ Follower member2Behavior = new Follower(member2Context);
+ member2Actor.underlyingActor().behavior = member2Behavior;
+
+ // Create member 1's behavior as Leader.
+
+ MockRaftActorContext member1Context = newRaftActorContext("member1", member1Actor,
+ ImmutableMap.<String,String>builder().
+ put("member2", member2Actor.path().toString()).
+ put("member3", member3Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member1ConfigParams = newConfigParams();
+ member1Context.setConfigParams(member1ConfigParams);
+
+ initializeLeaderBehavior(member1Actor, member1Context, 1);
+
+ member2Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().clear();
+
+ // Initialize the ReplicatedLog and election term info for member 1 and 2. The current term
+ // will be 3 and the last term will be 2.
+
+ SimpleReplicatedLog replicatedLog = new SimpleReplicatedLog();
+ replicatedLog.append(new MockReplicatedLogEntry(2, 1, new MockPayload("")));
+ replicatedLog.append(new MockReplicatedLogEntry(3, 1, new MockPayload("")));
+
+ member1Context.setReplicatedLog(replicatedLog);
+ member1Context.getTermInformation().update(3, "");
+
+ member2Context.setReplicatedLog(replicatedLog);
+ member2Context.getTermInformation().update(3, member1Context.getId());
+
+ // Create member 3's behavior initially as a Candidate.
+
+ MockRaftActorContext member3Context = newRaftActorContext("member3", member3Actor,
+ ImmutableMap.<String,String>builder().
+ put("member1", member1Actor.path().toString()).
+ put("member2", member2Actor.path().toString()).build());
+
+ DefaultConfigParamsImpl member3ConfigParams = newConfigParams();
+ member3Context.setConfigParams(member3ConfigParams);
+
+ // Initialize the ReplicatedLog and election term info for Candidate member 3. The current term
+ // will be 2 and the last term will be 1 so it is behind the leader's log.
+
+ SimpleReplicatedLog candidateReplicatedLog = new SimpleReplicatedLog();
+ candidateReplicatedLog.append(new MockReplicatedLogEntry(1, 1, new MockPayload("")));
+ candidateReplicatedLog.append(new MockReplicatedLogEntry(2, 1, new MockPayload("")));
+
+ member3Context.setReplicatedLog(candidateReplicatedLog);
+ member3Context.getTermInformation().update(2, member1Context.getId());
+
+ // The member 3 Candidate will start a new term and send RequestVotes. However it will be
+ // partitioned from the cluster by having member 1 and 2 drop its RequestVote messages.
+
+ int numCandidateElections = 5;
+ long candidateElectionTerm = member3Context.getTermInformation().getCurrentTerm() + numCandidateElections;
+
+ member1Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class, numCandidateElections);
+
+ member2Actor.underlyingActor().dropMessagesToBehavior(RequestVote.class, numCandidateElections);
+
+ Candidate member3Behavior = new Candidate(member3Context);
+ member3Actor.underlyingActor().behavior = member3Behavior;
+
+ // Send several additional ElectionTimeouts to Candidate member 3. Each ElectionTimeout will
+ // start a new term so Candidate member 3's current term will be greater than the leader's
+ // current term.
+
+ for(int i = 0; i < numCandidateElections - 1; i++) {
+ member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+ }
+
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Leader);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Candidate);
+
+ assertEquals("member 1 election term", 3, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", 3, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", candidateElectionTerm,
+ member3Context.getTermInformation().getCurrentTerm());
+
+ // Now send a couple more ElectionTimeouts to Candidate member 3 with the partition resolved.
+ //
+ // On the first RequestVote, Leader member 1 should switch to Follower as its term (3) is less than
+ // the RequestVote's term (8) from member 3. No RequestVoteReply should be sent by member 1.
+ // Follower member 2 should update its term since it is less than the RequestVote's term and
+ // should return a RequestVoteReply but should not grant the vote as its last term and index
+ // is greater than the RequestVote's lastLogTerm and lastLogIndex, ie member 2's log is later
+ // or more up to date than member 3's.
+ //
+ // On the second RequestVote, both member 1 and 2 are followers so they should update their
+ // term and return a RequestVoteReply but should not grant the vote.
+
+ candidateElectionTerm += 2;
+ for(int i = 0; i < 2; i++) {
+ member1Actor.underlyingActor().clear();
+ member1Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+ member2Actor.underlyingActor().clear();
+ member2Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
+
+ member3Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member3Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member3Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", false, requestVoteReply.isVoteGranted());
+ }
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Follower);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Candidate);
+
+ // Even though member 3 didn't get voted for, member 1 and 2 should have updated their term
+ // to member 3's.
+
+ assertEquals("member 1 election term", candidateElectionTerm,
+ member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", candidateElectionTerm,
+ member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", candidateElectionTerm,
+ member3Context.getTermInformation().getCurrentTerm());
+
+ // At this point we have no leader. Candidate member 3 would continue to start new elections
+ // but wouldn't be granted a vote. One of the 2 followers would eventually time out from
+ // not having received a heartbeat from a leader and switch to candidate and start a new
+ // election. We'll simulate that here by sending an ElectionTimeout to member 1.
+
+ member1Actor.underlyingActor().clear();
+ member1Actor.underlyingActor().expectMessageClass(RequestVoteReply.class, 1);
+ member2Actor.underlyingActor().clear();
+ member2Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+ member3Actor.underlyingActor().clear();
+ member3Actor.underlyingActor().expectMessageClass(RequestVote.class, 1);
+ member3Actor.underlyingActor().expectBehaviorStateChange();
+
+ member1Actor.tell(new ElectionTimeout(), ActorRef.noSender());
+
+ member2Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+ member3Actor.underlyingActor().waitForExpectedMessages(RequestVote.class);
+
+ // The RequestVoteReply should come from Follower member 2 and the vote should be granted
+ // since member 2's last term and index matches member 1's.
+
+ member1Actor.underlyingActor().waitForExpectedMessages(RequestVoteReply.class);
+
+ RequestVoteReply requestVoteReply = member1Actor.underlyingActor().getCapturedMessage(RequestVoteReply.class);
+ assertEquals("getTerm", member1Context.getTermInformation().getCurrentTerm(), requestVoteReply.getTerm());
+ assertEquals("isVoteGranted", true, requestVoteReply.isVoteGranted());
+
+ // Candidate member 3 should change to follower as its term should be less than the
+ // RequestVote term (member 1 started a new term higher than the other member's terms).
+
+ member3Actor.underlyingActor().waitForBehaviorStateChange();
+
+ verifyBehaviorState("member 1", member1Actor, RaftState.Leader);
+ verifyBehaviorState("member 2", member2Actor, RaftState.Follower);
+ verifyBehaviorState("member 3", member3Actor, RaftState.Follower);
+
+ // newTerm should be 10.
+
+ long newTerm = candidateElectionTerm + 1;
+ assertEquals("member 1 election term", newTerm, member1Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 2 election term", newTerm, member2Context.getTermInformation().getCurrentTerm());
+ assertEquals("member 3 election term", newTerm, member3Context.getTermInformation().getCurrentTerm());
+
+ testLog.info("testPartitionedCandidateOnStartupScenario done");
+ }
+}
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.IsolatedLeaderCheck;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader.FollowerToSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import scala.concurrent.duration.FiniteDuration;
-public class LeaderTest extends AbstractRaftActorBehaviorTest {
+public class LeaderTest extends AbstractLeaderTest {
- private final ActorRef leaderActor =
- getSystem().actorOf(Props.create(DoNothingActor.class));
- private final ActorRef senderActor =
- getSystem().actorOf(Props.create(DoNothingActor.class));
+ static final String FOLLOWER_ID = "follower";
+
+ private final TestActorRef<ForwardMessageToBehaviorActor> leaderActor = actorFactory.createTestActor(
+ Props.create(ForwardMessageToBehaviorActor.class), actorFactory.generateActorId("leader"));
+
+ private final TestActorRef<ForwardMessageToBehaviorActor> followerActor = actorFactory.createTestActor(
+ Props.create(ForwardMessageToBehaviorActor.class), actorFactory.generateActorId("follower"));
+
+ private Leader leader;
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ if(leader != null) {
+ leader.close();
+ }
+
+ super.tearDown();
+ }
@Test
public void testHandleMessageForUnknownMessage() throws Exception {
- new JavaTestKit(getSystem()) {{
- Leader leader =
- new Leader(createActorContext());
+ logStart("testHandleMessageForUnknownMessage");
- // handle message should return the Leader state when it receives an
- // unknown message
- RaftActorBehavior behavior = leader.handleMessage(senderActor, "foo");
- Assert.assertTrue(behavior instanceof Leader);
- }};
+ leader = new Leader(createActorContext());
+
+ // handle message should return the Leader state when it receives an
+ // unknown message
+ RaftActorBehavior behavior = leader.handleMessage(followerActor, "foo");
+ Assert.assertTrue(behavior instanceof Leader);
}
@Test
- public void testThatLeaderSendsAHeartbeatMessageToAllFollowers() {
- new JavaTestKit(getSystem()) {{
+ public void testThatLeaderSendsAHeartbeatMessageToAllFollowers() throws Exception {
+ logStart("testThatLeaderSendsAHeartbeatMessageToAllFollowers");
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- ActorRef followerActor = getTestActor();
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
- MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext();
+ leader = new Leader(actorContext);
- Map<String, String> peerAddresses = new HashMap<>();
+ // Leader should send an immediate heartbeat with no entries as follower is inactive.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+ assertEquals("getTerm", term, appendEntries.getTerm());
+ assertEquals("getPrevLogIndex", -1, appendEntries.getPrevLogIndex());
+ assertEquals("getPrevLogTerm", -1, appendEntries.getPrevLogTerm());
+ assertEquals("Entries size", 0, appendEntries.getEntries().size());
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ // The follower would normally reply - simulate that explicitly here.
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex - 1, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
- actorContext.setPeerAddresses(peerAddresses);
+ followerActor.underlyingActor().clear();
- Leader leader = new Leader(actorContext);
- leader.handleMessage(senderActor, new SendHeartBeat());
+ // Sleep for the heartbeat interval so AppendEntries is sent.
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().
+ getHeartBeatInterval().toMillis(), TimeUnit.MILLISECONDS);
- final String out =
- new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- Object msg = fromSerializableMessage(in);
- if (msg instanceof AppendEntries) {
- if (((AppendEntries)msg).getTerm() == 0) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- assertEquals("match", out);
-
- }
- };
- }};
+ appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+ assertEquals("getPrevLogIndex", lastIndex - 1, appendEntries.getPrevLogIndex());
+ assertEquals("getPrevLogTerm", term, appendEntries.getPrevLogTerm());
+ assertEquals("Entries size", 1, appendEntries.getEntries().size());
+ assertEquals("Entry getIndex", lastIndex, appendEntries.getEntries().get(0).getIndex());
+ assertEquals("Entry getTerm", term, appendEntries.getEntries().get(0).getTerm());
}
@Test
- public void testHandleReplicateMessageSendAppendEntriesToFollower() {
- new JavaTestKit(getSystem()) {{
+ public void testHandleReplicateMessageSendAppendEntriesToFollower() throws Exception {
+ logStart("testHandleReplicateMessageSendAppendEntriesToFollower");
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- ActorRef followerActor = getTestActor();
-
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
-
- Map<String, String> peerAddresses = new HashMap<>();
-
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
-
- actorContext.setPeerAddresses(peerAddresses);
-
- Leader leader = new Leader(actorContext);
- RaftActorBehavior raftBehavior = leader
- .handleMessage(senderActor, new Replicate(null, null,
- new MockRaftActorContext.MockReplicatedLogEntry(1,
- 100,
- new MockRaftActorContext.MockPayload("foo"))
- ));
-
- // State should not change
- assertTrue(raftBehavior instanceof Leader);
-
- final String out =
- new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- Object msg = fromSerializableMessage(in);
- if (msg instanceof AppendEntries) {
- if (((AppendEntries)msg).getTerm() == 0) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
- }
- };
- }};
+ MockRaftActorContext actorContext = createActorContextWithFollower();
+
+ long term = 1;
+ actorContext.getTermInformation().update(term, "");
+
+ leader = new Leader(actorContext);
+
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // The follower would normally reply - simulate that explicitly here.
+ long lastIndex = actorContext.getReplicatedLog().lastIndex();
+ leader.handleMessage(followerActor, new AppendEntriesReply(
+ FOLLOWER_ID, term, true, lastIndex, term));
+ assertEquals("isFollowerActive", true, leader.getFollower(FOLLOWER_ID).isFollowerActive());
+
+ followerActor.underlyingActor().clear();
+
+ MockRaftActorContext.MockPayload payload = new MockRaftActorContext.MockPayload("foo");
+ MockRaftActorContext.MockReplicatedLogEntry newEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ 1, lastIndex + 1, payload);
+ actorContext.getReplicatedLog().append(newEntry);
+ RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
+ new Replicate(null, null, newEntry));
+
+ // State should not change
+ assertTrue(raftBehavior instanceof Leader);
+
+ AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+ assertEquals("getPrevLogIndex", lastIndex, appendEntries.getPrevLogIndex());
+ assertEquals("getPrevLogTerm", term, appendEntries.getPrevLogTerm());
+ assertEquals("Entries size", 1, appendEntries.getEntries().size());
+ assertEquals("Entry getIndex", lastIndex + 1, appendEntries.getEntries().get(0).getIndex());
+ assertEquals("Entry getTerm", term, appendEntries.getEntries().get(0).getTerm());
+ assertEquals("Entry payload", payload, appendEntries.getEntries().get(0).getData());
}
@Test
- public void testHandleReplicateMessageWhenThereAreNoFollowers() {
- new JavaTestKit(getSystem()) {{
+ public void testHandleReplicateMessageWhenThereAreNoFollowers() throws Exception {
+ logStart("testHandleReplicateMessageWhenThereAreNoFollowers");
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ MockRaftActorContext actorContext = createActorContext();
- ActorRef raftActor = getTestActor();
+ leader = new Leader(actorContext);
- MockRaftActorContext actorContext =
- new MockRaftActorContext("test", getSystem(), raftActor);
+ actorContext.setLastApplied(0);
- actorContext.getReplicatedLog().removeFrom(0);
+ long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
+ long term = actorContext.getTermInformation().getCurrentTerm();
+ MockRaftActorContext.MockReplicatedLogEntry newEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+ term, newLogIndex, new MockRaftActorContext.MockPayload("foo"));
- actorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1)
- .build());
+ actorContext.getReplicatedLog().append(newEntry);
- Leader leader = new Leader(actorContext);
- RaftActorBehavior raftBehavior = leader
- .handleMessage(senderActor, new Replicate(null, "state-id",actorContext.getReplicatedLog().get(1)));
+ RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
+ new Replicate(leaderActor, "state-id", newEntry));
- // State should not change
- assertTrue(raftBehavior instanceof Leader);
+ // State should not change
+ assertTrue(raftBehavior instanceof Leader);
- assertEquals(1, actorContext.getCommitIndex());
+ assertEquals("getCommitIndex", newLogIndex, actorContext.getCommitIndex());
- final String out =
- new ExpectMsg<String>(duration("1 seconds"),
- "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof ApplyState) {
- if (((ApplyState) in).getIdentifier().equals("state-id")) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ // We should get 2 ApplyState messages - 1 for new log entry and 1 for the previous
+ // one since lastApplied state is 0.
+ List<ApplyState> applyStateList = MessageCollectorActor.getAllMatching(
+ leaderActor, ApplyState.class);
+ assertEquals("ApplyState count", newLogIndex, applyStateList.size());
- assertEquals("match", out);
+ for(int i = 0; i <= newLogIndex - 1; i++ ) {
+ ApplyState applyState = applyStateList.get(i);
+ assertEquals("getIndex", i + 1, applyState.getReplicatedLogEntry().getIndex());
+ assertEquals("getTerm", term, applyState.getReplicatedLogEntry().getTerm());
+ }
- }
- };
- }};
+ ApplyState last = applyStateList.get((int) newLogIndex - 1);
+ assertEquals("getData", newEntry.getData(), last.getReplicatedLogEntry().getData());
+ assertEquals("getIdentifier", "state-id", last.getIdentifier());
}
@Test
public void testSendAppendEntriesOnAnInProgressInstallSnapshot() throws Exception {
- new JavaTestKit(getSystem()) {{
- ActorRef followerActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ logStart("testSendAppendEntriesOnAnInProgressInstallSnapshot");
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
-
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext(leaderActor);
- actorContext.setPeerAddresses(peerAddresses);
-
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
-
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
-
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
-
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.setCommitIndex(followersLastIndex);
- //set follower timeout to 2 mins, helps during debugging
- actorContext.setConfigParams(new MockConfigParamsImpl(120000L, 10));
-
- MockLeader leader = new MockLeader(actorContext);
-
- // new entry
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- //update follower timestamp
- leader.markFollowerActive(followerActor.path().toString());
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ //clears leaders log
+ actorContext.getReplicatedLog().removeFrom(0);
+
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- ByteString bs = toByteString(leadersSnapshot);
- leader.setSnapshot(Optional.of(bs));
- leader.createFollowerToSnapshot(followerActor.path().toString(), bs);
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.setCommitIndex(followersLastIndex);
+ //set follower timeout to 2 mins, helps during debugging
+ actorContext.setConfigParams(new MockConfigParamsImpl(120000L, 10));
- //send first chunk and no InstallSnapshotReply received yet
- leader.getFollowerToSnapshot().getNextChunk();
- leader.getFollowerToSnapshot().incrementChunkIndex();
+ leader = new Leader(actorContext);
- leader.handleMessage(leaderActor, new SendHeartBeat());
+ // new entry
+ ReplicatedLogImplEntry entry =
+ new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ new MockRaftActorContext.MockPayload("D"));
+
+ //update follower timestamp
+ leader.markFollowerActive(FOLLOWER_ID);
- AppendEntries aeproto = (AppendEntries)MessageCollectorActor.getFirstMatching(
- followerActor, AppendEntries.class);
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+ FollowerToSnapshot fts = leader.new FollowerToSnapshot(bs);
+ leader.setFollowerSnapshot(FOLLOWER_ID, fts);
- assertNotNull("AppendEntries should be sent even if InstallSnapshotReply is not " +
- "received", aeproto);
+ //send first chunk and no InstallSnapshotReply received yet
+ fts.getNextChunk();
+ fts.incrementChunkIndex();
- AppendEntries ae = (AppendEntries) SerializationUtils.fromSerializable(aeproto);
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
- assertTrue("AppendEntries should be sent with empty entries", ae.getEntries().isEmpty());
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- //InstallSnapshotReply received
- leader.getFollowerToSnapshot().markSendStatus(true);
+ AppendEntries aeproto = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- leader.handleMessage(senderActor, new SendHeartBeat());
+ AppendEntries ae = (AppendEntries) SerializationUtils.fromSerializable(aeproto);
- InstallSnapshotMessages.InstallSnapshot isproto = (InstallSnapshotMessages.InstallSnapshot)
- MessageCollectorActor.getFirstMatching(followerActor,
- InstallSnapshot.SERIALIZABLE_CLASS);
+ assertTrue("AppendEntries should be sent with empty entries", ae.getEntries().isEmpty());
- assertNotNull("Installsnapshot should get called for sending the next chunk of snapshot",
- isproto);
+ //InstallSnapshotReply received
+ fts.markSendStatus(true);
- InstallSnapshot is = (InstallSnapshot) SerializationUtils.fromSerializable(isproto);
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- assertEquals(snapshotIndex, is.getLastIncludedIndex());
+ InstallSnapshotMessages.InstallSnapshot isproto = MessageCollectorActor.expectFirstMatching(followerActor,
+ InstallSnapshot.SERIALIZABLE_CLASS);
- }};
+ InstallSnapshot is = (InstallSnapshot) SerializationUtils.fromSerializable(isproto);
+
+ assertEquals(snapshotIndex, is.getLastIncludedIndex());
}
@Test
- public void testSendAppendEntriesSnapshotScenario() {
- new JavaTestKit(getSystem()) {{
+ public void testSendAppendEntriesSnapshotScenario() throws Exception {
+ logStart("testSendAppendEntriesSnapshotScenario");
- ActorRef followerActor = getTestActor();
-
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext(getRef());
- actorContext.setPeerAddresses(peerAddresses);
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ //clears the leader's log
+ actorContext.getReplicatedLog().removeFrom(0);
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.setCommitIndex(followersLastIndex);
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.setCommitIndex(followersLastIndex);
+ leader = new Leader(actorContext);
- Leader leader = new Leader(actorContext);
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- // new entry
- ReplicatedLogImplEntry entry =
+ // new entry
+ ReplicatedLogImplEntry entry =
new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
-
- //update follower timestamp
- leader.markFollowerActive(followerActor.path().toString());
-
- // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
- RaftActorBehavior raftBehavior = leader.handleMessage(
- senderActor, new Replicate(null, "state-id", entry));
-
- assertTrue(raftBehavior instanceof Leader);
-
- // we might receive some heartbeat messages, so wait till we InitiateInstallSnapshot
- Boolean[] matches = new ReceiveWhile<Boolean>(Boolean.class, duration("2 seconds")) {
- @Override
- protected Boolean match(Object o) throws Exception {
- if (o instanceof InitiateInstallSnapshot) {
- return true;
- }
- return false;
- }
- }.get();
-
- boolean initiateInitiateInstallSnapshot = false;
- for (Boolean b: matches) {
- initiateInitiateInstallSnapshot = b | initiateInitiateInstallSnapshot;
- }
+ new MockRaftActorContext.MockPayload("D"));
- assertTrue(initiateInitiateInstallSnapshot);
- }};
+ //update follower timestamp
+ leader.markFollowerActive(FOLLOWER_ID);
+
+ // this should invoke a SendInstallSnapshot as followersLastIndex < snapshotIndex
+ RaftActorBehavior raftBehavior = leader.handleMessage(
+ leaderActor, new Replicate(null, "state-id", entry));
+
+ assertTrue(raftBehavior instanceof Leader);
+
+ MessageCollectorActor.expectFirstMatching(leaderActor, CaptureSnapshot.class);
}
@Test
public void testInitiateInstallSnapshot() throws Exception {
- new JavaTestKit(getSystem()) {{
+ logStart("testInitiateInstallSnapshot");
- ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- ActorRef followerActor = getTestActor();
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ //clears the leader's log
+ actorContext.getReplicatedLog().removeFrom(0);
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext(leaderActor);
- actorContext.setPeerAddresses(peerAddresses);
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.setLastApplied(3);
+ actorContext.setCommitIndex(followersLastIndex);
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ leader = new Leader(actorContext);
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ // Leader will send an immediate heartbeat - ignore it.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ // set the snapshot as absent and check if capture-snapshot is invoked.
+ leader.setSnapshot(Optional.<ByteString>absent());
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.setLastApplied(3);
- actorContext.setCommitIndex(followersLastIndex);
+ // new entry
+ ReplicatedLogImplEntry entry = new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ new MockRaftActorContext.MockPayload("D"));
- Leader leader = new Leader(actorContext);
- // set the snapshot as absent and check if capture-snapshot is invoked.
- leader.setSnapshot(Optional.<ByteString>absent());
+ actorContext.getReplicatedLog().append(entry);
- // new entry
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
+ //update follower timestamp
+ leader.markFollowerActive(FOLLOWER_ID);
- actorContext.getReplicatedLog().append(entry);
+ leader.handleMessage(leaderActor, new Replicate(null, "state-id", entry));
- // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
- RaftActorBehavior raftBehavior = leader.handleMessage(
- leaderActor, new InitiateInstallSnapshot());
+ CaptureSnapshot cs = MessageCollectorActor.expectFirstMatching(leaderActor, CaptureSnapshot.class);
- CaptureSnapshot cs = (CaptureSnapshot) MessageCollectorActor.
- getFirstMatching(leaderActor, CaptureSnapshot.class);
+ assertTrue(cs.isInstallSnapshotInitiated());
+ assertEquals(3, cs.getLastAppliedIndex());
+ assertEquals(1, cs.getLastAppliedTerm());
+ assertEquals(4, cs.getLastIndex());
+ assertEquals(2, cs.getLastTerm());
- assertNotNull(cs);
+ // if an initiate is started again while the first is in progress, it shouldn't initiate a new capture
+ leader.handleMessage(leaderActor, new Replicate(null, "state-id", entry));
- assertTrue(cs.isInstallSnapshotInitiated());
- assertEquals(3, cs.getLastAppliedIndex());
- assertEquals(1, cs.getLastAppliedTerm());
- assertEquals(4, cs.getLastIndex());
- assertEquals(2, cs.getLastTerm());
- }};
+ List<CaptureSnapshot> captureSnapshots = MessageCollectorActor.getAllMatching(leaderActor, CaptureSnapshot.class);
+ assertEquals("CaptureSnapshot should not get invoked when initiate is in progress", 1, captureSnapshots.size());
}
@Test
- public void testInstallSnapshot() {
- new JavaTestKit(getSystem()) {{
+ public void testInstallSnapshot() throws Exception {
+ logStart("testInstallSnapshot");
- ActorRef followerActor = getTestActor();
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
- actorContext.setPeerAddresses(peerAddresses);
+ //clears the leader's log
+ actorContext.getReplicatedLog().removeFrom(0);
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+ actorContext.setCommitIndex(followersLastIndex);
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ leader = new Leader(actorContext);
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ // Ignore initial heartbeat.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
- actorContext.setCommitIndex(followersLastIndex);
+ RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
+ new SendInstallSnapshot(toByteString(leadersSnapshot)));
- Leader leader = new Leader(actorContext);
+ assertTrue(raftBehavior instanceof Leader);
- // new entry
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
+ // check if InstallSnapshot gets called with the correct values.
- RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
- new SendInstallSnapshot(toByteString(leadersSnapshot)));
+ InstallSnapshot installSnapshot = (InstallSnapshot) SerializationUtils.fromSerializable(
+ MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshotMessages.InstallSnapshot.class));
- assertTrue(raftBehavior instanceof Leader);
-
- // check if installsnapshot gets called with the correct values.
- final String out =
- new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof InstallSnapshotMessages.InstallSnapshot) {
- InstallSnapshot is = (InstallSnapshot)
- SerializationUtils.fromSerializable(in);
- if (is.getData() == null) {
- return "InstallSnapshot data is null";
- }
- if (is.getLastIncludedIndex() != snapshotIndex) {
- return is.getLastIncludedIndex() + "!=" + snapshotIndex;
- }
- if (is.getLastIncludedTerm() != snapshotTerm) {
- return is.getLastIncludedTerm() + "!=" + snapshotTerm;
- }
- if (is.getTerm() == currentTerm) {
- return is.getTerm() + "!=" + currentTerm;
- }
-
- return "match";
-
- } else {
- return "message mismatch:" + in.getClass();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
- }};
+ assertNotNull(installSnapshot.getData());
+ assertEquals(snapshotIndex, installSnapshot.getLastIncludedIndex());
+ assertEquals(snapshotTerm, installSnapshot.getLastIncludedTerm());
+
+ assertEquals(currentTerm, installSnapshot.getTerm());
}
@Test
- public void testHandleInstallSnapshotReplyLastChunk() {
- new JavaTestKit(getSystem()) {{
+ public void testHandleInstallSnapshotReplyLastChunk() throws Exception {
+ logStart("testHandleInstallSnapshotReplyLastChunk");
- ActorRef followerActor = getTestActor();
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
-
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
-
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
- actorContext.setPeerAddresses(peerAddresses);
- actorContext.setCommitIndex(followersLastIndex);
-
- MockLeader leader = new MockLeader(actorContext);
-
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
-
- // set the snapshot variables in replicatedlog
-
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
-
- ByteString bs = toByteString(leadersSnapshot);
- leader.setSnapshot(Optional.of(bs));
- leader.createFollowerToSnapshot(followerActor.path().toString(), bs);
- while(!leader.getFollowerToSnapshot().isLastChunk(leader.getFollowerToSnapshot().getChunkIndex())) {
- leader.getFollowerToSnapshot().getNextChunk();
- leader.getFollowerToSnapshot().incrementChunkIndex();
- }
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ actorContext.setCommitIndex(followersLastIndex);
- RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
- new InstallSnapshotReply(currentTerm, followerActor.path().toString(),
- leader.getFollowerToSnapshot().getChunkIndex(), true));
+ leader = new Leader(actorContext);
- assertTrue(raftBehavior instanceof Leader);
+ // Ignore initial heartbeat.
+ MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- assertEquals(0, leader.followerSnapshotSize());
- assertEquals(1, leader.followerLogSize());
- assertNotNull(leader.getFollower(followerActor.path().toString()));
- FollowerLogInformation fli = leader.getFollower(followerActor.path().toString());
- assertEquals(snapshotIndex, fli.getMatchIndex());
- assertEquals(snapshotIndex, fli.getMatchIndex());
- assertEquals(snapshotIndex + 1, fli.getNextIndex());
- }};
- }
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- @Test
- public void testHandleInstallSnapshotReplyWithInvalidChunkIndex() throws Exception {
- new JavaTestKit(getSystem()) {{
+ // set the snapshot variables in replicatedlog
- TestActorRef<MessageCollectorActor> followerActor =
- TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower");
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+ FollowerToSnapshot fts = leader.new FollowerToSnapshot(bs);
+ leader.setFollowerSnapshot(FOLLOWER_ID, fts);
+ while(!fts.isLastChunk(fts.getChunkIndex())) {
+ fts.getNextChunk();
+ fts.incrementChunkIndex();
+ }
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ //clears the leader's log
+ actorContext.getReplicatedLog().removeFrom(0);
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
+ RaftActorBehavior raftBehavior = leader.handleMessage(followerActor,
+ new InstallSnapshotReply(currentTerm, FOLLOWER_ID, fts.getChunkIndex(), true));
- actorContext.setConfigParams(new DefaultConfigParamsImpl(){
- @Override
- public int getSnapshotChunkSize() {
- return 50;
- }
- });
- actorContext.setPeerAddresses(peerAddresses);
- actorContext.setCommitIndex(followersLastIndex);
+ assertTrue(raftBehavior instanceof Leader);
- MockLeader leader = new MockLeader(actorContext);
+ assertEquals(0, leader.followerSnapshotSize());
+ assertEquals(1, leader.followerLogSize());
+ FollowerLogInformation fli = leader.getFollower(FOLLOWER_ID);
+ assertNotNull(fli);
+ assertEquals(snapshotIndex, fli.getMatchIndex());
+ assertEquals(snapshotIndex, fli.getMatchIndex());
+ assertEquals(snapshotIndex + 1, fli.getNextIndex());
+ }
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ @Test
+ public void testSendSnapshotfromInstallSnapshotReply() throws Exception {
+ logStart("testSendSnapshotfromInstallSnapshotReply");
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- ByteString bs = toByteString(leadersSnapshot);
- leader.setSnapshot(Optional.of(bs));
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl(){
+ @Override
+ public int getSnapshotChunkSize() {
+ return 50;
+ }
+ };
+ configParams.setHeartBeatInterval(new FiniteDuration(9, TimeUnit.SECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
- Object o = MessageCollectorActor.getAllMessages(followerActor).get(0);
+ actorContext.setConfigParams(configParams);
+ actorContext.setCommitIndex(followersLastIndex);
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ leader = new Leader(actorContext);
- InstallSnapshotMessages.InstallSnapshot installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
- assertEquals(1, installSnapshot.getChunkIndex());
- assertEquals(3, installSnapshot.getTotalChunks());
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+ leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(), followerActor.path().toString(), -1, false));
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
- leader.handleMessage(leaderActor, new SendHeartBeat());
+ assertEquals(1, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
- o = MessageCollectorActor.getAllMessages(followerActor).get(1);
+ followerActor.underlyingActor().clear();
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ FOLLOWER_ID, installSnapshot.getChunkIndex(), true));
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
- installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ assertEquals(2, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(1, installSnapshot.getChunkIndex());
- assertEquals(3, installSnapshot.getTotalChunks());
+ followerActor.underlyingActor().clear();
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ FOLLOWER_ID, installSnapshot.getChunkIndex(), true));
- followerActor.tell(PoisonPill.getInstance(), getRef());
- }};
+ installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+
+ // Send the snapshot reply one more time and make sure that a new snapshot message is not sent to the follower
+ followerActor.underlyingActor().clear();
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ FOLLOWER_ID, installSnapshot.getChunkIndex(), true));
+
+ installSnapshot = MessageCollectorActor.getFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+
+ Assert.assertNull(installSnapshot);
}
+
@Test
- public void testHandleSnapshotSendsPreviousChunksHashCodeWhenSendingNextChunk() throws Exception {
- new JavaTestKit(getSystem()) {
- {
+ public void testHandleInstallSnapshotReplyWithInvalidChunkIndex() throws Exception{
+ logStart("testHandleInstallSnapshotReplyWithInvalidChunkIndex");
- TestActorRef<MessageCollectorActor> followerActor =
- TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class), "follower");
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ actorContext.setConfigParams(new DefaultConfigParamsImpl(){
+ @Override
+ public int getSnapshotChunkSize() {
+ return 50;
+ }
+ });
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
+ actorContext.setCommitIndex(followersLastIndex);
- actorContext.setConfigParams(new DefaultConfigParamsImpl() {
- @Override
- public int getSnapshotChunkSize() {
- return 50;
- }
- });
- actorContext.setPeerAddresses(peerAddresses);
- actorContext.setCommitIndex(followersLastIndex);
+ leader = new Leader(actorContext);
- MockLeader leader = new MockLeader(actorContext);
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
- ByteString bs = toByteString(leadersSnapshot);
- leader.setSnapshot(Optional.of(bs));
+ leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
- leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
- Object o = MessageCollectorActor.getAllMessages(followerActor).get(0);
+ assertEquals(1, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ followerActor.underlyingActor().clear();
- InstallSnapshotMessages.InstallSnapshot installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ leader.handleMessage(followerActor, new InstallSnapshotReply(actorContext.getTermInformation().getCurrentTerm(),
+ FOLLOWER_ID, -1, false));
- assertEquals(1, installSnapshot.getChunkIndex());
- assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE, installSnapshot.getLastChunkHashCode());
+ Uninterruptibles.sleepUninterruptibly(actorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
- int hashCode = installSnapshot.getData().hashCode();
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- leader.handleMessage(followerActor, new InstallSnapshotReply(installSnapshot.getTerm(),followerActor.path().toString(),1,true ));
+ installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
- leader.handleMessage(leaderActor, new SendHeartBeat());
+ assertEquals(1, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
+ }
- Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+ @Test
+ public void testHandleSnapshotSendsPreviousChunksHashCodeWhenSendingNextChunk() throws Exception {
+ logStart("testHandleSnapshotSendsPreviousChunksHashCodeWhenSendingNextChunk");
- o = MessageCollectorActor.getAllMessages(followerActor).get(1);
+ MockRaftActorContext actorContext = createActorContextWithFollower();
- assertTrue(o instanceof InstallSnapshotMessages.InstallSnapshot);
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- installSnapshot = (InstallSnapshotMessages.InstallSnapshot) o;
+ actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+ @Override
+ public int getSnapshotChunkSize() {
+ return 50;
+ }
+ });
- assertEquals(2, installSnapshot.getChunkIndex());
- assertEquals(3, installSnapshot.getTotalChunks());
- assertEquals(hashCode, installSnapshot.getLastChunkHashCode());
+ actorContext.setCommitIndex(followersLastIndex);
- followerActor.tell(PoisonPill.getInstance(), getRef());
- }};
+ leader = new Leader(actorContext);
+
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+
+ leader.handleMessage(leaderActor, new SendInstallSnapshot(bs));
+
+ InstallSnapshotMessages.InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(1, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
+ assertEquals(AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE, installSnapshot.getLastChunkHashCode());
+
+ int hashCode = installSnapshot.getData().hashCode();
+
+ followerActor.underlyingActor().clear();
+
+ leader.handleMessage(followerActor, new InstallSnapshotReply(installSnapshot.getTerm(),
+ FOLLOWER_ID, 1, true));
+
+ installSnapshot = MessageCollectorActor.expectFirstMatching(
+ followerActor, InstallSnapshotMessages.InstallSnapshot.class);
+
+ assertEquals(2, installSnapshot.getChunkIndex());
+ assertEquals(3, installSnapshot.getTotalChunks());
+ assertEquals(hashCode, installSnapshot.getLastChunkHashCode());
}
@Test
public void testFollowerToSnapshotLogic() {
+ logStart("testFollowerToSnapshotLogic");
- MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext();
+ MockRaftActorContext actorContext = createActorContext();
actorContext.setConfigParams(new DefaultConfigParamsImpl() {
@Override
}
});
- MockLeader leader = new MockLeader(actorContext);
+ leader = new Leader(actorContext);
Map<String, String> leadersSnapshot = new HashMap<>();
leadersSnapshot.put("1", "A");
ByteString bs = toByteString(leadersSnapshot);
byte[] barray = bs.toByteArray();
- leader.createFollowerToSnapshot("followerId", bs);
+ FollowerToSnapshot fts = leader.new FollowerToSnapshot(bs);
+ leader.setFollowerSnapshot(FOLLOWER_ID, fts);
+
assertEquals(bs.size(), barray.length);
int chunkIndex=0;
j = barray.length;
}
- ByteString chunk = leader.getFollowerToSnapshot().getNextChunk();
+ ByteString chunk = fts.getNextChunk();
assertEquals("bytestring size not matching for chunk:"+ chunkIndex, j-i, chunk.size());
- assertEquals("chunkindex not matching", chunkIndex, leader.getFollowerToSnapshot().getChunkIndex());
+ assertEquals("chunkindex not matching", chunkIndex, fts.getChunkIndex());
- leader.getFollowerToSnapshot().markSendStatus(true);
- if (!leader.getFollowerToSnapshot().isLastChunk(chunkIndex)) {
- leader.getFollowerToSnapshot().incrementChunkIndex();
+ fts.markSendStatus(true);
+ if (!fts.isLastChunk(chunkIndex)) {
+ fts.incrementChunkIndex();
}
}
- assertEquals("totalChunks not matching", chunkIndex, leader.getFollowerToSnapshot().getTotalChunks());
+ assertEquals("totalChunks not matching", chunkIndex, fts.getTotalChunks());
}
-
@Override protected RaftActorBehavior createBehavior(
RaftActorContext actorContext) {
return new Leader(actorContext);
}
- @Override protected RaftActorContext createActorContext() {
+ @Override
+ protected MockRaftActorContext createActorContext() {
return createActorContext(leaderActor);
}
@Override
- protected RaftActorContext createActorContext(ActorRef actorRef) {
- return new MockRaftActorContext("test", getSystem(), actorRef);
+ protected MockRaftActorContext createActorContext(ActorRef actorRef) {
+ return createActorContext("leader", actorRef);
}
- private ByteString toByteString(Map<String, String> state) {
- ByteArrayOutputStream b = null;
- ObjectOutputStream o = null;
- try {
- try {
- b = new ByteArrayOutputStream();
- o = new ObjectOutputStream(b);
- o.writeObject(state);
- byte[] snapshotBytes = b.toByteArray();
- return ByteString.copyFrom(snapshotBytes);
- } finally {
- if (o != null) {
- o.flush();
- o.close();
- }
- if (b != null) {
- b.close();
- }
- }
- } catch (IOException e) {
- Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
- }
- return null;
+ private MockRaftActorContext createActorContextWithFollower() {
+ MockRaftActorContext actorContext = createActorContext();
+ actorContext.setPeerAddresses(ImmutableMap.<String, String>builder().put(FOLLOWER_ID,
+ followerActor.path().toString()).build());
+ return actorContext;
}
- public static class ForwardMessageToBehaviorActor extends MessageCollectorActor {
- private static AbstractRaftActorBehavior behavior;
-
- public ForwardMessageToBehaviorActor(){
-
- }
-
- @Override public void onReceive(Object message) throws Exception {
- super.onReceive(message);
- behavior.handleMessage(sender(), message);
- }
-
- public static void setBehavior(AbstractRaftActorBehavior behavior){
- ForwardMessageToBehaviorActor.behavior = behavior;
- }
+ private MockRaftActorContext createActorContext(String id, ActorRef actorRef) {
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(50, TimeUnit.MILLISECONDS));
+ configParams.setElectionTimeoutFactor(100000);
+ MockRaftActorContext context = new MockRaftActorContext(id, getSystem(), actorRef);
+ context.setConfigParams(configParams);
+ return context;
}
@Test
public void testLeaderCreatedWithCommitIndexLessThanLastIndex() throws Exception {
- new JavaTestKit(getSystem()) {{
-
- ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
-
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
-
- ActorRef followerActor = getSystem().actorOf(Props.create(ForwardMessageToBehaviorActor.class));
+ logStart("testLeaderCreatedWithCommitIndexLessThanLastIndex");
- MockRaftActorContext followerActorContext =
- new MockRaftActorContext("follower", getSystem(), followerActor);
+ MockRaftActorContext leaderActorContext = createActorContextWithFollower();
- Follower follower = new Follower(followerActorContext);
+ MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
- ForwardMessageToBehaviorActor.setBehavior(follower);
+ Follower follower = new Follower(followerActorContext);
+ followerActor.underlyingActor().setBehavior(follower);
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(FOLLOWER_ID, followerActor.path().toString());
- leaderActorContext.setPeerAddresses(peerAddresses);
+ leaderActorContext.setPeerAddresses(peerAddresses);
- leaderActorContext.getReplicatedLog().removeFrom(0);
+ leaderActorContext.getReplicatedLog().removeFrom(0);
- //create 3 entries
- leaderActorContext.setReplicatedLog(
+ //create 3 entries
+ leaderActorContext.setReplicatedLog(
new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
- leaderActorContext.setCommitIndex(1);
+ leaderActorContext.setCommitIndex(1);
- followerActorContext.getReplicatedLog().removeFrom(0);
+ followerActorContext.getReplicatedLog().removeFrom(0);
- // follower too has the exact same log entries and has the same commit index
- followerActorContext.setReplicatedLog(
+ // follower too has the exact same log entries and has the same commit index
+ followerActorContext.setReplicatedLog(
new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
- followerActorContext.setCommitIndex(1);
-
- Leader leader = new Leader(leaderActorContext);
- leader.markFollowerActive(followerActor.path().toString());
+ followerActorContext.setCommitIndex(1);
- leader.handleMessage(leaderActor, new SendHeartBeat());
+ leader = new Leader(leaderActorContext);
- AppendEntries appendEntries = (AppendEntries) MessageCollectorActor
- .getFirstMatching(followerActor, AppendEntries.class);
+ AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- assertNotNull(appendEntries);
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(0, appendEntries.getEntries().size());
+ assertEquals(0, appendEntries.getPrevLogIndex());
- assertEquals(1, appendEntries.getLeaderCommit());
- assertEquals(1, appendEntries.getEntries().get(0).getIndex());
- assertEquals(0, appendEntries.getPrevLogIndex());
+ AppendEntriesReply appendEntriesReply = MessageCollectorActor.expectFirstMatching(
+ leaderActor, AppendEntriesReply.class);
- AppendEntriesReply appendEntriesReply =
- (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
- leaderActor, AppendEntriesReply.class);
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
- assertNotNull(appendEntriesReply);
+ // follower returns its next index
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
- // follower returns its next index
- assertEquals(2, appendEntriesReply.getLogLastIndex());
- assertEquals(1, appendEntriesReply.getLogLastTerm());
-
- }};
+ follower.close();
}
-
@Test
public void testLeaderCreatedWithCommitIndexLessThanFollowersCommitIndex() throws Exception {
- new JavaTestKit(getSystem()) {{
+ logStart("testLeaderCreatedWithCommitIndexLessThanFollowersCommitIndex");
- ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ MockRaftActorContext leaderActorContext = createActorContext();
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
- ActorRef followerActor = getSystem().actorOf(
- Props.create(ForwardMessageToBehaviorActor.class));
+ Follower follower = new Follower(followerActorContext);
+ followerActor.underlyingActor().setBehavior(follower);
- MockRaftActorContext followerActorContext =
- new MockRaftActorContext("follower", getSystem(), followerActor);
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(FOLLOWER_ID, followerActor.path().toString());
- Follower follower = new Follower(followerActorContext);
+ leaderActorContext.setPeerAddresses(peerAddresses);
- ForwardMessageToBehaviorActor.setBehavior(follower);
+ leaderActorContext.getReplicatedLog().removeFrom(0);
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
- leaderActorContext.setPeerAddresses(peerAddresses);
+ leaderActorContext.setCommitIndex(1);
- leaderActorContext.getReplicatedLog().removeFrom(0);
+ followerActorContext.getReplicatedLog().removeFrom(0);
- leaderActorContext.setReplicatedLog(
+ followerActorContext.setReplicatedLog(
new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
- leaderActorContext.setCommitIndex(1);
+ // follower has the same log entries but its commit index > leaders commit index
+ followerActorContext.setCommitIndex(2);
- followerActorContext.getReplicatedLog().removeFrom(0);
+ leader = new Leader(leaderActorContext);
- followerActorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ // Initial heartbeat
+ AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- // follower has the same log entries but its commit index > leaders commit index
- followerActorContext.setCommitIndex(2);
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(0, appendEntries.getEntries().size());
+ assertEquals(0, appendEntries.getPrevLogIndex());
- Leader leader = new Leader(leaderActorContext);
- leader.markFollowerActive(followerActor.path().toString());
+ AppendEntriesReply appendEntriesReply = MessageCollectorActor.expectFirstMatching(
+ leaderActor, AppendEntriesReply.class);
- leader.handleMessage(leaderActor, new SendHeartBeat());
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
- AppendEntries appendEntries = (AppendEntries) MessageCollectorActor
- .getFirstMatching(followerActor, AppendEntries.class);
+ leaderActor.underlyingActor().setBehavior(follower);
+ leader.handleMessage(followerActor, appendEntriesReply);
- assertNotNull(appendEntries);
+ leaderActor.underlyingActor().clear();
+ followerActor.underlyingActor().clear();
- assertEquals(1, appendEntries.getLeaderCommit());
- assertEquals(1, appendEntries.getEntries().get(0).getIndex());
- assertEquals(0, appendEntries.getPrevLogIndex());
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
- AppendEntriesReply appendEntriesReply =
- (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
- leaderActor, AppendEntriesReply.class);
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- assertNotNull(appendEntriesReply);
+ appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
- assertEquals(2, appendEntriesReply.getLogLastIndex());
- assertEquals(1, appendEntriesReply.getLogLastTerm());
+ assertEquals(2, appendEntries.getLeaderCommit());
+ assertEquals(0, appendEntries.getEntries().size());
+ assertEquals(2, appendEntries.getPrevLogIndex());
- }};
- }
+ appendEntriesReply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
- @Test
- public void testHandleAppendEntriesReplyFailure(){
- new JavaTestKit(getSystem()) {
- {
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
- ActorRef leaderActor =
- getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ assertEquals(2, followerActorContext.getCommitIndex());
- ActorRef followerActor =
- getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ follower.close();
+ }
+ @Test
+ public void testHandleAppendEntriesReplyFailure(){
+ logStart("testHandleAppendEntriesReplyFailure");
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ MockRaftActorContext leaderActorContext = createActorContextWithFollower();
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put("follower-1",
- followerActor.path().toString());
+ leader = new Leader(leaderActorContext);
- leaderActorContext.setPeerAddresses(peerAddresses);
+ // Send initial heartbeat reply with last index.
+ leader.handleAppendEntriesReply(followerActor, new AppendEntriesReply(FOLLOWER_ID, 1, true, 10, 1));
- Leader leader = new Leader(leaderActorContext);
+ FollowerLogInformation followerInfo = leader.getFollower(FOLLOWER_ID);
+ assertEquals("getNextIndex", 11, followerInfo.getNextIndex());
- AppendEntriesReply reply = new AppendEntriesReply("follower-1", 1, false, 10, 1);
+ AppendEntriesReply reply = new AppendEntriesReply(FOLLOWER_ID, 1, false, 10, 1);
- RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
+ RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
- assertEquals(RaftState.Leader, raftActorBehavior.state());
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
- }};
+ assertEquals("getNextIndex", 10, followerInfo.getNextIndex());
}
@Test
public void testHandleAppendEntriesReplySuccess() throws Exception {
- new JavaTestKit(getSystem()) {
- {
-
- ActorRef leaderActor =
- getSystem().actorOf(Props.create(MessageCollectorActor.class));
-
- ActorRef followerActor =
- getSystem().actorOf(Props.create(MessageCollectorActor.class));
-
-
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
-
- leaderActorContext.setReplicatedLog(
- new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ logStart("testHandleAppendEntriesReplySuccess");
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put("follower-1",
- followerActor.path().toString());
+ MockRaftActorContext leaderActorContext = createActorContextWithFollower();
- leaderActorContext.setPeerAddresses(peerAddresses);
- leaderActorContext.setCommitIndex(1);
- leaderActorContext.setLastApplied(1);
- leaderActorContext.getTermInformation().update(1, "leader");
-
- Leader leader = new Leader(leaderActorContext);
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
- AppendEntriesReply reply = new AppendEntriesReply("follower-1", 1, true, 2, 1);
+ leaderActorContext.setCommitIndex(1);
+ leaderActorContext.setLastApplied(1);
+ leaderActorContext.getTermInformation().update(1, "leader");
- RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
+ leader = new Leader(leaderActorContext);
- assertEquals(RaftState.Leader, raftActorBehavior.state());
+ AppendEntriesReply reply = new AppendEntriesReply(FOLLOWER_ID, 1, true, 2, 1);
- assertEquals(2, leaderActorContext.getCommitIndex());
+ RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
- ApplyLogEntries applyLogEntries =
- (ApplyLogEntries) MessageCollectorActor.getFirstMatching(leaderActor,
- ApplyLogEntries.class);
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
- assertNotNull(applyLogEntries);
+ assertEquals(2, leaderActorContext.getCommitIndex());
- assertEquals(2, leaderActorContext.getLastApplied());
+ ApplyLogEntries applyLogEntries = MessageCollectorActor.expectFirstMatching(
+ leaderActor, ApplyLogEntries.class);
- assertEquals(2, applyLogEntries.getToIndex());
+ assertEquals(2, leaderActorContext.getLastApplied());
- List<Object> applyStateList = MessageCollectorActor.getAllMatching(leaderActor,
- ApplyState.class);
+ assertEquals(2, applyLogEntries.getToIndex());
- assertEquals(1,applyStateList.size());
+ List<ApplyState> applyStateList = MessageCollectorActor.getAllMatching(leaderActor,
+ ApplyState.class);
- ApplyState applyState = (ApplyState) applyStateList.get(0);
+ assertEquals(1,applyStateList.size());
- assertEquals(2, applyState.getReplicatedLogEntry().getIndex());
+ ApplyState applyState = applyStateList.get(0);
- }};
+ assertEquals(2, applyState.getReplicatedLogEntry().getIndex());
}
@Test
public void testHandleAppendEntriesReplyUnknownFollower(){
- new JavaTestKit(getSystem()) {
- {
-
- ActorRef leaderActor =
- getSystem().actorOf(Props.create(MessageCollectorActor.class));
-
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ logStart("testHandleAppendEntriesReplyUnknownFollower");
- Leader leader = new Leader(leaderActorContext);
+ MockRaftActorContext leaderActorContext = createActorContext();
- AppendEntriesReply reply = new AppendEntriesReply("follower-1", 1, false, 10, 1);
+ leader = new Leader(leaderActorContext);
- RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(getRef(), reply);
+ AppendEntriesReply reply = new AppendEntriesReply("unkown-follower", 1, false, 10, 1);
- assertEquals(RaftState.Leader, raftActorBehavior.state());
+ RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
- }};
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
}
@Test
public void testHandleRequestVoteReply(){
- new JavaTestKit(getSystem()) {
- {
+ logStart("testHandleRequestVoteReply");
- ActorRef leaderActor =
- getSystem().actorOf(Props.create(MessageCollectorActor.class));
+ MockRaftActorContext leaderActorContext = createActorContext();
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ leader = new Leader(leaderActorContext);
- Leader leader = new Leader(leaderActorContext);
+ // Should be a no-op.
+ RaftActorBehavior raftActorBehavior = leader.handleRequestVoteReply(followerActor,
+ new RequestVoteReply(1, true));
- RaftActorBehavior raftActorBehavior = leader.handleRequestVoteReply(getRef(), new RequestVoteReply(1, true));
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
- assertEquals(RaftState.Leader, raftActorBehavior.state());
+ raftActorBehavior = leader.handleRequestVoteReply(followerActor, new RequestVoteReply(1, false));
- raftActorBehavior = leader.handleRequestVoteReply(getRef(), new RequestVoteReply(1, false));
-
- assertEquals(RaftState.Leader, raftActorBehavior.state());
- }};
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
}
@Test
public void testIsolatedLeaderCheckNoFollowers() {
- new JavaTestKit(getSystem()) {{
- ActorRef leaderActor = getTestActor();
+ logStart("testIsolatedLeaderCheckNoFollowers");
- MockRaftActorContext leaderActorContext =
- new MockRaftActorContext("leader", getSystem(), leaderActor);
+ MockRaftActorContext leaderActorContext = createActorContext();
- Map<String, String> peerAddresses = new HashMap<>();
- leaderActorContext.setPeerAddresses(peerAddresses);
-
- Leader leader = new Leader(leaderActorContext);
- RaftActorBehavior behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
- Assert.assertTrue(behavior instanceof Leader);
- }};
+ leader = new Leader(leaderActorContext);
+ RaftActorBehavior behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
+ Assert.assertTrue(behavior instanceof Leader);
}
@Test
public void testIsolatedLeaderCheckTwoFollowers() throws Exception {
+ logStart("testIsolatedLeaderCheckTwoFollowers");
+
new JavaTestKit(getSystem()) {{
ActorRef followerActor1 = getTestActor();
ActorRef followerActor2 = getTestActor();
- MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
+ MockRaftActorContext leaderActorContext = createActorContext();
Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put("follower-1", followerActor1.path().toString());
leaderActorContext.setPeerAddresses(peerAddresses);
- Leader leader = new Leader(leaderActorContext);
- leader.stopIsolatedLeaderCheckSchedule();
+ leader = new Leader(leaderActorContext);
leader.markFollowerActive("follower-1");
leader.markFollowerActive("follower-2");
behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
Assert.assertTrue("Behavior not instance of IsolatedLeader when majority followers are inactive",
behavior instanceof IsolatedLeader);
-
}};
}
- class MockLeader extends Leader {
- FollowerToSnapshot fts;
+ @Test
+ public void testAppendEntryCallAtEndofAppendEntryReply() throws Exception {
+ logStart("testAppendEntryCallAtEndofAppendEntryReply");
+
+ MockRaftActorContext leaderActorContext = createActorContextWithFollower();
- public MockLeader(RaftActorContext context){
- super(context);
- }
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ //configParams.setHeartBeatInterval(new FiniteDuration(9, TimeUnit.SECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
- public FollowerToSnapshot getFollowerToSnapshot() {
- return fts;
- }
+ leaderActorContext.setConfigParams(configParams);
- public void createFollowerToSnapshot(String followerId, ByteString bs ) {
- fts = new FollowerToSnapshot(bs);
- setFollowerSnapshot(followerId, fts);
- }
+ MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
+
+ followerActorContext.setConfigParams(configParams);
+
+ Follower follower = new Follower(followerActorContext);
+ followerActor.underlyingActor().setBehavior(follower);
+
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+ leaderActorContext.setCommitIndex(-1);
+ leaderActorContext.setLastApplied(-1);
+
+ followerActorContext.getReplicatedLog().removeFrom(0);
+ followerActorContext.setCommitIndex(-1);
+ followerActorContext.setLastApplied(-1);
+
+ leader = new Leader(leaderActorContext);
+
+ AppendEntriesReply appendEntriesReply = MessageCollectorActor.expectFirstMatching(
+ leaderActor, AppendEntriesReply.class);
+
+ leader.handleMessage(followerActor, appendEntriesReply);
+
+ // Clear initial heartbeat messages
+
+ leaderActor.underlyingActor().clear();
+ followerActor.underlyingActor().clear();
+
+ // create 3 entries
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+ leaderActorContext.setCommitIndex(1);
+ leaderActorContext.setLastApplied(1);
+
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().getHeartBeatInterval().toMillis(),
+ TimeUnit.MILLISECONDS);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // Should send first log entry
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(0, appendEntries.getEntries().get(0).getIndex());
+ assertEquals(-1, appendEntries.getPrevLogIndex());
+
+ appendEntriesReply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
+
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+ assertEquals(0, appendEntriesReply.getLogLastIndex());
+
+ followerActor.underlyingActor().clear();
+
+ leader.handleAppendEntriesReply(followerActor, appendEntriesReply);
+
+ appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
+
+ // Should send second log entry
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getEntries().get(0).getIndex());
+
+ follower.close();
+ }
+
+ @Test
+ public void testLaggingFollowerStarvation() throws Exception {
+ logStart("testLaggingFollowerStarvation");
+ new JavaTestKit(getSystem()) {{
+ String leaderActorId = actorFactory.generateActorId("leader");
+ String follower1ActorId = actorFactory.generateActorId("follower");
+ String follower2ActorId = actorFactory.generateActorId("follower");
+
+ TestActorRef<ForwardMessageToBehaviorActor> leaderActor =
+ actorFactory.createTestActor(ForwardMessageToBehaviorActor.props(), leaderActorId);
+ ActorRef follower1Actor = actorFactory.createActor(MessageCollectorActor.props(), follower1ActorId);
+ ActorRef follower2Actor = actorFactory.createActor(MessageCollectorActor.props(), follower2ActorId);
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext(leaderActorId, getSystem(), leaderActor);
+
+ DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
+ configParams.setHeartBeatInterval(new FiniteDuration(200, TimeUnit.MILLISECONDS));
+ configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(10, TimeUnit.SECONDS));
+
+ leaderActorContext.setConfigParams(configParams);
+
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(1,5,1).build());
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(follower1ActorId,
+ follower1Actor.path().toString());
+ peerAddresses.put(follower2ActorId,
+ follower2Actor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+ leaderActorContext.getTermInformation().update(1, leaderActorId);
+
+ RaftActorBehavior leader = createBehavior(leaderActorContext);
+
+ leaderActor.underlyingActor().setBehavior(leader);
+
+ for(int i=1;i<6;i++) {
+ // Each AppendEntriesReply could end up rescheduling the heartbeat (without the fix for bug 2733)
+ RaftActorBehavior newBehavior = leader.handleMessage(follower1Actor, new AppendEntriesReply(follower1ActorId, 1, true, i, 1));
+ assertTrue(newBehavior == leader);
+ Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+ }
+
+ // Check if the leader has been receiving SendHeartbeat messages despite getting AppendEntriesReply
+ List<SendHeartBeat> heartbeats = MessageCollectorActor.getAllMatching(leaderActor, SendHeartBeat.class);
+
+ assertTrue(String.format("%s heartbeat(s) is less than expected", heartbeats.size()),
+ heartbeats.size() > 1);
+
+ // Check if follower-2 got AppendEntries during this time and was not starved
+ List<AppendEntries> appendEntries = MessageCollectorActor.getAllMatching(follower2Actor, AppendEntries.class);
+
+ assertTrue(String.format("%s append entries is less than expected", appendEntries.size()),
+ appendEntries.size() > 1);
+
+ }};
+ }
+
+ @Override
+ protected void assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(RaftActorContext actorContext,
+ ActorRef actorRef, RaftRPC rpc) throws Exception {
+ super.assertStateChangesToFollowerWhenRaftRPCHasNewerTerm(actorContext, actorRef, rpc);
+ assertEquals("New votedFor", null, actorContext.getTermInformation().getVotedFor());
}
private class MockConfigParamsImpl extends DefaultConfigParamsImpl {
package org.opendaylight.controller.cluster.raft.behaviors;
import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import akka.event.LoggingAdapter;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
import java.io.ByteArrayOutputStream;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class SnapshotTrackerTest {
+ Logger logger = LoggerFactory.getLogger(getClass());
+
Map<String, String> data;
ByteString byteString;
ByteString chunk1;
@Test
public void testAddChunk() throws SnapshotTracker.InvalidChunkException {
- SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+ SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
tracker1.addChunk(1, chunk1, Optional.<Integer>absent());
tracker1.addChunk(2, chunk2, Optional.<Integer>absent());
tracker1.addChunk(3, chunk3, Optional.<Integer>absent());
// Verify that an InvalidChunkException is thrown when we try to add a chunk to a sealed tracker
- SnapshotTracker tracker2 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker2 = new SnapshotTracker(logger, 2);
tracker2.addChunk(1, chunk1, Optional.<Integer>absent());
tracker2.addChunk(2, chunk2, Optional.<Integer>absent());
}
// The first chunk's index must at least be FIRST_CHUNK_INDEX
- SnapshotTracker tracker3 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker3 = new SnapshotTracker(logger, 2);
try {
tracker3.addChunk(AbstractLeader.FIRST_CHUNK_INDEX - 1, chunk1, Optional.<Integer>absent());
}
// Out of sequence chunk indexes won't work
- SnapshotTracker tracker4 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker4 = new SnapshotTracker(logger, 2);
tracker4.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.<Integer>absent());
// No exceptions will be thrown when invalid chunk is added with the right sequence
// If the lastChunkHashCode is missing
- SnapshotTracker tracker5 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker5 = new SnapshotTracker(logger, 2);
tracker5.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.<Integer>absent());
// Look I can add the same chunk again
// An exception will be thrown when an invalid chunk is addedd with the right sequence
// when the lastChunkHashCode is present
- SnapshotTracker tracker6 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+ SnapshotTracker tracker6 = new SnapshotTracker(logger, 2);
tracker6.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.of(-1));
public void testGetSnapShot() throws SnapshotTracker.InvalidChunkException {
// Trying to get a snapshot before all chunks have been received will throw an exception
- SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+ SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
tracker1.addChunk(1, chunk1, Optional.<Integer>absent());
try {
}
- SnapshotTracker tracker2 = new SnapshotTracker(mock(LoggingAdapter.class), 3);
+ SnapshotTracker tracker2 = new SnapshotTracker(logger, 3);
tracker2.addChunk(1, chunk1, Optional.<Integer>absent());
tracker2.addChunk(2, chunk2, Optional.<Integer>absent());
@Test
public void testGetCollectedChunks() throws SnapshotTracker.InvalidChunkException {
- SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+ SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
ByteString chunks = chunk1.concat(chunk2);
*/
package org.opendaylight.controller.cluster.raft.messages;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+
/**
* Unit tests for AppendEntries.
*
ReplicatedLogEntry entry2 = new ReplicatedLogImplEntry(3, 4, new MockPayload("payload2"));
- AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L);
+ AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L, -1);
AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
@Test
public void testToAndFromSerializable() {
AppendEntries entries = new AppendEntries(5L, "node1", 7L, 8L,
- Collections.<ReplicatedLogEntry>emptyList(), 10L);
+ Collections.<ReplicatedLogEntry>emptyList(), 10L, -1);
assertSame("toSerializable", entries, entries.toSerializable());
assertSame("fromSerializable", entries,
@Test
public void testToAndFromLegacySerializable() {
ReplicatedLogEntry entry = new ReplicatedLogImplEntry(3, 4, new MockPayload("payload"));
- AppendEntries entries = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry), 10L);
+ AppendEntries entries = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry), 10L, -1);
Object serializable = entries.toSerializable(RaftVersions.HELIUM_VERSION);
Assert.assertTrue(serializable instanceof AppendEntriesMessages.AppendEntries);
assertEquals("getLeaderCommit", expected.getLeaderCommit(), actual.getLeaderCommit());
assertEquals("getPrevLogIndex", expected.getPrevLogIndex(), actual.getPrevLogIndex());
assertEquals("getPrevLogTerm", expected.getPrevLogTerm(), actual.getPrevLogTerm());
+ assertEquals("getReplicatedToAllIndex", expected.getReplicatedToAllIndex(), actual.getReplicatedToAllIndex());
assertEquals("getEntries size", expected.getEntries().size(), actual.getEntries().size());
Iterator<ReplicatedLogEntry> iter = expected.getEntries().iterator();
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.Props;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+
+public class ForwardMessageToBehaviorActor extends MessageCollectorActor {
+ private RaftActorBehavior behavior;
+
+ @Override
+ public void onReceive(Object message) throws Exception {
+ if(behavior != null) {
+ behavior.handleMessage(sender(), message);
+ }
+
+ super.onReceive(message);
+ }
+
+ public static Props props() {
+ return Props.create(ForwardMessageToBehaviorActor.class);
+ }
+
+ public void setBehavior(RaftActorBehavior behavior){
+ this.behavior = behavior;
+ }
+}
+
package org.opendaylight.controller.cluster.raft.utils;
import akka.actor.ActorRef;
+import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
public class MessageCollectorActor extends UntypedActor {
- private List<Object> messages = new ArrayList<>();
+ private static final String ARE_YOU_READY = "ARE_YOU_READY";
+
+ private final List<Object> messages = new ArrayList<>();
@Override public void onReceive(Object message) throws Exception {
+ if(message.equals(ARE_YOU_READY)) {
+ getSender().tell("yes", getSelf());
+ return;
+ }
+
if(message instanceof String){
if("get-all-messages".equals(message)){
- getSender().tell(new ArrayList(messages), getSelf());
+ getSender().tell(new ArrayList<>(messages), getSelf());
}
- } else {
+ } else if(message != null) {
messages.add(message);
}
}
+ public void clear() {
+ messages.clear();
+ }
+
public static List<Object> getAllMessages(ActorRef actor) throws Exception {
FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
Timeout operationTimeout = new Timeout(operationDuration);
Future<Object> future = Patterns.ask(actor, "get-all-messages", operationTimeout);
- try {
- return (List<Object>) Await.result(future, operationDuration);
- } catch (Exception e) {
- throw e;
- }
+ return (List<Object>) Await.result(future, operationDuration);
}
/**
* @param clazz
* @return
*/
- public static Object getFirstMatching(ActorRef actor, Class<?> clazz) throws Exception {
+ public static <T> T getFirstMatching(ActorRef actor, Class<T> clazz) throws Exception {
List<Object> allMessages = getAllMessages(actor);
for(Object message : allMessages){
if(message.getClass().equals(clazz)){
- return message;
+ return (T) message;
}
}
return null;
}
- public static List<Object> getAllMatching(ActorRef actor, Class<?> clazz) throws Exception {
+ public static <T> T expectFirstMatching(ActorRef actor, Class<T> clazz) {
+ return expectFirstMatching(actor, clazz, 5000);
+ }
+
+ public static <T> T expectFirstMatching(ActorRef actor, Class<T> clazz, long timeout) {
+ int count = (int) (timeout / 50);
+ for(int i = 0; i < count; i++) {
+ try {
+ T message = getFirstMatching(actor, clazz);
+ if(message != null) {
+ return message;
+ }
+ } catch (Exception e) {}
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Did not receive message of type " + clazz);
+ return null;
+ }
+
+ public static <T> List<T> getAllMatching(ActorRef actor, Class<T> clazz) throws Exception {
List<Object> allMessages = getAllMessages(actor);
- List<Object> output = Lists.newArrayList();
+ List<T> output = Lists.newArrayList();
for(Object message : allMessages){
if(message.getClass().equals(clazz)){
- output.add(message);
+ output.add((T) message);
}
}
return output;
}
+ public static void waitUntilReady(ActorRef actor) throws Exception {
+ long timeout = 500;
+ FiniteDuration duration = Duration.create(timeout, TimeUnit.MILLISECONDS);
+ for(int i = 0; i < 10; i++) {
+ try {
+ Await.ready(Patterns.ask(actor, ARE_YOU_READY, timeout), duration);
+ return;
+ } catch (TimeoutException e) {
+ }
+ }
+
+ throw new TimeoutException("Actor not ready in time.");
+ }
+
+ public static Props props() {
+ return Props.create(MessageCollectorActor.class);
+ }
}
--- /dev/null
+org.slf4j.simpleLogger.showDateTime=true
+org.slf4j.simpleLogger.dateTimeFormat=hh:mm:ss,S a
+org.slf4j.simpleLogger.logFile=System.out
+org.slf4j.simpleLogger.showShortLogName=true
+org.slf4j.simpleLogger.levelInBrackets=true
+org.slf4j.simpleLogger.log.org.opendaylight.controller.cluster.raft=trace
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import java.util.Collection;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+
+/**
+ * Modified Data Object.
+ *
+ * Represents modification of Data Object.
+ *
+ */
+public interface DataObjectModification<T extends DataObject> extends Identifiable<PathArgument> {
+
+ enum ModificationType {
+ /**
+ *
+ * Child node (direct or indirect) was modified.
+ *
+ */
+ SUBTREE_MODIFIED,
+ /**
+ *
+ * Node was explicitly created / overwritten.
+ *
+ */
+ WRITE,
+ /**
+ *
+ * Node was deleted.
+ *
+ */
+ DELETE
+ }
+
+ @Override
+ PathArgument getIdentifier();
+
+ /**
+ * Returns type of modified object.
+ *
+ * @return type of modified object.
+ */
+ @Nonnull Class<T> getDataType();
+
+ /**
+ *
+ * Returns type of modification
+ *
+ * @return type Type of performed modification.
+ */
+ @Nonnull ModificationType getModificationType();
+
+ /**
+ * Returns the after-state of the modified data object.
+ *
+ * @return State of the object after modification. Null if the object is not present.
+ */
+ @Nullable T getDataAfter();
+
+ /**
+ * Returns unmodifiable collection of modified direct children.
+ *
+ * @return unmodifiable collection of modified direct children.
+ */
+ @Nonnull Collection<DataObjectModification<? extends DataObject>> getModifiedChildren();
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import java.util.Collection;
+import java.util.EventListener;
+import javax.annotation.Nonnull;
+
+/**
+ * Interface implemented by classes interested in receiving notifications about
+ * data tree changes. This interface differs from {@link DataChangeListener}
+ * in that it provides a cursor-based view of the change, which has potentially
+ * lower overhead and allows more flexible consumption of change events.
+ */
+public interface DataTreeChangeListener extends EventListener {
+ /**
+ * Invoked when there was data change for the supplied path, which was used
+ * to register this listener.
+ *
+ * <p>
+ * This method may be also invoked during registration of the listener if
+ * there is any pre-existing data in the conceptual data tree for supplied
+ * path. This initial event will contain all pre-existing data as created.
+ *
+ * <p>
+ * A data change event may be triggered spuriously, e.g. such that data before
+ * and after compare as equal. Implementations of this interface are expected
+ * to recover from such events. Event producers are expected to exert reasonable
+ * effort to suppress such events.
+ *
+ * In other words, it is completely acceptable to observe
+ * a {@link DataObjectModification} whose data items compare as equal
+ * before and after the event.
+ *
+ * @param changes Collection of change events, may not be null or empty.
+ */
+ void onDataTreeChanged(@Nonnull Collection<DataTreeModification> changes);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A {@link BindingService} which allows users to register for changes to a
+ * subtree.
+ */
+public interface DataTreeChangeService extends BindingService {
+ /**
+ * Registers a {@link DataTreeChangeListener} to receive
+ * notifications when data changes under a given path in the conceptual data
+ * tree.
+ * <p>
+ * You are able to register for notifications for any node or subtree
+ * which can be represented using {@link DataTreeIdentifier}.
+ * <p>
+ *
+ * You are able to register for data change notifications for a subtree or leaf
+ * even if it does not exist. You will receive notification once that node is
+ * created.
+ * <p>
+ * If there is any pre-existing data in the data tree for the path for which you are
+ * registering, you will receive an initial data change event, which will
+ * contain all pre-existing data, marked as created.
+ *
+ * <p>
+ * This method returns a {@link ListenerRegistration} object. To
+ * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
+ * method on the returned object.
+ * <p>
+ * You MUST explicitly unregister your listener when you no longer want to receive
+ * notifications. This is especially true in OSGi environments, where failure to
+ * do so during bundle shutdown can lead to stale listeners being still registered.
+ *
+ * @param treeId
+ * Data tree identifier of the subtree which should be watched for
+ * changes.
+ * @param listener
+ * Listener instance which is being registered
+ * @return Listener registration object, which may be used to unregister
+ * your listener using {@link ListenerRegistration#close()} to stop
+ * delivery of change events.
+ */
+ @Nonnull <L extends DataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier treeId, @Nonnull L listener);
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.api;
+
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * A unique identifier for a particular subtree. It is composed of the logical
+ * data store type and the instance identifier of the root node.
+ */
+public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentifier>, Serializable {
+ private static final long serialVersionUID = 1L;
+ private final InstanceIdentifier<?> rootIdentifier;
+ private final LogicalDatastoreType datastoreType;
+
+ public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<?> rootIdentifier) {
+ this.datastoreType = Preconditions.checkNotNull(datastoreType);
+ this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
+ }
+
+ /**
+ * Return the logical data store type.
+ *
+ * @return Logical data store type. Guaranteed to be non-null.
+ */
+ public @Nonnull LogicalDatastoreType getDatastoreType() {
+ return datastoreType;
+ }
+
+ /**
+ * Return the {@link InstanceIdentifier} of the root node.
+ *
+ * @return Instance identifier corresponding to the root node.
+ */
+ public @Nonnull InstanceIdentifier<?> getRootIdentifier() {
+ return rootIdentifier;
+ }
+
+ @Override
+ public boolean contains(final DataTreeIdentifier other) {
+ return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + datastoreType.hashCode();
+ result = prime * result + rootIdentifier.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DataTreeIdentifier)) {
+ return false;
+ }
+ DataTreeIdentifier other = (DataTreeIdentifier) obj;
+ if (datastoreType != other.datastoreType) {
+ return false;
+ }
+ return rootIdentifier.equals(other.rootIdentifier);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.md.sal.binding.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+
+/**
+ * Represent root of modification.
+ *
+ * @author Tony Tkacik &lt;ttkacik@cisco.com&gt;
+ *
+ */
+public interface DataTreeModification {
+
+ /**
+ * Get the modification root path. This is the path of the root node
+ * relative to the root of InstanceIdentifier namespace.
+ *
+ * @return absolute path of the root node
+ */
+ @Nonnull DataTreeIdentifier getRootPath();
+
+ /**
+ * Get the modification root node.
+ *
+ * @return modification root node
+ */
+ @Nonnull DataObjectModification<? extends DataObject> getRootNode();
+
+}
<type>test-jar</type>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-service</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>pax-exam-container-native</artifactId>
<artifactId>yang-parser-impl</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-test-model</artifactId>
- </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-test-model</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>opendaylight-l2-types</artifactId>
+ </dependency>
</dependencies>
<build>
<plugins>
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-/*
+/**
* FIXME: THis test should be moved to sal-binding-broker and rewriten
* to use new DataBroker API
*/
@SuppressWarnings("deprecation")
public class ConcurrentImplicitCreateTest extends AbstractDataServiceTest {
- private static final NodeKey NODE_FOO_KEY = new NodeKey(new NodeId("foo"));
- private static final NodeKey NODE_BAR_KEY = new NodeKey(new NodeId("foo"));
- private static InstanceIdentifier<Nodes> NODES_PATH = InstanceIdentifier.builder(Nodes.class).build();
- private static InstanceIdentifier<Node> NODE_FOO_PATH = NODES_PATH.child(Node.class, NODE_FOO_KEY);
- private static InstanceIdentifier<Node> NODE_BAR_PATH = NODES_PATH.child(Node.class, NODE_FOO_KEY);
+ private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
+ private static final TopLevelListKey BAR_KEY = new TopLevelListKey("bar");
+ private static InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
+ private static InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, FOO_KEY);
+ private static InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, BAR_KEY);
@Test
public void testConcurrentCreate() throws InterruptedException, ExecutionException {
DataModificationTransaction fooTx = baDataService.beginTransaction();
DataModificationTransaction barTx = baDataService.beginTransaction();
- fooTx.putOperationalData(NODE_FOO_PATH, new NodeBuilder().setKey(NODE_FOO_KEY).build());
- barTx.putOperationalData(NODE_BAR_PATH, new NodeBuilder().setKey(NODE_BAR_KEY).build());
+ fooTx.putOperationalData(FOO_PATH, new TopLevelListBuilder().setKey(FOO_KEY).build());
+ barTx.putOperationalData(BAR_PATH, new TopLevelListBuilder().setKey(BAR_KEY).build());
Future<RpcResult<TransactionStatus>> fooFuture = fooTx.commit();
Future<RpcResult<TransactionStatus>> barFuture = barTx.commit();
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUses;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUsesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUses;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUsesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUsesKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import com.google.common.util.concurrent.SettableFuture;
-/*
- * FIXME: THis test should be moved to compat test-suite and rewriten
- * to use sal-test-model
+/**
+ * FIXME: This test should be moved to compat test-suite
*/
@SuppressWarnings("deprecation")
public class WildcardedDataChangeListenerTest extends AbstractDataServiceTest {
- private static final NodeKey NODE_0_KEY = new NodeKey(new NodeId("test:0"));
- private static final NodeKey NODE_1_KEY = new NodeKey(new NodeId("test:1"));
+ private static final TopLevelListKey TOP_LEVEL_LIST_0_KEY = new TopLevelListKey("test:0");
+ private static final TopLevelListKey TOP_LEVEL_LIST_1_KEY = new TopLevelListKey("test:1");
- public static final InstanceIdentifier<Flow> DEEP_WILDCARDED_PATH = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class) //
- .augmentation(FlowCapableNode.class) //
- .child(Table.class) //
- .child(Flow.class) //
+ protected static final InstanceIdentifier<ListViaUses> DEEP_WILDCARDED_PATH = InstanceIdentifier
+ .builder(Top.class)
+ .child(TopLevelList.class) //
+ .augmentation(TreeComplexUsesAugment.class) //
+ .child(ListViaUses.class) //
.build();
- private static final TableKey TABLE_0_KEY = new TableKey((short) 0);
- private static final TableFeaturesKey TABLE_FEATURES_KEY = new TableFeaturesKey((short) 0);
-
- private static final InstanceIdentifier<Table> NODE_0_TABLE_PATH = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, NODE_0_KEY) //
- .augmentation(FlowCapableNode.class) //
- .child(Table.class, TABLE_0_KEY) //
+ private static final InstanceIdentifier<TreeComplexUsesAugment> NODE_0_TCU_PATH = InstanceIdentifier
+ .builder(Top.class)
+ .child(TopLevelList.class, TOP_LEVEL_LIST_0_KEY) //
+ .augmentation(TreeComplexUsesAugment.class) //
.build();
- private static final InstanceIdentifier<Table> NODE_1_TABLE_PATH = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, NODE_1_KEY) //
- .augmentation(FlowCapableNode.class) //
- .child(Table.class, TABLE_0_KEY) //
+ private static final InstanceIdentifier<TreeComplexUsesAugment> NODE_1_TCU_PATH = InstanceIdentifier
+ .builder(Top.class)
+ .child(TopLevelList.class, TOP_LEVEL_LIST_1_KEY) //
+ .augmentation(TreeComplexUsesAugment.class) //
.build();
- private static final FlowKey FLOW_KEY = new FlowKey(new FlowId("test"));
- private static final InstanceIdentifier<Flow> NODE_0_FLOW_PATH = NODE_0_TABLE_PATH.child(Flow.class, FLOW_KEY);
+ private static final ListViaUsesKey LIST_VIA_USES_KEY = new ListViaUsesKey("test");
+
+ private static final InstanceIdentifier<ListViaUses> NODE_0_LVU_PATH = NODE_0_TCU_PATH.child(ListViaUses.class, LIST_VIA_USES_KEY);
- private static final InstanceIdentifier<Flow> NODE_1_FLOW_PATH = NODE_1_TABLE_PATH.child(Flow.class, FLOW_KEY);
+ private static final InstanceIdentifier<ListViaUses> NODE_1_LVU_PATH = NODE_1_TCU_PATH.child(ListViaUses.class, LIST_VIA_USES_KEY);
- private static final InstanceIdentifier<TableFeatures> NODE_0_TABLE_FEATURES_PATH =
- NODE_0_TABLE_PATH.child(TableFeatures.class, TABLE_FEATURES_KEY);
+ private static final InstanceIdentifier<ContainerWithUses> NODE_0_CWU_PATH =
+ NODE_0_TCU_PATH.child(ContainerWithUses.class);
- private static final TableFeatures TABLE_FEATURES = new TableFeaturesBuilder()//
- .setKey(TABLE_FEATURES_KEY) //
- .setName("Foo") //
- .setMaxEntries(1000L) //
+ private static final ContainerWithUses CWU= new ContainerWithUsesBuilder()//
+ .setLeafFromGrouping("some container value") //
.build();
- private static final Flow FLOW = new FlowBuilder() //
- .setKey(FLOW_KEY) //
- .setBarrier(true) //
- .setStrict(true) //
+ private static final ListViaUses LVU = new ListViaUsesBuilder() //
+ .setKey(LIST_VIA_USES_KEY) //
+ .setName("john")
.build();
@Test
- public void testSepareteWrites() throws InterruptedException, TimeoutException, ExecutionException {
+ public void testSeparateWrites() throws InterruptedException, TimeoutException, ExecutionException {
DataProviderService dataBroker = testContext.getBindingDataBroker();
final SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> eventFuture = SettableFuture.create();
dataBroker.registerDataChangeListener(DEEP_WILDCARDED_PATH, new DataChangeListener() {
-
@Override
public void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> dataChangeEvent) {
eventFuture.set(dataChangeEvent);
});
DataModificationTransaction transaction = dataBroker.beginTransaction();
- transaction.putOperationalData(NODE_0_TABLE_FEATURES_PATH, TABLE_FEATURES);
- transaction.putOperationalData(NODE_0_FLOW_PATH, FLOW);
- transaction.putOperationalData(NODE_1_FLOW_PATH, FLOW);
+ transaction.putOperationalData(NODE_0_CWU_PATH, CWU);
+ transaction.putOperationalData(NODE_0_LVU_PATH, LVU);
+ transaction.putOperationalData(NODE_1_LVU_PATH, LVU);
transaction.commit().get();
DataChangeEvent<InstanceIdentifier<?>, DataObject> event = eventFuture.get(1000, TimeUnit.MILLISECONDS);
final SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> eventFuture = SettableFuture.create();
dataBroker.registerDataChangeListener(DEEP_WILDCARDED_PATH, new DataChangeListener() {
-
@Override
public void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> dataChangeEvent) {
eventFuture.set(dataChangeEvent);
}
});
- DataModificationTransaction tableTx = dataBroker.beginTransaction();
- tableTx.putOperationalData(NODE_0_TABLE_FEATURES_PATH, TABLE_FEATURES);
- tableTx.commit().get();
+ DataModificationTransaction cwuTx = dataBroker.beginTransaction();
+ cwuTx.putOperationalData(NODE_0_CWU_PATH, CWU);
+ cwuTx.commit().get();
assertFalse(eventFuture.isDone());
- DataModificationTransaction flowTx = dataBroker.beginTransaction();
+ DataModificationTransaction lvuTx = dataBroker.beginTransaction();
- Table table = new TableBuilder() //
- .setKey(TABLE_0_KEY) //
- .setFlow(Collections.singletonList(FLOW)) //
- .build();
+ TreeComplexUsesAugment tcua = new TreeComplexUsesAugmentBuilder()
+ .setListViaUses(Collections.singletonList(LVU)).build();
- flowTx.putOperationalData(NODE_0_TABLE_PATH, table);
- flowTx.putOperationalData(NODE_1_FLOW_PATH, FLOW);
- flowTx.commit().get();
+ lvuTx.putOperationalData(NODE_0_TCU_PATH, tcua);
+ lvuTx.putOperationalData(NODE_1_LVU_PATH, LVU);
+ lvuTx.commit().get();
validateEvent(eventFuture.get(1000, TimeUnit.MILLISECONDS));
}
// We wrote initial state NODE_0_FLOW
DataModificationTransaction transaction = dataBroker.beginTransaction();
- transaction.putOperationalData(NODE_0_FLOW_PATH, FLOW);
+ transaction.putOperationalData(NODE_0_LVU_PATH, LVU);
transaction.commit().get();
// We registered DataChangeListener
assertFalse(eventFuture.isDone());
DataModificationTransaction secondTx = dataBroker.beginTransaction();
- secondTx.putOperationalData(NODE_0_FLOW_PATH, FLOW);
- secondTx.putOperationalData(NODE_1_FLOW_PATH, FLOW);
+ secondTx.putOperationalData(NODE_0_LVU_PATH, LVU);
+ secondTx.putOperationalData(NODE_1_LVU_PATH, LVU);
secondTx.commit().get();
DataChangeEvent<InstanceIdentifier<?>, DataObject> event = (eventFuture.get(1000, TimeUnit.MILLISECONDS));
assertNotNull(event);
// Data change should contains NODE_1 Flow - which was added
- assertTrue(event.getCreatedOperationalData().containsKey(NODE_1_FLOW_PATH));
+ assertTrue(event.getCreatedOperationalData().containsKey(NODE_1_LVU_PATH));
// Data change must not containe NODE_0 Flow which was replaced with same value.
- assertFalse(event.getUpdatedOperationalData().containsKey(NODE_0_FLOW_PATH));
+ assertFalse(event.getUpdatedOperationalData().containsKey(NODE_0_LVU_PATH));
}
private static void validateEvent(final DataChangeEvent<InstanceIdentifier<?>, DataObject> event) {
assertNotNull(event);
- assertTrue(event.getCreatedOperationalData().containsKey(NODE_1_FLOW_PATH));
- assertTrue(event.getCreatedOperationalData().containsKey(NODE_0_FLOW_PATH));
- assertFalse(event.getCreatedOperationalData().containsKey(NODE_0_TABLE_FEATURES_PATH));
+ assertTrue(event.getCreatedOperationalData().containsKey(NODE_1_LVU_PATH));
+ assertTrue(event.getCreatedOperationalData().containsKey(NODE_0_LVU_PATH));
+ assertFalse(event.getCreatedOperationalData().containsKey(NODE_0_CWU_PATH));
}
}
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
@SuppressWarnings("deprecation")
public class DOMCodecBug02Test extends AbstractDataServiceTest {
- private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
+ private static final InstanceIdentifier<Top> TOP_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
.toInstance();
/**
.submit(new Callable<Future<RpcResult<TransactionStatus>>>() {
@Override
public Future<RpcResult<TransactionStatus>> call() throws Exception {
- NodesBuilder nodesBuilder = new NodesBuilder();
- nodesBuilder.setNode(Collections.<Node> emptyList());
+ TopBuilder topBuilder = new TopBuilder();
+ topBuilder.setTopLevelList(Collections.<TopLevelList> emptyList());
DataModificationTransaction transaction = baDataService.beginTransaction();
- transaction.putOperationalData(NODES_INSTANCE_ID_BA, nodesBuilder.build());
+ transaction.putOperationalData(TOP_INSTANCE_ID_BA, topBuilder.build());
return transaction.commit();
}
});
RpcResult<TransactionStatus> result = future.get().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Nodes nodes = checkForNodes();
- assertNotNull(nodes);
+ Top top = checkForTop();
+ assertNotNull(top);
}
- private Nodes checkForNodes() {
- return (Nodes) baDataService.readOperationalData(NODES_INSTANCE_ID_BA);
+ private Top checkForTop() {
+ return (Top) baDataService.readOperationalData(TOP_INSTANCE_ID_BA);
}
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SupportedActions;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SupportedActionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.supported.actions.ActionType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.supported.actions.ActionTypeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.SupportType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.CustomEnum;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugmentBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.Cont2;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.Cont2Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.cont2.Contlist1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.cont2.Contlist1Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
@SuppressWarnings("deprecation")
public class DOMCodecBug03Test extends AbstractDataServiceTest implements DataChangeListener {
- private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
- private static final String NODE_ID = "openflow:1";
+ private static final QName TOP_LEVEL_LIST_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
+ private static final String TOP_LEVEL_LIST_NAME = "tll:foo";
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TOP_LEVEL_LIST_NAME);
- private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
- NODE_ID);
+ private static final Map<QName, Object> TLL_KEY_BI = Collections.<QName, Object> singletonMap(TOP_LEVEL_LIST_NAME_QNAME,
+ TOP_LEVEL_LIST_NAME);
- private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
+ private static final InstanceIdentifier<Top> TOP_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
.toInstance();
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = NODES_INSTANCE_ID_BA.child(Node.class, NODE_KEY);
+ private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = TOP_INSTANCE_ID_BA.child(TopLevelList.class, TLL_KEY);
- private static final InstanceIdentifier<SupportedActions> SUPPORTED_ACTIONS_INSTANCE_ID_BA = //
- NODES_INSTANCE_ID_BA.builder() //
- .child(Node.class, NODE_KEY) //
- .augmentation(FlowCapableNode.class) //
- .child(SupportedActions.class)
+ private static final InstanceIdentifier<Cont2> CONT2_INSTANCE_ID_BA = //
+ TOP_INSTANCE_ID_BA.builder() //
+ .child(TopLevelList.class, TLL_KEY) //
+ .augmentation(TllComplexAugment.class) //
+ .child(Cont2.class)
.toInstance();
- private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier NODE_INSTANCE_ID_BI = //
+ private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier TLL_INSTANCE_ID_BI = //
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .nodeWithKey(Node.QNAME, NODE_KEY_BI) //
+ .node(Top.QNAME) //
+ .nodeWithKey(TopLevelList.QNAME, TLL_KEY_BI) //
.toInstance();
- private static final QName SUPPORTED_ACTIONS_QNAME = QName.create(FlowCapableNode.QNAME, SupportedActions.QNAME.getLocalName());
+ private static final QName CONT2_QNAME = QName.create(TllComplexAugment.QNAME, Cont2.QNAME.getLocalName());
- private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier SUPPORTED_ACTIONS_INSTANCE_ID_BI = //
+ private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier CONT2_INSTANCE_ID_BI = //
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .nodeWithKey(Node.QNAME, NODE_KEY_BI) //
- .node(SUPPORTED_ACTIONS_QNAME) //
+ .node(Top.QNAME) //
+ .nodeWithKey(TopLevelList.QNAME, TLL_KEY_BI) //
+ .node(CONT2_QNAME) //
.toInstance();
private final SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> receivedChangeEvent = SettableFuture.create();
public void testAugmentSerialization() throws Exception {
- baDataService.registerDataChangeListener(NODES_INSTANCE_ID_BA, this);
+ baDataService.registerDataChangeListener(TOP_INSTANCE_ID_BA, this);
- NodeBuilder nodeBuilder = new NodeBuilder();
- nodeBuilder.setId(new NodeId(NODE_ID));
- nodeBuilder.setKey(NODE_KEY);
+ TopLevelListBuilder tllBuilder = new TopLevelListBuilder();
+ tllBuilder.setKey(TLL_KEY);
DataModificationTransaction transaction = baDataService.beginTransaction();
- FlowCapableNodeBuilder fnub = new FlowCapableNodeBuilder();
- fnub.setHardware("Hardware Foo");
- fnub.setManufacturer("Manufacturer Foo");
- fnub.setSerialNumber("Serial Foo");
- fnub.setDescription("Description Foo");
- fnub.setSoftware("JUnit emulated");
- FlowCapableNode fnu = fnub.build();
- nodeBuilder.addAugmentation(FlowCapableNode.class, fnu);
- Node original = nodeBuilder.build();
- transaction.putOperationalData(NODE_INSTANCE_ID_BA, original);
+ TllComplexAugmentBuilder tllcab = new TllComplexAugmentBuilder();
+ tllcab.setAttrStr1("Hardware Foo");
+ tllcab.setAttrStr2("Manufacturer Foo");
+ tllcab.setAttrStr3("Serial Foo");
+ tllcab.setAttrStr4("Description Foo");
+ TllComplexAugment tlca = tllcab.build();
+ tllBuilder.addAugmentation(TllComplexAugment.class, tlca);
+ TopLevelList original = tllBuilder.build();
+ transaction.putOperationalData(TLL_INSTANCE_ID_BA, original);
RpcResult<TransactionStatus> result = transaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
DataChangeEvent<InstanceIdentifier<?>, DataObject> potential = receivedChangeEvent.get(1000,TimeUnit.MILLISECONDS);
assertNotNull(potential);
- verifyNodes((Nodes) potential.getUpdatedOperationalSubtree(),original);
- assertBindingIndependentVersion(NODE_INSTANCE_ID_BI);
- Nodes nodes = checkForNodes();
- verifyNodes(nodes,original);
+ verifyTll((Top) potential.getUpdatedOperationalSubtree(),original);
+ assertBindingIndependentVersion(TLL_INSTANCE_ID_BI);
+ Top top = checkForTop();
+ verifyTll(top,original);
testAddingNodeConnector();
- testNodeRemove();
+ testTllRemove();
}
public void testAugmentNestedSerialization() throws Exception {
DataModificationTransaction transaction = baDataService.beginTransaction();
- SupportedActionsBuilder actions = new SupportedActionsBuilder();
- ActionTypeBuilder action = new ActionTypeBuilder();
- action.setAction("foo-action");
- action.setSupportState(SupportType.Native);
- List<ActionType> actionTypes = Collections.singletonList(action.build());
- actions.setActionType(actionTypes );
+ Cont2Builder cont2b = new Cont2Builder();
+ Contlist1Builder cl1b = new Contlist1Builder();
+ cl1b.setAttrStr("foo-action");
+ cl1b.setAttrEnum(CustomEnum.Type1);
+ List<Contlist1> contlists = Collections.singletonList(cl1b.build());
+ cont2b.setContlist1(contlists);
- transaction.putOperationalData(SUPPORTED_ACTIONS_INSTANCE_ID_BA, actions.build());
+ transaction.putOperationalData(CONT2_INSTANCE_ID_BA, cont2b.build());
RpcResult<TransactionStatus> putResult = transaction.commit().get();
assertNotNull(putResult);
assertEquals(TransactionStatus.COMMITED, putResult.getResult());
- SupportedActions readedTable = (SupportedActions) baDataService.readOperationalData(SUPPORTED_ACTIONS_INSTANCE_ID_BA);
+ Cont2 readedTable = (Cont2) baDataService.readOperationalData(CONT2_INSTANCE_ID_BA);
assertNotNull(readedTable);
- CompositeNode biSupportedActions = biDataService.readOperationalData(SUPPORTED_ACTIONS_INSTANCE_ID_BI);
+ CompositeNode biSupportedActions = biDataService.readOperationalData(CONT2_INSTANCE_ID_BI);
assertNotNull(biSupportedActions);
}
private void testAddingNodeConnector() throws Exception {
-
- NodeConnectorId ncId = new NodeConnectorId("openflow:1:bar");
- NodeConnectorKey nodeKey = new NodeConnectorKey(ncId );
- InstanceIdentifier<NodeConnector> ncInstanceId = NODE_INSTANCE_ID_BA.child(NodeConnector.class, nodeKey);
- NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
- ncBuilder.setId(ncId);
- ncBuilder.setKey(nodeKey);
- NodeConnector connector = ncBuilder.build();
+ NestedListKey nlKey = new NestedListKey("test:0:0");
+ InstanceIdentifier<NestedList> ncInstanceId = TLL_INSTANCE_ID_BA.child(NestedList.class, nlKey);
+ NestedListBuilder nlBuilder = new NestedListBuilder();
+ nlBuilder.setKey(nlKey);
+ NestedList nestedList = nlBuilder.build();
DataModificationTransaction transaction = baDataService.beginTransaction();
- transaction.putOperationalData(ncInstanceId, connector);
+ transaction.putOperationalData(ncInstanceId, nestedList);
RpcResult<TransactionStatus> result = transaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Node node = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
- assertNotNull(node);
- assertNotNull(node.getNodeConnector());
- assertFalse(node.getNodeConnector().isEmpty());
- NodeConnector readedNc = node.getNodeConnector().get(0);
- assertNotNull(readedNc);
+ TopLevelList tll = (TopLevelList) baDataService.readOperationalData(TLL_INSTANCE_ID_BA);
+ assertNotNull(tll);
+ assertNotNull(tll.getNestedList());
+ assertFalse(tll.getNestedList().isEmpty());
+ NestedList readedNl = tll.getNestedList().get(0);
+ assertNotNull(readedNl);
}
- private void testNodeRemove() throws Exception {
+ private void testTllRemove() throws Exception {
DataModificationTransaction transaction = baDataService.beginTransaction();
- transaction.removeOperationalData(NODE_INSTANCE_ID_BA);
+ transaction.removeOperationalData(TLL_INSTANCE_ID_BA);
RpcResult<TransactionStatus> result = transaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Node node = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
- assertNull(node);
+ TopLevelList tll = (TopLevelList) baDataService.readOperationalData(TLL_INSTANCE_ID_BA);
+ assertNull(tll);
}
- private void verifyNodes(final Nodes nodes,final Node original) {
- assertNotNull(nodes);
- assertNotNull(nodes.getNode());
- assertEquals(1, nodes.getNode().size());
- Node readedNode = nodes.getNode().get(0);
- assertEquals(original.getId(), readedNode.getId());
+ private void verifyTll(final Top top,final TopLevelList original) {
+ assertNotNull(top);
+ assertNotNull(top.getTopLevelList());
+ assertEquals(1, top.getTopLevelList().size());
+ TopLevelList readedNode = top.getTopLevelList().get(0);
+ assertEquals(original.getName(), readedNode.getName());
assertEquals(original.getKey(), readedNode.getKey());
- FlowCapableNode fnu = original.getAugmentation(FlowCapableNode.class);
- FlowCapableNode readedAugment = readedNode.getAugmentation(FlowCapableNode.class);
+ TllComplexAugment fnu = original.getAugmentation(TllComplexAugment.class);
+ TllComplexAugment readedAugment = readedNode.getAugmentation(TllComplexAugment.class);
assertNotNull(fnu);
- assertEquals(fnu.getDescription(), readedAugment.getDescription());
- assertEquals(fnu.getSerialNumber(), readedAugment.getSerialNumber());
+ assertEquals(fnu.getAttrStr2(), readedAugment.getAttrStr2());
+ assertEquals(fnu.getAttrStr3(), readedAugment.getAttrStr3());
}
assertNotNull(node);
}
- private Nodes checkForNodes() {
- return (Nodes) baDataService.readOperationalData(NODES_INSTANCE_ID_BA);
+ private Top checkForTop() {
+ return (Top) baDataService.readOperationalData(TOP_INSTANCE_ID_BA);
}
@Override
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsDataBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.statistics.FlowStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.List11SimpleAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.List11SimpleAugmentBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
@SuppressWarnings("deprecation")
public class DeleteNestedAugmentationListenParentTest extends AbstractDataServiceTest {
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId("foo"));
+ private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
- private static final TableKey TABLE_KEY = new TableKey((short) 0);
+ private static final List1Key LIST1_KEY = new List1Key("one");
- private static final FlowKey FLOW_KEY = new FlowKey(new FlowId("100"));
+ private static final List11Key LIST11_KEY = new List11Key(100);
- private static final InstanceIdentifier<FlowCapableNode> NODE_AUGMENT_PATH = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class,NODE_KEY)
- .augmentation(FlowCapableNode.class)
+ private static final InstanceIdentifier<TllComplexAugment> TLL_COMPLEX_AUGMENT_PATH = InstanceIdentifier.builder(Top.class)
+ .child(TopLevelList.class,FOO_KEY)
+ .augmentation(TllComplexAugment.class)
.build();
- private static final InstanceIdentifier<Flow> FLOW_PATH = NODE_AUGMENT_PATH.builder()
- .child(Table.class,TABLE_KEY)
- .child(Flow.class,FLOW_KEY)
+ private static final InstanceIdentifier<List11> LIST11_PATH = TLL_COMPLEX_AUGMENT_PATH.builder()
+ .child(List1.class,LIST1_KEY)
+ .child(List11.class,LIST11_KEY)
.build();
public void deleteChildListenParent() throws InterruptedException, ExecutionException {
DataModificationTransaction initTx = baDataService.beginTransaction();
- initTx.putOperationalData(FLOW_PATH, flow());
+ initTx.putOperationalData(LIST11_PATH, createList11());
initTx.commit().get();
final SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> event = SettableFuture.create();
- baDataService.registerDataChangeListener(FLOW_PATH, new DataChangeListener() {
+ baDataService.registerDataChangeListener(LIST11_PATH, new DataChangeListener() {
@Override
public void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
});
DataModificationTransaction deleteTx = baDataService.beginTransaction();
- deleteTx.removeOperationalData(FLOW_PATH.augmentation(FlowStatisticsData.class));
+ deleteTx.removeOperationalData(LIST11_PATH.augmentation(List11SimpleAugment.class));
deleteTx.commit().get();
DataChangeEvent<InstanceIdentifier<?>, DataObject> receivedEvent = event.get();
- assertFalse(receivedEvent.getRemovedOperationalData().contains(NODE_AUGMENT_PATH));
+ assertFalse(receivedEvent.getRemovedOperationalData().contains(TLL_COMPLEX_AUGMENT_PATH));
}
- private Flow flow() {
- FlowBuilder builder = new FlowBuilder()
- .setKey(FLOW_KEY)
- .addAugmentation(FlowStatisticsData.class,new FlowStatisticsDataBuilder()
- .setFlowStatistics(new FlowStatisticsBuilder().build())
- .build())
- .setBarrier(true)
- .setMatch(new MatchBuilder()
- .build())
- ;
+ private List11 createList11() {
+ List11Builder builder = new List11Builder()
+ .setKey(LIST11_KEY)
+ .addAugmentation(List11SimpleAugment.class,new List11SimpleAugmentBuilder()
+ .setAttrStr2("bad").build())
+ .setAttrStr("good");
return builder.build();
}
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PopMplsActionCaseBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.pop.mpls.action._case.PopMplsActionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowModFlags;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.InstructionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActionsCaseBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.apply.actions._case.ApplyActionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.vlan.match.fields.VlanIdBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.BitFlags;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
@SuppressWarnings("deprecation")
public class FlagsSerializationTest extends AbstractDataServiceTest {
-
- private static final String FLOW_ID = "1234";
- private static final short TABLE_ID = (short)0;
- private static final String NODE_ID = "node:1";
-
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
- private static final FlowKey FLOW_KEY = new FlowKey(new FlowId(FLOW_ID));
- private static final TableKey TABLE_KEY = new TableKey(TABLE_ID);
-
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
- .child(Node.class, NODE_KEY).toInstance();
-
- private static final InstanceIdentifier<? extends DataObject> FLOW_INSTANCE_ID_BA = //
- NODE_INSTANCE_ID_BA.builder() //
- .augmentation(FlowCapableNode.class)
- .child(Table.class,TABLE_KEY)
- .child(Flow.class, FLOW_KEY) //
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey("foo");
+ private static final List11Key LIST11_KEY = new List11Key(1234);
+ private static final List1Key LIST1_KEY = new List1Key("1");
+
+ private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
+ .child(TopLevelList.class, TLL_KEY).toInstance();
+
+ private static final InstanceIdentifier<? extends DataObject> LIST11_INSTANCE_ID_BA = //
+ TLL_INSTANCE_ID_BA.builder() //
+ .augmentation(TllComplexAugment.class)
+ .child(List1.class,LIST1_KEY)
+ .child(List11.class, LIST11_KEY) //
.toInstance();
- private static final QName FLOW_FLAGS_QNAME = QName.create(Flow.QNAME, "flags");
+ private static final QName LIST11_FLAGS_QNAME = QName.create(List11.QNAME, "flags");
@Test
public void testIndirectGeneration() throws Exception {
- FlowModFlags checkOverlapFlags = new FlowModFlags(true,false,false,false,false);
- ImmutableSet<String> domCheckOverlapFlags = ImmutableSet.<String>of("CHECK_OVERLAP");
+ BitFlags checkOverlapFlags = new BitFlags(true,false,false,false,false);
+ ImmutableSet<String> domCheckOverlapFlags = ImmutableSet.<String>of("FLAG_FIVE");
testFlags(checkOverlapFlags,domCheckOverlapFlags);
- FlowModFlags allFalseFlags = new FlowModFlags(false,false,false,false,false);
+ BitFlags allFalseFlags = new BitFlags(false,false,false,false,false);
ImmutableSet<String> domAllFalseFlags = ImmutableSet.<String>of();
testFlags(allFalseFlags,domAllFalseFlags);
- FlowModFlags allTrueFlags = new FlowModFlags(true,true,true,true,true);
- ImmutableSet<String> domAllTrueFlags = ImmutableSet.<String>of("CHECK_OVERLAP","NO_BYT_COUNTS", "NO_PKT_COUNTS", "RESET_COUNTS", "SEND_FLOW_REM");
+ BitFlags allTrueFlags = new BitFlags(true,true,true,true,true);
+ ImmutableSet<String> domAllTrueFlags = ImmutableSet.<String>of("FLAG_ONE","FLAG_TWO","FLAG_THREE","FLAG_FOUR","FLAG_FIVE");
testFlags(allTrueFlags,domAllTrueFlags);
testFlags(null,null);
}
- private void testFlags(final FlowModFlags flagsToTest, final ImmutableSet<String> domFlags) throws Exception {
- Flow flow = createFlow(flagsToTest);
- assertNotNull(flow);
+ private void testFlags(final BitFlags flagsToTest, final ImmutableSet<String> domFlags) throws Exception {
+ List11 list11 = createList11(flagsToTest);
+ assertNotNull(list11);
- CompositeNode domFlow = biDataService.readConfigurationData(mappingService.toDataDom(FLOW_INSTANCE_ID_BA));
+ CompositeNode domList11 = biDataService.readConfigurationData(mappingService.toDataDom(LIST11_INSTANCE_ID_BA));
- assertNotNull(domFlow);
- org.opendaylight.yangtools.yang.data.api.Node<?> readedFlags = domFlow.getFirstSimpleByName(FLOW_FLAGS_QNAME);
+ assertNotNull(domList11);
+ org.opendaylight.yangtools.yang.data.api.Node<?> readedFlags = domList11.getFirstSimpleByName(LIST11_FLAGS_QNAME);
if(domFlags != null) {
assertNotNull(readedFlags);
} else {
assertNull(readedFlags);
}
- assertEquals(flagsToTest, flow.getFlags());
+ assertEquals(flagsToTest, list11.getFlags());
DataModificationTransaction transaction = baDataService.beginTransaction();
- transaction.removeConfigurationData(FLOW_INSTANCE_ID_BA);
+ transaction.removeConfigurationData(LIST11_INSTANCE_ID_BA);
RpcResult<TransactionStatus> result = transaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
}
- private Flow createFlow(final FlowModFlags flagsToTest) throws Exception {
+ private List11 createList11(final BitFlags flagsToTest) throws Exception {
DataModificationTransaction modification = baDataService.beginTransaction();
- FlowBuilder flow = new FlowBuilder();
- MatchBuilder match = new MatchBuilder();
- VlanMatchBuilder vlanBuilder = new VlanMatchBuilder();
- VlanIdBuilder vlanIdBuilder = new VlanIdBuilder();
- VlanId vlanId = new VlanId(10);
- vlanBuilder.setVlanId(vlanIdBuilder.setVlanId(vlanId).build());
- match.setVlanMatch(vlanBuilder.build());
-
- flow.setKey(FLOW_KEY);
- flow.setMatch(match.build());
-
- flow.setFlags(flagsToTest);
-
- InstructionsBuilder instructions = new InstructionsBuilder();
- InstructionBuilder instruction = new InstructionBuilder();
-
- instruction.setOrder(10);
- ApplyActionsBuilder applyActions = new ApplyActionsBuilder();
- List<Action> actionList = new ArrayList<>();
- PopMplsActionBuilder popMplsAction = new PopMplsActionBuilder();
- popMplsAction.setEthernetType(34);
- actionList.add(new ActionBuilder().setAction(new PopMplsActionCaseBuilder().setPopMplsAction(popMplsAction.build()).build()).setOrder(10).build());
-
- applyActions.setAction(actionList );
-
- instruction.setInstruction(new ApplyActionsCaseBuilder().setApplyActions(applyActions.build()).build());
+ List11Builder list11b = new List11Builder();
+ list11b.setKey(LIST11_KEY);
+ list11b.setAttrStr("list:1:1");
- List<Instruction> instructionList = Collections.<Instruction>singletonList(instruction.build());
- instructions.setInstruction(instructionList );
+ list11b.setFlags(flagsToTest);
- flow.setInstructions(instructions.build());
- modification.putConfigurationData(FLOW_INSTANCE_ID_BA, flow.build());
+ modification.putConfigurationData(LIST11_INSTANCE_ID_BA, list11b.build());
RpcResult<TransactionStatus> ret = modification.commit().get();
assertNotNull(ret);
assertEquals(TransactionStatus.COMMITED, ret.getResult());
- return (Flow) baDataService.readConfigurationData(FLOW_INSTANCE_ID_BA);
+ return (List11) baDataService.readConfigurationData(LIST11_INSTANCE_ID_BA);
}
}
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.NestedListSimpleAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.NestedListSimpleAugmentBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugmentBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
@SuppressWarnings("deprecation")
public class PutAugmentationTest extends AbstractDataServiceTest implements DataChangeListener {
- private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
- private static final String NODE_ID = "openflow:1";
+ private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
+ private static final String TLL_NAME = "foo";
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
- private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
- NODE_ID);
+ private static final Map<QName, Object> TLL_KEY_BI = Collections.<QName, Object> singletonMap(TLL_NAME_QNAME,
+ TLL_NAME);
- private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
+ private static final InstanceIdentifier<Top> TOP_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
.toInstance();
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = //
- NODES_INSTANCE_ID_BA.builder() //
- .child(Node.class, NODE_KEY).toInstance();
+ private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = //
+ TOP_INSTANCE_ID_BA.builder() //
+ .child(TopLevelList.class, TLL_KEY).toInstance();
- private static final InstanceIdentifier<FlowCapableNode> ALL_FLOW_CAPABLE_NODES = //
- NODES_INSTANCE_ID_BA.builder() //
- .child(Node.class) //
- .augmentation(FlowCapableNode.class) //
+ private static final InstanceIdentifier<TllComplexAugment> ALL_TCA = //
+ TOP_INSTANCE_ID_BA.builder() //
+ .child(TopLevelList.class) //
+ .augmentation(TllComplexAugment.class) //
.build();
- private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier NODE_INSTANCE_ID_BI = //
+ private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier TLL_INSTANCE_ID_BI = //
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .nodeWithKey(Node.QNAME, NODE_KEY_BI) //
+ .node(Top.QNAME) //
+ .nodeWithKey(TopLevelList.QNAME, TLL_KEY_BI) //
.toInstance();
- private static final InstanceIdentifier<FlowCapableNode> FLOW_AUGMENTATION_PATH =
- NODE_INSTANCE_ID_BA.builder() //
- .augmentation(FlowCapableNode.class) //
+ private static final InstanceIdentifier<TllComplexAugment> TCA_AUGMENTATION_PATH =
+ TLL_INSTANCE_ID_BA.builder() //
+ .augmentation(TllComplexAugment.class) //
.build();
private SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> lastReceivedChangeEvent;
@Ignore
public void putNodeAndAugmentation() throws Exception {
lastReceivedChangeEvent = SettableFuture.create();
- baDataService.registerDataChangeListener(ALL_FLOW_CAPABLE_NODES, this);
+ baDataService.registerDataChangeListener(ALL_TCA, this);
- NodeBuilder nodeBuilder = new NodeBuilder();
- nodeBuilder.setId(new NodeId(NODE_ID));
- nodeBuilder.setKey(NODE_KEY);
+ TopLevelListBuilder nodeBuilder = new TopLevelListBuilder();
+ nodeBuilder.setKey(TLL_KEY);
DataModificationTransaction baseTransaction = baDataService.beginTransaction();
- baseTransaction.putOperationalData(NODE_INSTANCE_ID_BA, nodeBuilder.build());
+ baseTransaction.putOperationalData(TLL_INSTANCE_ID_BA, nodeBuilder.build());
RpcResult<TransactionStatus> result = baseTransaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Node node = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
- assertNotNull(node);
- assertEquals(NODE_KEY, node.getKey());
-
- FlowCapableNodeBuilder fnub = new FlowCapableNodeBuilder();
- fnub.setHardware("Hardware Foo");
- fnub.setManufacturer("Manufacturer Foo");
- fnub.setSerialNumber("Serial Foo");
- fnub.setDescription("Description Foo");
- fnub.setSoftware("JUnit emulated");
- FlowCapableNode fnu = fnub.build();
- InstanceIdentifier<FlowCapableNode> augmentIdentifier = NODE_INSTANCE_ID_BA
- .augmentation(FlowCapableNode.class);
+ TopLevelList tll = (TopLevelList) baDataService.readOperationalData(TLL_INSTANCE_ID_BA);
+ assertNotNull(tll);
+ assertEquals(TLL_KEY, tll.getKey());
+
+ TllComplexAugmentBuilder tcab = new TllComplexAugmentBuilder();
+ tcab.setAttrStr1("FooFoo");
+ tcab.setAttrStr2("BarBar");
+ TllComplexAugment tca = tcab.build();
+ InstanceIdentifier<TreeComplexUsesAugment> augmentIdentifier = TLL_INSTANCE_ID_BA
+ .augmentation(TreeComplexUsesAugment.class);
DataModificationTransaction augmentedTransaction = baDataService.beginTransaction();
- augmentedTransaction.putOperationalData(augmentIdentifier, fnu);
+ augmentedTransaction.putOperationalData(augmentIdentifier, tca);
lastReceivedChangeEvent = SettableFuture.create();
DataChangeEvent<InstanceIdentifier<?>, DataObject> potential = lastReceivedChangeEvent.get(1000,TimeUnit.MILLISECONDS);
assertNotNull(potential);
- assertTrue(potential.getCreatedOperationalData().containsKey(FLOW_AUGMENTATION_PATH));
+ assertTrue(potential.getCreatedOperationalData().containsKey(TCA_AUGMENTATION_PATH));
lastReceivedChangeEvent = SettableFuture.create();
- Node augmentedNode = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
- assertNotNull(node);
- assertEquals(NODE_KEY, augmentedNode.getKey());
+ TopLevelList augmentedTll = (TopLevelList) baDataService.readOperationalData(TLL_INSTANCE_ID_BA);
+ assertNotNull(tll);
+ assertEquals(TLL_KEY, augmentedTll.getKey());
System.out.println("Before assertion");
- assertNotNull(augmentedNode.getAugmentation(FlowCapableNode.class));
- FlowCapableNode readedAugmentation = augmentedNode.getAugmentation(FlowCapableNode.class);
- assertEquals(fnu.getDescription(), readedAugmentation.getDescription());
- assertBindingIndependentVersion(NODE_INSTANCE_ID_BI);
- testNodeRemove();
- assertTrue(lastReceivedChangeEvent.get(1000,TimeUnit.MILLISECONDS).getRemovedOperationalData().contains(FLOW_AUGMENTATION_PATH));
+ assertNotNull(augmentedTll.getAugmentation(TllComplexAugment.class));
+ TllComplexAugment readedAugmentation = augmentedTll.getAugmentation(TllComplexAugment.class);
+ assertEquals(tca.getAttrStr2(), readedAugmentation.getAttrStr2());
+ assertBindingIndependentVersion(TLL_INSTANCE_ID_BI);
+ testTllRemove();
+ assertTrue(lastReceivedChangeEvent.get(1000,TimeUnit.MILLISECONDS).getRemovedOperationalData().contains(TCA_AUGMENTATION_PATH));
}
@Test
@Ignore
public void putNodeWithAugmentation() throws Exception {
lastReceivedChangeEvent = SettableFuture.create();
- baDataService.registerDataChangeListener(ALL_FLOW_CAPABLE_NODES, this);
-
- NodeBuilder nodeBuilder = new NodeBuilder();
- nodeBuilder.setId(new NodeId(NODE_ID));
- nodeBuilder.setKey(NODE_KEY);
- FlowCapableNodeBuilder fnub = new FlowCapableNodeBuilder();
- fnub.setHardware("Hardware Foo");
- fnub.setManufacturer("Manufacturer Foo");
- fnub.setSerialNumber("Serial Foo");
- fnub.setDescription("Description Foo");
- fnub.setSoftware("JUnit emulated");
- FlowCapableNode fnu = fnub.build();
-
- nodeBuilder.addAugmentation(FlowCapableNode.class, fnu);
+ baDataService.registerDataChangeListener(ALL_TCA, this);
+
+ TopLevelListBuilder nodeBuilder = new TopLevelListBuilder();
+ nodeBuilder.setKey(TLL_KEY);
+ TllComplexAugmentBuilder tcab = new TllComplexAugmentBuilder();
+ tcab.setAttrStr1("FooFoo");
+ tcab.setAttrStr2("BarBar");
+ TllComplexAugment tca = tcab.build();
+
+ nodeBuilder.addAugmentation(TreeComplexUsesAugment.class, tca);
DataModificationTransaction baseTransaction = baDataService.beginTransaction();
- baseTransaction.putOperationalData(NODE_INSTANCE_ID_BA, nodeBuilder.build());
+ baseTransaction.putOperationalData(TLL_INSTANCE_ID_BA, nodeBuilder.build());
RpcResult<TransactionStatus> result = baseTransaction.commit().get();
DataChangeEvent<InstanceIdentifier<?>, DataObject> potential = lastReceivedChangeEvent.get(1000,TimeUnit.MILLISECONDS);
assertNotNull(potential);
- assertTrue(potential.getCreatedOperationalData().containsKey(FLOW_AUGMENTATION_PATH));
+ assertTrue(potential.getCreatedOperationalData().containsKey(TCA_AUGMENTATION_PATH));
lastReceivedChangeEvent = SettableFuture.create();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- FlowCapableNode readedAugmentation = (FlowCapableNode) baDataService.readOperationalData(
- NODE_INSTANCE_ID_BA.augmentation(FlowCapableNode.class));
+ TllComplexAugment readedAugmentation = (TllComplexAugment) baDataService.readOperationalData(
+ TLL_INSTANCE_ID_BA.augmentation(TllComplexAugment.class));
assertNotNull(readedAugmentation);
- assertEquals(fnu.getHardware(), readedAugmentation.getHardware());
+ assertEquals(tca.getAttrStr1(), readedAugmentation.getAttrStr1());
testPutNodeConnectorWithAugmentation();
lastReceivedChangeEvent = SettableFuture.create();
- testNodeRemove();
+ testTllRemove();
- assertTrue(lastReceivedChangeEvent.get(1000,TimeUnit.MILLISECONDS).getRemovedOperationalData().contains(FLOW_AUGMENTATION_PATH));
+ assertTrue(lastReceivedChangeEvent.get(1000,TimeUnit.MILLISECONDS).getRemovedOperationalData().contains(TCA_AUGMENTATION_PATH));
}
private void testPutNodeConnectorWithAugmentation() throws Exception {
- NodeConnectorKey ncKey = new NodeConnectorKey(new NodeConnectorId("test:0:0"));
- InstanceIdentifier<NodeConnector> ncPath = NODE_INSTANCE_ID_BA
- .child(NodeConnector.class, ncKey);
- InstanceIdentifier<FlowCapableNodeConnector> ncAugmentPath = ncPath
- .augmentation(FlowCapableNodeConnector.class);
+ NestedListKey ncKey = new NestedListKey("test:0:0");
+ InstanceIdentifier<NestedList> ncPath = TLL_INSTANCE_ID_BA
+ .child(NestedList.class, ncKey);
+ InstanceIdentifier<NestedListSimpleAugment> ncAugmentPath = ncPath
+ .augmentation(NestedListSimpleAugment.class);
- NodeConnectorBuilder nc = new NodeConnectorBuilder();
+ NestedListBuilder nc = new NestedListBuilder();
nc.setKey(ncKey);
- FlowCapableNodeConnectorBuilder fncb = new FlowCapableNodeConnectorBuilder();
- fncb.setName("Baz");
- nc.addAugmentation(FlowCapableNodeConnector.class, fncb.build());
+ NestedListSimpleAugmentBuilder fncb = new NestedListSimpleAugmentBuilder();
+ fncb.setType("Baz");
+ nc.addAugmentation(NestedListSimpleAugment.class, fncb.build());
DataModificationTransaction baseTransaction = baDataService.beginTransaction();
baseTransaction.putOperationalData(ncPath, nc.build());
RpcResult<TransactionStatus> result = baseTransaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- FlowCapableNodeConnector readedAugmentation = (FlowCapableNodeConnector) baDataService
+ NestedListSimpleAugment readedAugmentation = (NestedListSimpleAugment) baDataService
.readOperationalData(ncAugmentPath);
assertNotNull(readedAugmentation);
- assertEquals(fncb.getName(), readedAugmentation.getName());
+ assertEquals(fncb.getType(), readedAugmentation.getType());
}
- private void testNodeRemove() throws Exception {
+ private void testTllRemove() throws Exception {
DataModificationTransaction transaction = baDataService.beginTransaction();
- transaction.removeOperationalData(NODE_INSTANCE_ID_BA);
+ transaction.removeOperationalData(TLL_INSTANCE_ID_BA);
RpcResult<TransactionStatus> result = transaction.commit().get();
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Node node = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
- assertNull(node);
+ TopLevelList tll = (TopLevelList) baDataService.readOperationalData(TLL_INSTANCE_ID_BA);
+ assertNull(tll);
}
- private void assertBindingIndependentVersion(final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier nodeId) {
- CompositeNode node = biDataService.readOperationalData(nodeId);
- assertNotNull(node);
+ private void assertBindingIndependentVersion(final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier tllId) {
+ CompositeNode tll = biDataService.readOperationalData(tllId);
+ assertNotNull(tll);
}
@Override
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUsesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
@SuppressWarnings("deprecation")
public class WriteParentListenAugmentTest extends AbstractDataServiceTest {
- private static final String NODE_ID = "node:1";
+ private static final String TLL_NAME = "foo";
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
- .child(Node.class, NODE_KEY).toInstance();
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
+ private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
+ .child(TopLevelList.class, TLL_KEY).toInstance();
- private static final InstanceIdentifier<FlowCapableNode> AUGMENT_WILDCARDED_PATH = InstanceIdentifier
- .builder(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class).toInstance();
+ private static final InstanceIdentifier<TreeComplexUsesAugment> AUGMENT_WILDCARDED_PATH = InstanceIdentifier
+ .builder(Top.class).child(TopLevelList.class).augmentation(TreeComplexUsesAugment.class).toInstance();
- private static final InstanceIdentifier<FlowCapableNode> AUGMENT_NODE_PATH = InstanceIdentifier
- .builder(Nodes.class).child(Node.class, NODE_KEY).augmentation(FlowCapableNode.class).toInstance();
+ private static final InstanceIdentifier<TreeComplexUsesAugment> AUGMENT_TLL_PATH = InstanceIdentifier
+ .builder(Top.class).child(TopLevelList.class, TLL_KEY).augmentation(TreeComplexUsesAugment.class).toInstance();
@Test
public void writeNodeListenAugment() throws Exception {
DataModificationTransaction modification = baDataService.beginTransaction();
- Node node = new NodeBuilder() //
- .setKey(NODE_KEY) //
- .addAugmentation(FlowCapableNode.class, flowCapableNode("one")).build();
- modification.putOperationalData(NODE_INSTANCE_ID_BA, node);
+ TopLevelList tll = new TopLevelListBuilder() //
+ .setKey(TLL_KEY) //
+ .addAugmentation(TreeComplexUsesAugment.class, treeComplexUsesAugment("one")).build();
+ modification.putOperationalData(TLL_INSTANCE_ID_BA, tll);
modification.commit().get();
DataChangeEvent<InstanceIdentifier<?>, DataObject> receivedEvent = event.get(1000, TimeUnit.MILLISECONDS);
- assertTrue(receivedEvent.getCreatedOperationalData().containsKey(AUGMENT_NODE_PATH));
+ assertTrue(receivedEvent.getCreatedOperationalData().containsKey(AUGMENT_TLL_PATH));
dclRegistration.close();
DataModificationTransaction mod2 = baDataService.beginTransaction();
- mod2.putOperationalData(AUGMENT_NODE_PATH, flowCapableNode("two"));
+ mod2.putOperationalData(AUGMENT_TLL_PATH, treeComplexUsesAugment("two"));
mod2.commit().get();
- FlowCapableNode readedAug = (FlowCapableNode) baDataService.readOperationalData(AUGMENT_NODE_PATH);
- assertEquals("two", readedAug.getDescription());
+ TreeComplexUsesAugment readedAug = (TreeComplexUsesAugment) baDataService.readOperationalData(AUGMENT_TLL_PATH);
+ assertEquals("two", readedAug.getContainerWithUses().getLeafFromGrouping());
}
- private FlowCapableNode flowCapableNode(final String description) {
- return new FlowCapableNodeBuilder() //
- .setDescription(description) //
+ private TreeComplexUsesAugment treeComplexUsesAugment(final String value) {
+ return new TreeComplexUsesAugmentBuilder() //
+ .setContainerWithUses(new ContainerWithUsesBuilder().setLeafFromGrouping(value).build()) //
.build();
}
}
\ No newline at end of file
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.InstructionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.vlan.match.fields.VlanIdBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
@SuppressWarnings("deprecation")
public class WriteParentReadChildTest extends AbstractDataServiceTest {
- private static final String FLOW_ID = "1234";
- private static final short TABLE_ID = (short) 0;
- private static final String NODE_ID = "node:1";
+ private static final int LIST11_ID = 1234;
+ private static final String LIST1_NAME = "bar";
+ private static final String TLL_NAME = "foo";
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
- private static final FlowKey FLOW_KEY = new FlowKey(new FlowId(FLOW_ID));
- private static final TableKey TABLE_KEY = new TableKey(TABLE_ID);
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
+ private static final List11Key LIST11_KEY = new List11Key(LIST11_ID);
+ private static final List1Key LIST1_KEY = new List1Key(LIST1_NAME);
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
- .child(Node.class, NODE_KEY).toInstance();
+ private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
+ .child(TopLevelList.class, TLL_KEY).toInstance();
- private static final InstanceIdentifier<Table> TABLE_INSTANCE_ID_BA = //
- NODE_INSTANCE_ID_BA.builder() //
- .augmentation(FlowCapableNode.class).child(Table.class, TABLE_KEY).build();
+ private static final InstanceIdentifier<List1> LIST1_INSTANCE_ID_BA = //
+ TLL_INSTANCE_ID_BA.builder() //
+ .augmentation(TllComplexAugment.class).child(List1.class, LIST1_KEY).build();
- private static final InstanceIdentifier<? extends DataObject> FLOW_INSTANCE_ID_BA = //
- TABLE_INSTANCE_ID_BA.child(Flow.class, FLOW_KEY);
+ private static final InstanceIdentifier<? extends DataObject> LIST11_INSTANCE_ID_BA = //
+ LIST1_INSTANCE_ID_BA.child(List11.class, LIST11_KEY);
/**
*
* The scenario tests writing parent node, which also contains child items
* @throws Exception
*/
@Test
- public void writeTableReadFlow() throws Exception {
+ public void writeParentReadChild() throws Exception {
DataModificationTransaction modification = baDataService.beginTransaction();
- Flow flow = new FlowBuilder() //
- .setKey(FLOW_KEY) //
- .setMatch(new MatchBuilder() //
- .setVlanMatch(new VlanMatchBuilder() //
- .setVlanId(new VlanIdBuilder() //
- .setVlanId(new VlanId(10)) //
- .build()) //
- .build()) //
- .build()) //
- .setInstructions(new InstructionsBuilder() //
- .setInstruction(ImmutableList.<Instruction>builder() //
- .build()) //
- .build()) //
+ List11 list11 = new List11Builder() //
+ .setKey(LIST11_KEY) //
+ .setAttrStr("primary")
.build();
- Table table = new TableBuilder()
- .setKey(TABLE_KEY)
- .setFlow(ImmutableList.of(flow))
+ List1 list1 = new List1Builder()
+ .setKey(LIST1_KEY)
+ .setList11(ImmutableList.of(list11))
.build();
- modification.putConfigurationData(TABLE_INSTANCE_ID_BA, table);
+ modification.putConfigurationData(LIST1_INSTANCE_ID_BA, list1);
RpcResult<TransactionStatus> ret = modification.commit().get();
assertNotNull(ret);
assertEquals(TransactionStatus.COMMITED, ret.getResult());
- DataObject readedTable = baDataService.readConfigurationData(TABLE_INSTANCE_ID_BA);
- assertNotNull("Readed table should not be nul.", readedTable);
- assertTrue(readedTable instanceof Table);
+ DataObject readList1 = baDataService.readConfigurationData(LIST1_INSTANCE_ID_BA);
+ assertNotNull("Readed table should not be nul.", readList1);
+ assertTrue(readList1 instanceof List1);
- DataObject readedFlow = baDataService.readConfigurationData(FLOW_INSTANCE_ID_BA);
- assertNotNull("Readed flow should not be null.",readedFlow);
- assertTrue(readedFlow instanceof Flow);
- assertEquals(flow, readedFlow);
+ DataObject readList11 = baDataService.readConfigurationData(LIST11_INSTANCE_ID_BA);
+ assertNotNull("Readed flow should not be null.",readList11);
+ assertTrue(readList11 instanceof List11);
+ assertEquals(list11, readList11);
}
}
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-// FIXME: Migrate to use new Data Broker APIs
+/**
+ * FIXME: Migrate to use new Data Broker APIs
+ */
@SuppressWarnings("deprecation")
public class BrokerIntegrationTest extends AbstractDataServiceTest {
+ private static final TopLevelListKey TLL_FOO_KEY = new TopLevelListKey("foo");
+ private static final TopLevelListKey TLL_BAR_KEY = new TopLevelListKey("bar");
+ private static final TopLevelListKey TLL_BAZ_KEY = new TopLevelListKey("baz");
+ private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
+ private static final InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, TLL_FOO_KEY);
+ private static final InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, TLL_BAR_KEY);
+ private static final InstanceIdentifier<TopLevelList> BAZ_PATH = TOP_PATH.child(TopLevelList.class, TLL_BAZ_KEY);
+
@Test
public void simpleModifyOperation() throws Exception {
- NodeRef node1 = createNodeRef("0");
- DataObject node = baDataService.readConfigurationData(node1.getValue());
- assertNull(node);
- Node nodeData1 = createNode("0");
+ DataObject tllFoo = baDataService.readConfigurationData(FOO_PATH);
+ assertNull(tllFoo);
+ TopLevelList tllFooData = createTll(TLL_FOO_KEY);
DataModificationTransaction transaction = baDataService.beginTransaction();
- transaction.putConfigurationData(node1.getValue(), nodeData1);
+ transaction.putConfigurationData(FOO_PATH, tllFooData);
Future<RpcResult<TransactionStatus>> commitResult = transaction.commit();
assertNotNull(commitResult);
assertNotNull(result.getResult());
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Node readedData = (Node) baDataService.readConfigurationData(node1.getValue());
+ TopLevelList readedData = (TopLevelList) baDataService.readConfigurationData(FOO_PATH);
assertNotNull(readedData);
- assertEquals(nodeData1.getKey(), readedData.getKey());
+ assertEquals(tllFooData.getKey(), readedData.getKey());
- NodeRef nodeFoo = createNodeRef("foo");
- NodeRef nodeBar = createNodeRef("bar");
- Node nodeFooData = createNode("foo");
- Node nodeBarData = createNode("bar");
+ TopLevelList nodeBarData = createTll(TLL_BAR_KEY);
+ TopLevelList nodeBazData = createTll(TLL_BAZ_KEY);
DataModificationTransaction insertMoreTr = baDataService.beginTransaction();
- insertMoreTr.putConfigurationData(nodeFoo.getValue(), nodeFooData);
- insertMoreTr.putConfigurationData(nodeBar.getValue(), nodeBarData);
+ insertMoreTr.putConfigurationData(BAR_PATH, nodeBarData);
+ insertMoreTr.putConfigurationData(BAZ_PATH, nodeBazData);
RpcResult<TransactionStatus> result2 = insertMoreTr.commit().get();
assertNotNull(result2);
assertNotNull(result2.getResult());
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Nodes allNodes = (Nodes) baDataService.readConfigurationData(InstanceIdentifier.builder(Nodes.class)
- .toInstance());
- assertNotNull(allNodes);
- assertNotNull(allNodes.getNode());
- assertEquals(3, allNodes.getNode().size());
+ Top top = (Top) baDataService.readConfigurationData(TOP_PATH);
+ assertNotNull(top);
+ assertNotNull(top.getTopLevelList());
+ assertEquals(3, top.getTopLevelList().size());
/**
* We create transaction no 2
* We remove node 1
*
*/
- removalTransaction.removeConfigurationData(node1.getValue());
+ removalTransaction.removeConfigurationData(BAR_PATH);
/**
* We commit transaction
assertNotNull(result3.getResult());
assertEquals(TransactionStatus.COMMITED, result2.getResult());
- DataObject readedData2 = baDataService.readConfigurationData(node1.getValue());
+ DataObject readedData2 = baDataService.readConfigurationData(BAR_PATH);
assertNull(readedData2);
}
- private static NodeRef createNodeRef(final String string) {
- NodeKey key = new NodeKey(new NodeId(string));
- InstanceIdentifier<Node> path = InstanceIdentifier.builder(Nodes.class).child(Node.class, key)
- .toInstance();
- return new NodeRef(path);
- }
-
- private static Node createNode(final String string) {
- NodeBuilder ret = new NodeBuilder();
- ret.setId(new NodeId(string));
- ret.setKey(new NodeKey(ret.getId()));
+ private static TopLevelList createTll(final TopLevelListKey key) {
+ TopLevelListBuilder ret = new TopLevelListBuilder();
+ ret.setKey(key);
return ret.build();
}
}
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import java.util.ArrayList;
import java.util.Collections;
-import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
import org.opendaylight.controller.sal.core.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpVersion;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.DecNwTtlCaseBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.dec.nw.ttl._case.DecNwTtl;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.dec.nw.ttl._case.DecNwTtlBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.InstructionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActionsCaseBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.apply.actions._case.ApplyActionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List12Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List12Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.SettableFuture;
-// FIXME: Migrate to use new Data Broker APIs
+/**
+ * FIXME: Migrate to use new Data Broker APIs
+ */
@SuppressWarnings("deprecation")
public class ChangeOriginatedInDomBrokerTest extends AbstractDataServiceTest {
- private static final Logger LOG = LoggerFactory.getLogger(ChangeOriginatedInDomBrokerTest.class);
-
- private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
- private static final QName FLOW_ID_QNAME = QName.create(Flow.QNAME, "id");
- private static final QName TABLE_ID_QNAME = QName.create(Table.QNAME, "id");
+ protected static final Logger LOG = LoggerFactory.getLogger(ChangeOriginatedInDomBrokerTest.class);
- private static final String NODE_ID = "node:1";
- private static final FlowId FLOW_ID = new FlowId("1234");
- private static final Short TABLE_ID = Short.valueOf((short) 0);
+ private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
+ private static final QName LIST1_ATTR_STR_QNAME = QName.create(List1.QNAME, "attr-str");
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
- private static final FlowKey FLOW_KEY = new FlowKey(FLOW_ID);
+ private static final String TLL_NAME = "1";
+ private static final int LIST11_ATTR_INT = 1234;
+ private static final String LIST1_ATTR_STR = "foo:foo";
- private final SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> modificationCapture = SettableFuture.create();
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
+ private static final List1Key LIST1_KEY = new List1Key(LIST1_ATTR_STR);
+ private static final List11Key LIST11_KEY = new List11Key(LIST11_ATTR_INT);
- private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
- NODE_ID);
+ protected final SettableFuture<DataChangeEvent<InstanceIdentifier<?>, DataObject>> modificationCapture = SettableFuture.create();
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
- .child(Node.class, NODE_KEY).toInstance();
+ private static final Map<QName, Object> TLL_KEY_BI = Collections.<QName, Object> singletonMap(TLL_NAME_QNAME,
+ TLL_NAME);
- private static final Map<QName, Object> FLOW_KEY_BI = //
- ImmutableMap.<QName, Object> of(FLOW_ID_QNAME, FLOW_ID.getValue());
+ private static final InstanceIdentifier<TopLevelList> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
+ .child(TopLevelList.class, TLL_KEY).toInstance();
- private static final Map<QName, Object> TABLE_KEY_BI = //
- ImmutableMap.<QName, Object> of(TABLE_ID_QNAME, TABLE_ID);;
+ private static final Map<QName, Object> LIST1_KEY_BI = //
+ ImmutableMap.<QName, Object> of(LIST1_ATTR_STR_QNAME, LIST1_ATTR_STR);
- private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier FLOW_INSTANCE_ID_BI = //
+ private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier LIST1_INSTANCE_ID_BI = //
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .nodeWithKey(Node.QNAME, NODE_KEY_BI) //
- .nodeWithKey(Table.QNAME, TABLE_KEY_BI) //
- .nodeWithKey(Flow.QNAME, FLOW_KEY_BI) //
+ .node(Top.QNAME) //
+ .nodeWithKey(TopLevelList.QNAME, TLL_KEY_BI) //
+ .nodeWithKey(List1.QNAME, LIST1_KEY_BI) //
.toInstance();
- private static final TableKey TABLE_KEY_BA = new TableKey((short) 0);
- private static final InstanceIdentifier<Flow> FLOWS_PATH_BA = //
+ private static final InstanceIdentifier<List1> LIST1_PATH_BA = //
NODE_INSTANCE_ID_BA.builder() //
- .augmentation(FlowCapableNode.class) //
- .child(Table.class, TABLE_KEY_BA) //
- .child(Flow.class) //
+ .augmentation(TllComplexAugment.class) //
+ .child(List1.class, LIST1_KEY) //
.toInstance();
- private static final InstanceIdentifier<Flow> FLOW_INSTANCE_ID_BA = //
- FLOWS_PATH_BA.firstIdentifierOf(Table.class).child(Flow.class, FLOW_KEY);
-
@Test
public void simpleModifyOperation() throws Exception {
- assertNull(biDataService.readConfigurationData(FLOW_INSTANCE_ID_BI));
+ assertNull(biDataService.readConfigurationData(LIST1_INSTANCE_ID_BI));
registerChangeListener();
- CompositeNode domflow = createTestFlow();
+ CompositeNode domflow = createTestList1();
DataModificationTransaction biTransaction = biDataService.beginTransaction();
- biTransaction.putConfigurationData(FLOW_INSTANCE_ID_BI, domflow);
+ biTransaction.putConfigurationData(LIST1_INSTANCE_ID_BI, domflow);
RpcResult<TransactionStatus> biResult = biTransaction.commit().get();
assertEquals(TransactionStatus.COMMITED, biResult.getResult());
DataChangeEvent<InstanceIdentifier<?>, DataObject> event = modificationCapture.get(1000,TimeUnit.MILLISECONDS);
assertNotNull(event);
LOG.info("Created Configuration :{}",event.getCreatedConfigurationData());
- Flow flow = (Flow) event.getCreatedConfigurationData().get(FLOW_INSTANCE_ID_BA);
- assertNotNull(flow);
- assertNotNull(flow.getMatch());
+ List1 list1 = (List1) event.getCreatedConfigurationData().get(LIST1_PATH_BA);
+ assertNotNull(list1);
+ assertNotNull(list1.getAttrStr());
+ assertNotNull(list1.getList11());
+ assertNotNull(list1.getList12());
assertEquals(TransactionStatus.COMMITED, biResult.getResult());
}
private void registerChangeListener() {
- baDataService.registerDataChangeListener(FLOWS_PATH_BA, new DataChangeListener() {
+ baDataService.registerDataChangeListener(LIST1_PATH_BA, new DataChangeListener() {
@Override
public void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
});
}
- private CompositeNode createTestFlow() {
- FlowBuilder flow = new FlowBuilder();
- flow.setKey(FLOW_KEY);
- Short tableId = 0;
- flow.setTableId(tableId);
- MatchBuilder match = new MatchBuilder();
- match.setIpMatch(new IpMatchBuilder().setIpProto(IpVersion.Ipv4).build());
- Ipv4MatchBuilder ipv4Match = new Ipv4MatchBuilder();
- // ipv4Match.setIpv4Destination(new Ipv4Prefix(cliInput.get(4)));
- Ipv4Prefix prefix = new Ipv4Prefix("10.0.0.1/24");
- ipv4Match.setIpv4Destination(prefix);
- Ipv4Match i4m = ipv4Match.build();
- match.setLayer3Match(i4m);
- flow.setMatch(match.build());
-
-
-
- // Create a drop action
- /*
- * Note: We are mishandling drop actions DropAction dropAction = new
- * DropActionBuilder().build(); ActionBuilder ab = new ActionBuilder();
- * ab.setAction(dropAction);
- */
-
- DecNwTtl decNwTtl = new DecNwTtlBuilder().build();
- ActionBuilder ab = new ActionBuilder();
- ActionKey actionKey = new ActionKey(0);
- ab.setKey(actionKey );
- ab.setAction(new DecNwTtlCaseBuilder().setDecNwTtl(decNwTtl).build());
-
- // Add our drop action to a list
- List<Action> actionList = new ArrayList<Action>();
- actionList.add(ab.build());
-
- // Create an Apply Action
- ApplyActionsBuilder aab = new ApplyActionsBuilder();
- aab.setAction(actionList);
-
- // Wrap our Apply Action in an Instruction
- InstructionBuilder ib = new InstructionBuilder();
- ib.setOrder(0);
- ib.setInstruction(new ApplyActionsCaseBuilder().setApplyActions(aab.build()).build());
-
- // Put our Instruction in a list of Instructions
- InstructionsBuilder isb = new InstructionsBuilder();
- List<Instruction> instructions = new ArrayList<Instruction>();
- instructions.add(ib.build());
- isb.setInstruction(instructions);
-
- // Add our instructions to the flow
- flow.setInstructions(isb.build());
-
- flow.setPriority(2);
- flow.setFlowName("Foo Name");
- CompositeNode domFlow = mappingService.toDataDom(flow.build());
- return domFlow;
+ private CompositeNode createTestList1() {
+ List1Builder l1b = new List1Builder();
+ List11Builder l11b = new List11Builder();
+ List12Builder l12b = new List12Builder();
+ l11b.setKey(LIST11_KEY);
+ l11b.setAttrStr("foo:foo:foo");
+ l12b.setKey(new List12Key(321));
+ l12b.setAttrStr("foo:foo:bar");
+ l1b.setKey(LIST1_KEY);
+ l1b.setList11(ImmutableList.of(l11b.build()));
+ l1b.setList12(ImmutableList.of(l12b.build()));
+ CompositeNode domList1 = mappingService.toDataDom(l1b.build());
+ return domList1;
}
}
*/
package org.opendaylight.controller.sal.binding.test.connect.dom;
-import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
-import java.math.BigInteger;
import java.util.Collections;
import java.util.Map;
import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.statistics.GroupStatistics;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.List11SimpleAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.top.top.level.list.list1.list1._1.Cont;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
@SuppressWarnings("deprecation")
public class CrossBrokerMountPointTest {
- private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
- private static final String NODE_ID = "node:1";
+ private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
+ private static final String TLL_NAME = "foo:1";
+
+ private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
- private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
+ private static final Map<QName, Object> TLL_KEY_BI = Collections.<QName, Object> singletonMap(TLL_NAME_QNAME,
+ TLL_NAME);
- private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
- NODE_ID);
+ private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
+ .child(TopLevelList.class, TLL_KEY).build();
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
- .child(Node.class, NODE_KEY).toInstance();
- private static GroupKey GROUP_KEY = new GroupKey(new GroupId(0L));
+ private static final List1Key LIST1_KEY = new List1Key("foo");
+ private static final List11Key LIST11_KEY = new List11Key(1);
- private static final InstanceIdentifier<GroupStatistics> GROUP_STATISTICS_ID_BA = NODE_INSTANCE_ID_BA
- .builder().augmentation(FlowCapableNode.class) //
- .child(Group.class, GROUP_KEY) //
- .augmentation(NodeGroupStatistics.class) //
- .child(GroupStatistics.class) //
- .toInstance();
+ private static final InstanceIdentifier<Cont> AUG_CONT_ID_BA = TLL_INSTANCE_ID_BA
+ .builder().augmentation(TllComplexAugment.class) //
+ .child(List1.class, LIST1_KEY) //
+ .child(List11.class, LIST11_KEY) //
+ .augmentation(List11SimpleAugment.class) //
+ .child(Cont.class) //
+ .build();
- private static final QName AUGMENTED_GROUP_STATISTICS = QName.create(NodeGroupStatistics.QNAME,
- GroupStatistics.QNAME.getLocalName());
+ private static final QName AUG_CONT = QName.create(List11.QNAME,
+ Cont.QNAME.getLocalName());
- private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier NODE_INSTANCE_ID_BI = //
+ private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier TLL_INSTANCE_ID_BI = //
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .nodeWithKey(Node.QNAME, NODE_KEY_BI) //
- .toInstance();
+ .node(Top.QNAME) //
+ .nodeWithKey(TopLevelList.QNAME, TLL_KEY_BI) //
+ .build();
private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier GROUP_STATISTICS_ID_BI = org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier
//
- .builder(NODE_INSTANCE_ID_BI)
- .nodeWithKey(QName.create(FlowCapableNode.QNAME, "group"), QName.create(FlowCapableNode.QNAME, "group-id"),
- 0L).node(AUGMENTED_GROUP_STATISTICS).toInstance();
+ .builder(TLL_INSTANCE_ID_BI)
+ .nodeWithKey(QName.create(TllComplexAugment.QNAME, "list1"), QName.create(TllComplexAugment.QNAME, "attr-str"),
+ LIST1_KEY.getAttrStr())
+ .nodeWithKey(QName.create(TllComplexAugment.QNAME, "list1-1"), QName.create(TllComplexAugment.QNAME, "attr-int"),
+ LIST11_KEY.getAttrInt())
+ .node(AUG_CONT).build();
private BindingTestContext testContext;
private MountProviderService bindingMountPointService;
@Test
public void testMountPoint() {
- testContext.getBindingDataBroker().readOperationalData(NODE_INSTANCE_ID_BA);
+ testContext.getBindingDataBroker().readOperationalData(TLL_INSTANCE_ID_BA);
- MountProvisionInstance domMountPoint = domMountPointService.createMountPoint(NODE_INSTANCE_ID_BI);
+ MountProvisionInstance domMountPoint = domMountPointService.createMountPoint(TLL_INSTANCE_ID_BI);
assertNotNull(domMountPoint);
- MountProviderInstance bindingMountPoint = bindingMountPointService.getMountPoint(NODE_INSTANCE_ID_BA);
+ MountProviderInstance bindingMountPoint = bindingMountPointService.getMountPoint(TLL_INSTANCE_ID_BA);
assertNotNull(bindingMountPoint);
- final BigInteger packetCount = BigInteger.valueOf(500L);
+ final Integer attrIntalue = 500;
DataReader<org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier, CompositeNode> simpleReader = new DataReader<org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier, CompositeNode>() {
if (arg0.equals(GROUP_STATISTICS_ID_BI)) {
ImmutableCompositeNode data = ImmutableCompositeNode
.builder()
- .setQName(AUGMENTED_GROUP_STATISTICS)
- .addLeaf(QName.create(AUGMENTED_GROUP_STATISTICS, "packet-count"), packetCount) //
- .toInstance();
+ .setQName(AUG_CONT)
+ .addLeaf(QName.create(AUG_CONT, "attr-int"), attrIntalue) //
+ .build();
return data;
}
}
};
- domMountPoint.registerOperationalReader(NODE_INSTANCE_ID_BI, simpleReader);
+ domMountPoint.registerOperationalReader(TLL_INSTANCE_ID_BI, simpleReader);
- GroupStatistics data = (GroupStatistics) bindingMountPoint.readOperationalData(GROUP_STATISTICS_ID_BA);
+ Cont data = (Cont) bindingMountPoint.readOperationalData(AUG_CONT_ID_BA);
assertNotNull(data);
- assertEquals(packetCount,data.getPacketCount().getValue());
+ assertEquals(attrIntalue, data.getAttrInt());
}
}
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
-import java.math.BigInteger;
-import java.util.Collections;
-import java.util.Set;
-import java.util.concurrent.Future;
-
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeFlowRemoved;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeContext;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.OpendaylightOfMigrationTestModelService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.TestContext;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.Future;
public class CrossBrokerRpcTest {
- protected RpcProviderRegistry baRpcRegistry;
- protected RpcProvisionRegistry biRpcRegistry;
+ protected RpcProviderRegistry providerRegistry;
+ protected RpcProvisionRegistry provisionRegistry;
private BindingTestContext testContext;
private RpcImplementation biRpcInvoker;
- private MessageCapturingFlowService flowService;
+ private MessageCapturingFlowService knockService;
- public static final NodeId NODE_A = new NodeId("a");
- public static final NodeId NODE_B = new NodeId("b");
- public static final NodeId NODE_C = new NodeId("c");
- public static final NodeId NODE_D = new NodeId("d");
+ public static final TopLevelListKey NODE_A = new TopLevelListKey("a");
+ public static final TopLevelListKey NODE_B = new TopLevelListKey("b");
+ public static final TopLevelListKey NODE_C = new TopLevelListKey("c");
- private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
- private static final QName ADD_FLOW_QNAME = QName.create(NodeFlowRemoved.QNAME, "add-flow");
+ private static final QName NODE_ID_QNAME = QName.create(TopLevelList.QNAME, "name");
+ private static final QName KNOCK_KNOCK_QNAME = QName.create(KnockKnockOutput.QNAME, "knock-knock");
- public static final InstanceIdentifier<Node> BA_NODE_A_ID = createBANodeIdentifier(NODE_A);
- public static final InstanceIdentifier<Node> BA_NODE_B_ID = createBANodeIdentifier(NODE_B);
- public static final InstanceIdentifier<Node> BA_NODE_C_ID = createBANodeIdentifier(NODE_C);
- public static final InstanceIdentifier<Node> BA_NODE_D_ID = createBANodeIdentifier(NODE_D);
+ public static final InstanceIdentifier<Top> NODES_PATH = InstanceIdentifier.builder(Top.class).build();
+ public static final InstanceIdentifier<TopLevelList> BA_NODE_A_ID = NODES_PATH.child(TopLevelList.class, NODE_A);
+ public static final InstanceIdentifier<TopLevelList> BA_NODE_B_ID = NODES_PATH.child(TopLevelList.class, NODE_B);
+ public static final InstanceIdentifier<TopLevelList> BA_NODE_C_ID = NODES_PATH.child(TopLevelList.class, NODE_C);
- public static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_NODE_A_ID = createBINodeIdentifier(NODE_A);
- public static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_NODE_B_ID = createBINodeIdentifier(NODE_B);
public static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_NODE_C_ID = createBINodeIdentifier(NODE_C);
- public static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_NODE_D_ID = createBINodeIdentifier(NODE_D);
-
@Before
testContext = testFactory.getTestContext();
testContext.start();
- baRpcRegistry = testContext.getBindingRpcRegistry();
- biRpcRegistry = testContext.getDomRpcRegistry();
+ providerRegistry = testContext.getBindingRpcRegistry();
+ provisionRegistry = testContext.getDomRpcRegistry();
biRpcInvoker = testContext.getDomRpcInvoker();
- assertNotNull(baRpcRegistry);
- assertNotNull(biRpcRegistry);
+ assertNotNull(providerRegistry);
+ assertNotNull(provisionRegistry);
- flowService = MessageCapturingFlowService.create(baRpcRegistry);
+ knockService = MessageCapturingFlowService.create(providerRegistry);
}
@Test
public void bindingRoutedRpcProvider_DomInvokerTest() throws Exception {
- flowService//
- .registerPath(NodeContext.class, BA_NODE_A_ID) //
- .registerPath(NodeContext.class, BA_NODE_B_ID) //
- .setAddFlowResult(addFlowResult(true, 10));
+ knockService//
+ .registerPath(TestContext.class, BA_NODE_A_ID) //
+ .registerPath(TestContext.class, BA_NODE_B_ID) //
+ .setKnockKnockResult(knockResult(true, "open"));
- SalFlowService baFlowInvoker = baRpcRegistry.getRpcService(SalFlowService.class);
- assertNotSame(flowService, baFlowInvoker);
+ OpendaylightOfMigrationTestModelService baKnockInvoker =
+ providerRegistry.getRpcService(OpendaylightOfMigrationTestModelService.class);
+ assertNotSame(knockService, baKnockInvoker);
- AddFlowInput addFlowA = addFlow(BA_NODE_A_ID) //
- .setPriority(100).setBarrier(true).build();
+ KnockKnockInput knockKnockA = knockKnock(BA_NODE_A_ID) //
+ .setQuestion("who's there?").build();
- CompositeNode addFlowDom = toDomRpc(ADD_FLOW_QNAME, addFlowA);
- assertNotNull(addFlowDom);
- RpcResult<CompositeNode> domResult = biRpcInvoker.invokeRpc(ADD_FLOW_QNAME, addFlowDom).get();
+ CompositeNode knockKnockDom = toDomRpc(KNOCK_KNOCK_QNAME, knockKnockA);
+ assertNotNull(knockKnockDom);
+ RpcResult<CompositeNode> domResult = biRpcInvoker.invokeRpc(KNOCK_KNOCK_QNAME, knockKnockDom).get();
assertNotNull(domResult);
assertTrue("DOM result is successful.", domResult.isSuccessful());
- assertTrue("Bidning Add Flow RPC was captured.", flowService.getReceivedAddFlows().containsKey(BA_NODE_A_ID));
- assertEquals(addFlowA, flowService.getReceivedAddFlows().get(BA_NODE_A_ID).iterator().next());
+ assertTrue("Bidning Add Flow RPC was captured.", knockService.getReceivedKnocks().containsKey(BA_NODE_A_ID));
+ assertEquals(knockKnockA, knockService.getReceivedKnocks().get(BA_NODE_A_ID).iterator().next());
}
@Test
public void bindingRpcInvoker_DomRoutedProviderTest() throws Exception {
- AddFlowOutputBuilder builder = new AddFlowOutputBuilder();
- builder.setTransactionId(new TransactionId(BigInteger.valueOf(10)));
- final AddFlowOutput output = builder.build();
- org.opendaylight.controller.sal.core.api.Broker.RoutedRpcRegistration registration = biRpcRegistry.addRoutedRpcImplementation(ADD_FLOW_QNAME, new RpcImplementation() {
+ KnockKnockOutputBuilder builder = new KnockKnockOutputBuilder();
+ builder.setAnswer("open");
+ final KnockKnockOutput output = builder.build();
+ org.opendaylight.controller.sal.core.api.Broker.RoutedRpcRegistration registration = provisionRegistry.addRoutedRpcImplementation(KNOCK_KNOCK_QNAME, new RpcImplementation() {
@Override
public Set<QName> getSupportedRpcs() {
- return ImmutableSet.of(ADD_FLOW_QNAME);
+ return ImmutableSet.of(KNOCK_KNOCK_QNAME);
}
@Override
return Futures.immediateFuture(RpcResultBuilder.<CompositeNode>success(result).build());
}
});
- registration.registerPath(NodeContext.QNAME, BI_NODE_C_ID);
+ registration.registerPath(TestContext.QNAME, BI_NODE_C_ID);
+
- SalFlowService baFlowInvoker = baRpcRegistry.getRpcService(SalFlowService.class);
- Future<RpcResult<AddFlowOutput>> baResult = baFlowInvoker.addFlow(addFlow(BA_NODE_C_ID).setPriority(500).build());
+ OpendaylightOfMigrationTestModelService baKnockInvoker =
+ providerRegistry.getRpcService(OpendaylightOfMigrationTestModelService.class);
+ Future<RpcResult<KnockKnockOutput>> baResult = baKnockInvoker.knockKnock((knockKnock(BA_NODE_C_ID).setQuestion("Who's there?").build()));
assertNotNull(baResult);
- assertEquals(output,baResult.get().getResult());
+ assertEquals(output, baResult.get().getResult());
}
private CompositeNode toDomRpcInput(DataObject addFlowA) {
testContext.close();
}
- private static InstanceIdentifier<Node> createBANodeIdentifier(NodeId node) {
- return InstanceIdentifier.builder(Nodes.class).child(Node.class, new NodeKey(node)).toInstance();
- }
-
- private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBINodeIdentifier(NodeId node) {
- return org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder().node(Nodes.QNAME)
- .nodeWithKey(Node.QNAME, NODE_ID_QNAME, node.getValue()).toInstance();
+ private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBINodeIdentifier(TopLevelListKey listKey) {
+ return org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder().node(Top.QNAME)
+ .nodeWithKey(TopLevelList.QNAME, NODE_ID_QNAME, listKey.getName()).toInstance();
}
- private Future<RpcResult<AddFlowOutput>> addFlowResult(boolean success, long xid) {
- AddFlowOutput output = new AddFlowOutputBuilder() //
- .setTransactionId(new TransactionId(BigInteger.valueOf(xid))).build();
- RpcResult<AddFlowOutput> result = RpcResultBuilder.<AddFlowOutput>status(success).withResult(output).build();
+ private Future<RpcResult<KnockKnockOutput>> knockResult(boolean success, String answer) {
+ KnockKnockOutput output = new KnockKnockOutputBuilder() //
+ .setAnswer(answer).build();
+ RpcResult<KnockKnockOutput> result = RpcResultBuilder.<KnockKnockOutput>status(success).withResult(output).build();
return Futures.immediateFuture(result);
}
- private static AddFlowInputBuilder addFlow(InstanceIdentifier<Node> nodeId) {
- AddFlowInputBuilder builder = new AddFlowInputBuilder();
- builder.setNode(new NodeRef(nodeId));
+ private static KnockKnockInputBuilder knockKnock(InstanceIdentifier<TopLevelList> listId) {
+ KnockKnockInputBuilder builder = new KnockKnockInputBuilder();
+ builder.setKnockerId(listId);
return builder;
}
- private CompositeNode toDomRpc(QName rpcName, AddFlowInput addFlowA) {
+ private CompositeNode toDomRpc(QName rpcName, KnockKnockInput knockInput) {
return new CompositeNodeTOImpl(rpcName, null,
- Collections.<org.opendaylight.yangtools.yang.data.api.Node<?>> singletonList(toDomRpcInput(addFlowA)));
+ Collections.<org.opendaylight.yangtools.yang.data.api.Node<?>>singletonList(toDomRpcInput(knockInput)));
}
}
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.OpendaylightTestRpcServiceService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.RockTheHouseInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
import org.opendaylight.yangtools.yang.common.QName;
private final static QName RPC_NAME = QName.create(RPC_SERVICE_NAMESPACE,
REVISION_DATE, "rock-the-house");
- private static final NodeId MOUNT_NODE = new NodeId("id");
- private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
+ private static final String TLL_NAME = "id";
+ private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
- private static final InstanceIdentifier<Node> BA_MOUNT_ID = createBANodeIdentifier(MOUNT_NODE);
- private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_MOUNT_ID = createBINodeIdentifier(MOUNT_NODE);
+ private static final InstanceIdentifier<TopLevelList> BA_MOUNT_ID = createBATllIdentifier(TLL_NAME);
+ private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_MOUNT_ID = createBITllIdentifier(TLL_NAME);
private BindingTestContext testContext;
private MountProvisionService domMountPointService;
schemaContext = mountSchemaContext;
}
- private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBINodeIdentifier(
- final NodeId mountNode) {
+ private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBITllIdentifier(
+ final String mount) {
return org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier
- .builder().node(Nodes.QNAME)
- .nodeWithKey(Node.QNAME, NODE_ID_QNAME, mountNode.getValue())
+ .builder().node(Top.QNAME)
+ .nodeWithKey(TopLevelList.QNAME, TLL_NAME_QNAME, mount)
.toInstance();
}
- private static InstanceIdentifier<Node> createBANodeIdentifier(
- final NodeId mountNode) {
- return InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, new NodeKey(mountNode)).toInstance();
+ private static InstanceIdentifier<TopLevelList> createBATllIdentifier(
+ final String mount) {
+ return InstanceIdentifier.builder(Top.class)
+ .child(TopLevelList.class, new TopLevelListKey(mount)).toInstance();
}
@SuppressWarnings("deprecation")
import java.util.concurrent.Future;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowOutput;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.OpendaylightOfMigrationTestModelService;
import org.opendaylight.yangtools.yang.binding.BaseIdentity;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
-public class MessageCapturingFlowService implements SalFlowService, AutoCloseable {
-
- private Future<RpcResult<AddFlowOutput>> addFlowResult;
- private Future<RpcResult<RemoveFlowOutput>> removeFlowResult;
- private Future<RpcResult<UpdateFlowOutput>> updateFlowResult;
-
- private final Multimap<InstanceIdentifier<?>, AddFlowInput> receivedAddFlows = HashMultimap.create();
- private final Multimap<InstanceIdentifier<?>, RemoveFlowInput> receivedRemoveFlows = HashMultimap.create();
- private final Multimap<InstanceIdentifier<?>, UpdateFlowInput> receivedUpdateFlows = HashMultimap.create();
- private RoutedRpcRegistration<SalFlowService> registration;
-
- @Override
- public Future<RpcResult<AddFlowOutput>> addFlow(AddFlowInput arg0) {
- receivedAddFlows.put(arg0.getNode().getValue(), arg0);
- return addFlowResult;
- }
-
- @Override
- public Future<RpcResult<RemoveFlowOutput>> removeFlow(RemoveFlowInput arg0) {
- receivedRemoveFlows.put(arg0.getNode().getValue(), arg0);
- return removeFlowResult;
- }
-
- @Override
- public Future<RpcResult<UpdateFlowOutput>> updateFlow(UpdateFlowInput arg0) {
- receivedUpdateFlows.put(arg0.getNode().getValue(), arg0);
- return updateFlowResult;
- }
-
- public Future<RpcResult<AddFlowOutput>> getAddFlowResult() {
- return addFlowResult;
- }
+public class MessageCapturingFlowService implements OpendaylightOfMigrationTestModelService, AutoCloseable {
- public MessageCapturingFlowService setAddFlowResult(Future<RpcResult<AddFlowOutput>> addFlowResult) {
- this.addFlowResult = addFlowResult;
- return this;
- }
+ private Future<RpcResult<KnockKnockOutput>> knockKnockResult;
- public Future<RpcResult<RemoveFlowOutput>> getRemoveFlowResult() {
- return removeFlowResult;
- }
-
- public MessageCapturingFlowService setRemoveFlowResult(Future<RpcResult<RemoveFlowOutput>> removeFlowResult) {
- this.removeFlowResult = removeFlowResult;
- return this;
- }
+ private final Multimap<InstanceIdentifier<?>, KnockKnockInput> receivedKnocks = HashMultimap.create();
+ private RoutedRpcRegistration<OpendaylightOfMigrationTestModelService> registration;
- public Future<RpcResult<UpdateFlowOutput>> getUpdateFlowResult() {
- return updateFlowResult;
+ public Future<RpcResult<KnockKnockOutput>> getKnockKnockResult() {
+ return knockKnockResult;
}
- public MessageCapturingFlowService setUpdateFlowResult(Future<RpcResult<UpdateFlowOutput>> updateFlowResult) {
- this.updateFlowResult = updateFlowResult;
+ public MessageCapturingFlowService setKnockKnockResult(Future<RpcResult<KnockKnockOutput>> kkOutput) {
+ this.knockKnockResult = kkOutput;
return this;
}
- public Multimap<InstanceIdentifier<?>, AddFlowInput> getReceivedAddFlows() {
- return receivedAddFlows;
- }
-
- public Multimap<InstanceIdentifier<?>, RemoveFlowInput> getReceivedRemoveFlows() {
- return receivedRemoveFlows;
- }
-
- public Multimap<InstanceIdentifier<?>, UpdateFlowInput> getReceivedUpdateFlows() {
- return receivedUpdateFlows;
+ public Multimap<InstanceIdentifier<?>, KnockKnockInput> getReceivedKnocks() {
+ return receivedKnocks;
}
public MessageCapturingFlowService registerTo(RpcProviderRegistry registry) {
- registration = registry.addRoutedRpcImplementation(SalFlowService.class, this);
+ registration = registry.addRoutedRpcImplementation(OpendaylightOfMigrationTestModelService.class, this);
assertNotNull(registration);
return this;
}
return ret;
}
+ @Override
+ public Future<RpcResult<KnockKnockOutput>> knockKnock(KnockKnockInput input) {
+ receivedKnocks.put(input.getKnockerId(), input);
+ return knockKnockResult;
+ }
+
}
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-monitoring</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-binding-broker-impl</artifactId>
<groupId>org.opendaylight.yangtools.thirdparty</groupId>
<artifactId>antlr4-runtime-osgi-nohead</artifactId>
</dependency>
+
<!--Compile scopes for all testing dependencies are intentional-->
<!--This way, all testing dependencies can be transitively used by other integration test modules-->
<!--If the dependencies are test scoped, they are not visible to other maven modules depending on sal-binding-it-->
<artifactId>log4j-over-slf4j</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-service</artifactId>
- <scope>provided</scope>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-test-model</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>opendaylight-l2-types</artifactId>
</dependency>
</dependencies>
import static org.ops4j.pax.exam.CoreOptions.frameworkProperty;
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
+import static org.ops4j.pax.exam.CoreOptions.systemPackages;
import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.options.DefaultCompositeOption;
import org.ops4j.pax.exam.util.PathUtils;
bindingAwareSalBundles(),
mavenBundle("commons-codec", "commons-codec").versionAsInProject(),
- systemProperty("org.osgi.framework.system.packages.extra").value("sun.nio.ch"),
+ systemPackages("sun.nio.ch", "sun.misc"),
mavenBundle("io.netty", "netty-common").versionAsInProject(), //
mavenBundle("io.netty", "netty-buffer").versionAsInProject(), //
mavenBundle("io.netty", "netty-handler").versionAsInProject(), //
mavenBundle("org.eclipse.birt.runtime.3_7_1", "org.apache.xml.resolver", "1.2.0"),
mavenBundle(CONTROLLER, "config-netconf-connector").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "netconf-notifications-api").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "ietf-netconf").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "ietf-netconf-notifications").versionAsInProject(), //
mavenBundle(CONTROLLER, "netconf-impl").versionAsInProject(), //
mavenBundle(CONTROLLER, "config-persister-file-xml-adapter").versionAsInProject().noStart(),
mavenBundle(CONTROLLER, "sal-common-util").versionAsInProject(), // //
- mavenBundle(CONTROLLER, "sal-inmemory-datastore").versionAsInProject(), // /
+ mavenBundle("com.lmax", "disruptor").versionAsInProject(),
+ mavenBundle(CONTROLLER, "sal-inmemory-datastore").versionAsInProject(), //
mavenBundle(CONTROLLER, "sal-broker-impl").versionAsInProject(), // //
mavenBundle(CONTROLLER, "sal-core-spi").versionAsInProject().update(), //
}
+ /**
+ * @return option containing models for testing purposes
+ */
+ public static Option salTestModelBundles() {
+ return new DefaultCompositeOption( //
+ mavenBundle(CONTROLLER, "sal-test-model").versionAsInProject()
+ );
+
+ }
+
public static Option baseModelBundles() {
return new DefaultCompositeOption( //
mavenBundle(YANGTOOLS_MODELS, "yang-ext").versionAsInProject(), // //
mavenBundle(YANGTOOLS_MODELS, "ietf-inet-types").versionAsInProject(), // //
mavenBundle(YANGTOOLS_MODELS, "ietf-yang-types").versionAsInProject(), // //
- mavenBundle(YANGTOOLS_MODELS, "opendaylight-l2-types").versionAsInProject(), // //
- mavenBundle(CONTROLLER_MODELS, "model-inventory").versionAsInProject());
+ mavenBundle(YANGTOOLS_MODELS, "opendaylight-l2-types").versionAsInProject() // //
+ );
}
public static Option junitAndMockitoBundles() {
import static org.opendaylight.controller.test.sal.binding.it.TestHelper.baseModelBundles;
import static org.opendaylight.controller.test.sal.binding.it.TestHelper.bindingAwareSalBundles;
import static org.opendaylight.controller.test.sal.binding.it.TestHelper.configMinumumBundles;
-import static org.opendaylight.controller.test.sal.binding.it.TestHelper.flowCapableModelBundles;
+import static org.opendaylight.controller.test.sal.binding.it.TestHelper.salTestModelBundles;
import static org.opendaylight.controller.test.sal.binding.it.TestHelper.junitAndMockitoBundles;
import static org.opendaylight.controller.test.sal.binding.it.TestHelper.mdSalCoreBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
configMinumumBundles(),
// BASE Models
baseModelBundles(),
- flowCapableModelBundles(),
+ salTestModelBundles(),
// Set fail if unresolved bundle present
systemProperty("pax.exam.osgi.unresolved.fail").value("true"),
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import com.google.inject.Inject;
import java.util.concurrent.Future;
-import org.junit.Before;
-import org.junit.Ignore;
+
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext;
import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.controller.sal.core.api.Broker;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.Lists;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListKey;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
+import com.google.inject.Inject;
+
+/**
+ * covers creating, reading and deleting of an item in dataStore
+ */
public class DataServiceTest extends AbstractTest {
protected DataBrokerService consumerDataService;
-
@Inject
Broker broker2;
- @Before
- public void setUp() throws Exception {
- }
-
- /*
+ /**
*
* Ignored this, because classes here are constructed from
* very different class loader as MD-SAL is run into,
* this is code is run from different classloader.
*
+ * @throws Exception
*/
@Test
- @Ignore
public void test() throws Exception {
BindingAwareConsumer consumer1 = new BindingAwareConsumer() {
consumerDataService = session.getSALService(DataBrokerService.class);
}
};
- broker.registerConsumer(consumer1, getBundleContext());
+ broker.registerConsumer(consumer1);
assertNotNull(consumerDataService);
DataModificationTransaction transaction = consumerDataService.beginTransaction();
assertNotNull(transaction);
- InstanceIdentifier<Node> node1 = createNodeRef("0");
- DataObject node = consumerDataService.readConfigurationData(node1);
+ InstanceIdentifier<UnorderedList> node1 = createNodeRef("0");
+ DataObject node = consumerDataService.readConfigurationData(node1);
assertNull(node);
- Node nodeData1 = createNode("0");
+ UnorderedList nodeData1 = createNode("0");
transaction.putConfigurationData(node1, nodeData1);
Future<RpcResult<TransactionStatus>> commitResult = transaction.commit();
assertNotNull(result.getResult());
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Node readedData = (Node) consumerDataService.readConfigurationData(node1);
+ UnorderedList readedData = (UnorderedList) consumerDataService.readConfigurationData(node1);
assertNotNull(readedData);
assertEquals(nodeData1.getKey(), readedData.getKey());
DataModificationTransaction transaction2 = consumerDataService.beginTransaction();
- assertNotNull(transaction);
+ assertNotNull(transaction2);
transaction2.removeConfigurationData(node1);
DataObject readedData2 = consumerDataService.readConfigurationData(node1);
assertNull(readedData2);
-
-
}
- private static InstanceIdentifier<Node> createNodeRef(final String string) {
- NodeKey key = new NodeKey(new NodeId(string));
- return InstanceIdentifier.builder(Nodes.class).child(Node.class, key).build();
+ private static InstanceIdentifier<UnorderedList> createNodeRef(final String string) {
+ UnorderedListKey key = new UnorderedListKey(string);
+ return InstanceIdentifier.builder(Lists.class).child(UnorderedContainer.class).child(UnorderedList.class, key).build();
}
- private static Node createNode(final String string) {
- NodeBuilder ret = new NodeBuilder();
- NodeId id = new NodeId(string);
- ret.setKey(new NodeKey(id));
- ret.setId(id);
+ private static UnorderedList createNode(final String string) {
+ UnorderedListBuilder ret = new UnorderedListBuilder();
+ UnorderedListKey nodeKey = new UnorderedListKey(string);
+ ret.setKey(nodeKey);
+ ret.setName("name of " + string);
+ ret.setName("value of " + string);
return ret.build();
}
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
-import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.NotificationService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowAdded;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowAddedBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowRemoved;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeErrorNotification;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeExperimenterErrorNotification;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SwitchFlowRemoved;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OpendaylightTestNotificationListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotificationBuilder;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-@Ignore
+/**
+ * covers registering of notification listener, publishing of notification and receiving of notification.
+ */
public class NotificationTest extends AbstractTest {
- private final FlowListener listener1 = new FlowListener();
- private final FlowListener listener2 = new FlowListener();
+ private static final Logger LOG = LoggerFactory
+ .getLogger(NotificationTest.class);
- private ListenerRegistration<NotificationListener> listener1Reg;
- private ListenerRegistration<NotificationListener> listener2Reg;
+ protected final NotificationTestListener listener1 = new NotificationTestListener();
+ protected final NotificationTestListener listener2 = new NotificationTestListener();
- private NotificationProviderService notifyProviderService;
+ protected ListenerRegistration<NotificationListener> listener1Reg;
+ protected ListenerRegistration<NotificationListener> listener2Reg;
- @Before
- public void setUp() throws Exception {
- }
+ protected NotificationProviderService notifyProviderService;
+ /**
+ * test of delivering of notification
+ * @throws Exception
+ */
@Test
public void notificationTest() throws Exception {
- /**
- *
- * The registration of the Provider 1.
- *
- */
+ LOG.info("The registration of the Provider 1.");
AbstractTestProvider provider1 = new AbstractTestProvider() {
@Override
public void onSessionInitiated(ProviderContext session) {
};
// registerProvider method calls onSessionInitiated method above
- broker.registerProvider(provider1, getBundleContext());
+ broker.registerProvider(provider1);
assertNotNull(notifyProviderService);
- /**
- *
- * The registration of the Consumer 1. It retrieves Notification Service
- * from MD-SAL and registers SalFlowListener as notification listener
- *
- */
+ LOG.info("The registration of the Consumer 1. It retrieves Notification Service "
+ + "from MD-SAL and registers OpendaylightTestNotificationListener as notification listener");
BindingAwareConsumer consumer1 = new BindingAwareConsumer() {
@Override
public void onSessionInitialized(ConsumerContext session) {
}
};
// registerConsumer method calls onSessionInitialized method above
- broker.registerConsumer(consumer1, getBundleContext());
+ broker.registerConsumer(consumer1);
assertNotNull(listener1Reg);
- /**
- * The notification of type FlowAdded with cookie ID 0 is created. The
- * delay 100ms to make sure that the notification was delivered to
- * listener.
- */
- notifyProviderService.publish(flowAdded(0));
+ LOG.info("The notification of type FlowAdded with cookie ID 0 is created. The "
+ + "delay 100ms to make sure that the notification was delivered to "
+ + "listener.");
+ notifyProviderService.publish(noDustNotification("rainy day", 42));
Thread.sleep(100);
/**
* Check that one notification was delivered and has correct cookie.
*
*/
- assertEquals(1, listener1.addedFlows.size());
- assertEquals(0, listener1.addedFlows.get(0).getCookie().getValue().intValue());
+ assertEquals(1, listener1.notificationBag.size());
+ assertEquals("rainy day", listener1.notificationBag.get(0).getReason());
+ assertEquals(42, listener1.notificationBag.get(0).getDaysTillNewDust().intValue());
- /**
- * The registration of the Consumer 2. SalFlowListener is registered
- * registered as notification listener.
- */
+ LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
+ + "registered as notification listener.");
BindingAwareProvider provider = new BindingAwareProvider() {
@Override
};
// registerConsumer method calls onSessionInitialized method above
- broker.registerProvider(provider, getBundleContext());
+ broker.registerProvider(provider);
- /**
- * 3 notifications are published
- */
- notifyProviderService.publish(flowAdded(5));
- notifyProviderService.publish(flowAdded(10));
- notifyProviderService.publish(flowAdded(2));
+ LOG.info("3 notifications are published");
+ notifyProviderService.publish(noDustNotification("rainy day", 5));
+ notifyProviderService.publish(noDustNotification("rainy day", 10));
+ notifyProviderService.publish(noDustNotification("tax collector", 2));
/**
* The delay 100ms to make sure that the notifications were delivered to
* received 4 in total, second 3 in total).
*
*/
- assertEquals(4, listener1.addedFlows.size());
- assertEquals(3, listener2.addedFlows.size());
+ assertEquals(4, listener1.notificationBag.size());
+ assertEquals(3, listener2.notificationBag.size());
/**
* The second listener is closed (unregistered)
*/
listener2Reg.close();
- /**
- *
- * The notification 5 is published
- */
- notifyProviderService.publish(flowAdded(10));
+ LOG.info("The notification 5 is published");
+ notifyProviderService.publish(noDustNotification("entomologist hunt", 10));
/**
* The delay 100ms to make sure that the notification was delivered to
* second consumer because its listener was unregistered.
*
*/
- assertEquals(5, listener1.addedFlows.size());
- assertEquals(3, listener2.addedFlows.size());
+ assertEquals(5, listener1.notificationBag.size());
+ assertEquals(3, listener2.notificationBag.size());
}
/**
- * Creates instance of the type FlowAdded. Only cookie value is set. It is
+ * Creates instance of the type OutOfPixieDustNotification. It is
* used only for testing purpose.
*
- * @param i
- * cookie value
- * @return instance of the type FlowAdded
+ * @param reason textual reason why the pixie dust ran out
+ * @param days number of days until new dust is expected
+ * @return instance of the type OutOfPixieDustNotification
*/
- public static FlowAdded flowAdded(int i) {
- FlowAddedBuilder ret = new FlowAddedBuilder();
- ret.setCookie(new FlowCookie(BigInteger.valueOf(i)));
+ public static OutOfPixieDustNotification noDustNotification(String reason, int days) {
+ OutOfPixieDustNotificationBuilder ret = new OutOfPixieDustNotificationBuilder();
+ ret.setReason(reason).setDaysTillNewDust(days);
return ret.build();
}
/**
*
* Implements
- * {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowListener
- * SalFlowListener} and contains attributes which keep lists of objects of
- * the type
- * {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819. NodeFlow
- * NodeFlow}. The lists are defined for flows which were added, removed or
- * updated.
+ * {@link OpendaylightTestNotificationListener} and contains attributes which keep lists of objects of
+ * the type {@link OutOfPixieDustNotification}.
*/
- private static class FlowListener implements SalFlowListener {
-
- List<FlowAdded> addedFlows = new ArrayList<>();
- List<FlowRemoved> removedFlows = new ArrayList<>();
- List<FlowUpdated> updatedFlows = new ArrayList<>();
-
- @Override
- public void onFlowAdded(FlowAdded notification) {
- addedFlows.add(notification);
- }
-
- @Override
- public void onFlowRemoved(FlowRemoved notification) {
- removedFlows.add(notification);
- };
-
- @Override
- public void onFlowUpdated(FlowUpdated notification) {
- updatedFlows.add(notification);
- }
-
- @Override
- public void onSwitchFlowRemoved(SwitchFlowRemoved notification) {
- // TODO Auto-generated method stub
-
- }
+ public static class NotificationTestListener implements OpendaylightTestNotificationListener {
- @Override
- public void onNodeErrorNotification(NodeErrorNotification notification) {
- // TODO Auto-generated method stub
-
- }
+ List<OutOfPixieDustNotification> notificationBag = new ArrayList<>();
@Override
- public void onNodeExperimenterErrorNotification(
- NodeExperimenterErrorNotification notification) {
- // TODO Auto-generated method stub
-
+ public void onOutOfPixieDustNotification(OutOfPixieDustNotification arg0) {
+ notificationBag.add(arg0);
}
}
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import java.math.BigInteger;
-
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
import org.opendaylight.controller.sal.binding.api.BindingAwareConsumer;
import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeContext;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.TestContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.Lists;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+/**
+ * Covers routed RPC creation, registration, invocation and unregistration.
+ */
public class RoutedServiceTest extends AbstractTest {
- private SalFlowService salFlowService1;
- private SalFlowService salFlowService2;
+ private static final Logger LOG = LoggerFactory
+ .getLogger(RoutedServiceTest.class);
- private SalFlowService consumerService;
+ protected OpendaylightTestRoutedRpcService odlRoutedService1;
+ protected OpendaylightTestRoutedRpcService odlRoutedService2;
- private RoutedRpcRegistration<SalFlowService> firstReg;
- private RoutedRpcRegistration<SalFlowService> secondReg;
+ protected OpendaylightTestRoutedRpcService consumerService;
+ protected RoutedRpcRegistration<OpendaylightTestRoutedRpcService> firstReg;
+ protected RoutedRpcRegistration<OpendaylightTestRoutedRpcService> secondReg;
+
+ /**
+ * prepare mocks
+ */
@Before
- public void setUp() throws Exception {
- salFlowService1 = mock(SalFlowService.class, "First Flow Service");
- salFlowService2 = mock(SalFlowService.class, "Second Flow Service");
+ public void setUp() {
+ odlRoutedService1 = mock(OpendaylightTestRoutedRpcService.class, "First Flow Service");
+ odlRoutedService2 = mock(OpendaylightTestRoutedRpcService.class, "Second Flow Service");
}
@Test
assertNotNull(getBroker());
BindingAwareProvider provider1 = new AbstractTestProvider() {
-
@Override
public void onSessionInitiated(ProviderContext session) {
assertNotNull(session);
- firstReg = session.addRoutedRpcImplementation(SalFlowService.class, salFlowService1);
+ firstReg = session.addRoutedRpcImplementation(OpendaylightTestRoutedRpcService.class, odlRoutedService1);
}
};
- /**
- * Register provider 1 with first implementation of SalFlowService -
- * service1
- *
- */
- broker.registerProvider(provider1, getBundleContext());
+ LOG.info("Register provider 1 with first implementation of routeSimpleService - service1");
+ broker.registerProvider(provider1);
assertNotNull("Registration should not be null", firstReg);
- assertSame(salFlowService1, firstReg.getInstance());
+ assertSame(odlRoutedService1, firstReg.getInstance());
BindingAwareProvider provider2 = new AbstractTestProvider() {
-
@Override
public void onSessionInitiated(ProviderContext session) {
assertNotNull(session);
- secondReg = session.addRoutedRpcImplementation(SalFlowService.class, salFlowService2);
+ secondReg = session.addRoutedRpcImplementation(OpendaylightTestRoutedRpcService.class, odlRoutedService2);
}
};
- /**
- * Register provider 2 with first implementation of SalFlowService -
- * service2
- *
- */
- broker.registerProvider(provider2, getBundleContext());
+ LOG.info("Register provider 2 with second implementation of routeSimpleService - service2");
+ broker.registerProvider(provider2);
assertNotNull("Registration should not be null", firstReg);
- assertSame(salFlowService2, secondReg.getInstance());
+ assertSame(odlRoutedService2, secondReg.getInstance());
assertNotSame(secondReg, firstReg);
BindingAwareConsumer consumer = new BindingAwareConsumer() {
@Override
public void onSessionInitialized(ConsumerContext session) {
- consumerService = session.getRpcService(SalFlowService.class);
+ consumerService = session.getRpcService(OpendaylightTestRoutedRpcService.class);
}
};
- broker.registerConsumer(consumer, getBundleContext());
+ LOG.info("Register routeService consumer");
+ broker.registerConsumer(consumer);
- assertNotNull("MD-SAL instance of Flow Service should be returned", consumerService);
- assertNotSame("Provider instance and consumer instance should not be same.", salFlowService1, consumerService);
+ assertNotNull("MD-SAL instance of test Service should be returned", consumerService);
+ assertNotSame("Provider instance and consumer instance should not be same.", odlRoutedService1, consumerService);
- NodeRef nodeOne = createNodeRef("foo:node:1");
+ InstanceIdentifier<UnorderedList> nodeOnePath = createNodeRef("foo:node:1");
- /**
- * Provider 1 registers path of node 1
- */
- firstReg.registerPath(NodeContext.class, nodeOne.getValue());
+ LOG.info("Provider 1 registers path of node 1");
+ firstReg.registerPath(TestContext.class, nodeOnePath);
/**
* Consumer creates addFlow message for node one and sends it to the
* MD-SAL
- *
*/
- AddFlowInput addFlowFirstMessage = createSampleAddFlow(nodeOne, 1);
- consumerService.addFlow(addFlowFirstMessage);
+ RoutedSimpleRouteInput simpleRouteFirstFoo = createSimpleRouteInput(nodeOnePath);
+ consumerService.routedSimpleRoute(simpleRouteFirstFoo);
/**
* Verifies that implementation of the first provider received the same
* message from MD-SAL.
- *
*/
- verify(salFlowService1).addFlow(addFlowFirstMessage);
-
+ verify(odlRoutedService1).routedSimpleRoute(simpleRouteFirstFoo);
/**
* Verifies that second instance was not invoked with first message
- *
*/
- verify(salFlowService2, times(0)).addFlow(addFlowFirstMessage);
+ verify(odlRoutedService2, times(0)).routedSimpleRoute(simpleRouteFirstFoo);
- /**
- * Provider 2 registers path of node 2
- *
- */
- NodeRef nodeTwo = createNodeRef("foo:node:2");
- secondReg.registerPath(NodeContext.class, nodeTwo.getValue());
+ LOG.info("Provider 2 registers path of node 2");
+ InstanceIdentifier<UnorderedList> nodeTwo = createNodeRef("foo:node:2");
+ secondReg.registerPath(TestContext.class, nodeTwo);
/**
* Consumer sends message to nodeTwo for three times. Should be
* processed by second instance.
*/
- AddFlowInput AddFlowSecondMessage = createSampleAddFlow(nodeTwo, 2);
- consumerService.addFlow(AddFlowSecondMessage);
- consumerService.addFlow(AddFlowSecondMessage);
- consumerService.addFlow(AddFlowSecondMessage);
+ RoutedSimpleRouteInput simpleRouteSecondFoo = createSimpleRouteInput(nodeTwo);
+ consumerService.routedSimpleRoute(simpleRouteSecondFoo);
+ consumerService.routedSimpleRoute(simpleRouteSecondFoo);
+ consumerService.routedSimpleRoute(simpleRouteSecondFoo);
/**
* Verifies that second instance was invoked 3 times with second message
* and first instance wasn't invoked.
*
*/
- verify(salFlowService2, times(3)).addFlow(AddFlowSecondMessage);
- verify(salFlowService1, times(0)).addFlow(AddFlowSecondMessage);
+ verify(odlRoutedService2, times(3)).routedSimpleRoute(simpleRouteSecondFoo);
+ verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteSecondFoo);
- /**
- * Unregisteration of the path for the node one in the first provider
- *
- */
- firstReg.unregisterPath(NodeContext.class, nodeOne.getValue());
+ LOG.info("Unregistration of the path for the node one in the first provider");
+ firstReg.unregisterPath(TestContext.class, nodeOnePath);
- /**
- * Provider 2 registers path of node 1
- *
- */
- secondReg.registerPath(NodeContext.class, nodeOne.getValue());
+ LOG.info("Provider 2 registers path of node 1");
+ secondReg.registerPath(TestContext.class, nodeOnePath);
/**
* A consumer sends third message to node 1
- *
*/
- AddFlowInput AddFlowThirdMessage = createSampleAddFlow(nodeOne, 3);
- consumerService.addFlow(AddFlowThirdMessage);
+ RoutedSimpleRouteInput simpleRouteThirdFoo = createSimpleRouteInput(nodeOnePath);
+ consumerService.routedSimpleRoute(simpleRouteThirdFoo);
/**
* Verifies that provider 1 wasn't invoked and provider 2 was invoked 1
* time.
+ * TODO: fix unregister path
*/
- verify(salFlowService1, times(0)).addFlow(AddFlowThirdMessage);
- verify(salFlowService2).addFlow(AddFlowThirdMessage);
+ //verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteThirdFoo);
+ verify(odlRoutedService2).routedSimpleRoute(simpleRouteThirdFoo);
}
*
* @param string
* string with key(path)
- * @return instance of the type NodeRef
+ * @return instance identifier to {@link UnorderedList}
*/
- private static NodeRef createNodeRef(String string) {
- NodeKey key = new NodeKey(new NodeId(string));
- InstanceIdentifier<Node> path = InstanceIdentifier.builder(Nodes.class).child(Node.class, key).build();
-
- return new NodeRef(path);
+ private static InstanceIdentifier<UnorderedList> createNodeRef(String string) {
+ UnorderedListKey key = new UnorderedListKey(string);
+ InstanceIdentifier<UnorderedList> path = InstanceIdentifier.builder(Lists.class)
+ .child(UnorderedContainer.class)
+ .child(UnorderedList.class, key)
+ .build();
+
+ return path;
}
/**
*
* @param node
* NodeRef value
- * @param cookie
- * integer with cookie value
- * @return AddFlowInput instance
+ * @return simpleRouteInput instance
*/
- static AddFlowInput createSampleAddFlow(NodeRef node, int cookie) {
- AddFlowInputBuilder ret = new AddFlowInputBuilder();
- ret.setNode(node);
- ret.setCookie(new FlowCookie(BigInteger.valueOf(cookie)));
+ static RoutedSimpleRouteInput createSimpleRouteInput(InstanceIdentifier<UnorderedList> node) {
+ RoutedSimpleRouteInputBuilder ret = new RoutedSimpleRouteInputBuilder();
+ ret.setRoute(node);
return ret.build();
}
}
package org.opendaylight.controller.cluster.common.actor;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class AbstractUntypedActor extends UntypedActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
public AbstractUntypedActor() {
if(LOG.isDebugEnabled()) {
@Override public void onReceive(Object message) throws Exception {
final String messageType = message.getClass().getSimpleName();
if(LOG.isDebugEnabled()) {
- LOG.debug("Received message {}", messageType);
+// LOG.debug("Received message {}", messageType);
}
handleReceive(message);
if(LOG.isDebugEnabled()) {
- LOG.debug("Done handling message {}", messageType);
+// LOG.debug("Done handling message {}", messageType);
}
}
package org.opendaylight.controller.cluster.common.actor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ protected final Logger LOG = LoggerFactory.getLogger(getClass());
public AbstractUntypedPersistentActor() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Actor created {}", getSelf());
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Actor created {}", getSelf());
}
getContext().
system().
@Override public void onReceiveCommand(Object message) throws Exception {
final String messageType = message.getClass().getSimpleName();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Received message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Received message {}", messageType);
}
handleCommand(message);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Done handling message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Done handling message {}", messageType);
}
}
@Override public void onReceiveRecover(Object message) throws Exception {
final String messageType = message.getClass().getSimpleName();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Received message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Received message {}", messageType);
}
handleRecover(message);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Done handling message {}", messageType);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Done handling message {}", messageType);
}
}
try {
procedure.apply(o);
} catch (Exception e) {
- LOG.error(e, "An unexpected error occurred");
+ LOG.error("An unexpected error occurred", e);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown from NormalizedNodeInputStreamReader when the input stream does not contain
+ * valid serialized data.
+ *
+ * @author Thomas Pantelis
+ */
+public class InvalidNormalizedNodeStreamException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param message detail describing why the stream data is invalid
+ */
+ public InvalidNormalizedNodeStreamException(String message) {
+ super(message);
+ }
+}
private final StringBuilder reusableStringBuilder = new StringBuilder(50);
+ private boolean readSignatureMarker = true;
+
public NormalizedNodeInputStreamReader(InputStream stream) throws IOException {
Preconditions.checkNotNull(stream);
input = new DataInputStream(stream);
@Override
public NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+ readSignatureMarkerAndVersionIfNeeded();
+ return readNormalizedNodeInternal();
+ }
+
+ private void readSignatureMarkerAndVersionIfNeeded() throws IOException {
+ if(readSignatureMarker) {
+ readSignatureMarker = false;
+
+ byte marker = input.readByte();
+ if(marker != NormalizedNodeOutputStreamWriter.SIGNATURE_MARKER) {
+ throw new InvalidNormalizedNodeStreamException(String.format(
+ "Invalid signature marker: %d", marker));
+ }
+
+ input.readShort(); // read the version - not currently used/needed.
+ }
+ }
+
+ private NormalizedNode<?, ?> readNormalizedNodeInternal() throws IOException {
// each node should start with a byte
byte nodeType = input.readByte();
return bytes;
case ValueTypes.YANG_IDENTIFIER_TYPE :
- return readYangInstanceIdentifier();
+ return readYangInstanceIdentifierInternal();
default :
return null;
}
public YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
+ readSignatureMarkerAndVersionIfNeeded();
+ return readYangInstanceIdentifierInternal();
+ }
+
+ private YangInstanceIdentifier readYangInstanceIdentifierInternal() throws IOException {
int size = input.readInt();
List<PathArgument> pathArguments = new ArrayList<>(size);
lastLeafSetQName = nodeType;
- LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
while(child != null) {
builder.withChild(child);
- child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
}
return builder;
}
NormalizedNodeContainerBuilder builder) throws IOException {
LOG.debug("Reading data container (leaf nodes) nodes");
- NormalizedNode<?, ?> child = readNormalizedNode();
+ NormalizedNode<?, ?> child = readNormalizedNodeInternal();
while(child != null) {
builder.addChild(child);
- child = readNormalizedNode();
+ child = readNormalizedNodeInternal();
}
return builder;
}
private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeOutputStreamWriter.class);
+ static final byte SIGNATURE_MARKER = (byte) 0xab;
+ static final short CURRENT_VERSION = (short) 1;
+
static final byte IS_CODE_VALUE = 1;
static final byte IS_STRING_VALUE = 2;
static final byte IS_NULL_VALUE = 3;
private NormalizedNodeWriter normalizedNodeWriter;
+ private boolean wroteSignatureMarker;
+
public NormalizedNodeOutputStreamWriter(OutputStream stream) throws IOException {
Preconditions.checkNotNull(stream);
output = new DataOutputStream(stream);
}
public void writeNormalizedNode(NormalizedNode<?, ?> node) throws IOException {
+ writeSignatureMarkerAndVersionIfNeeded();
normalizedNodeWriter().write(node);
}
+ private void writeSignatureMarkerAndVersionIfNeeded() throws IOException {
+ if(!wroteSignatureMarker) {
+ output.writeByte(SIGNATURE_MARKER);
+ output.writeShort(CURRENT_VERSION);
+ wroteSignatureMarker = true;
+ }
+ }
+
@Override
public void leafNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
Preconditions.checkNotNull(name, "Node identifier should not be null");
private void startNode(final QName qName, byte nodeType) throws IOException {
Preconditions.checkNotNull(qName, "QName of node identifier should not be null.");
+
+ writeSignatureMarkerAndVersionIfNeeded();
+
// First write the type of node
output.writeByte(nodeType);
// Write Start Tag
}
public void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException {
+ writeSignatureMarkerAndVersionIfNeeded();
+ writeYangInstanceIdentifierInternal(identifier);
+ }
+
+ private void writeYangInstanceIdentifierInternal(YangInstanceIdentifier identifier) throws IOException {
Iterable<YangInstanceIdentifier.PathArgument> pathArguments = identifier.getPathArguments();
int size = Iterables.size(pathArguments);
output.writeInt(size);
output.write(bytes);
break;
case ValueTypes.YANG_IDENTIFIER_TYPE:
- writeYangInstanceIdentifier((YangInstanceIdentifier) value);
+ writeYangInstanceIdentifierInternal((YangInstanceIdentifier) value);
break;
case ValueTypes.NULL_TYPE :
break;
import org.apache.commons.lang.SerializationUtils;
import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
public class NormalizedNodeStreamReaderWriterTest {
@Test
- public void testNormalizedNodeStreamReaderWriter() throws IOException {
+ public void testNormalizedNodeStreaming() throws IOException {
- testNormalizedNodeStreamReaderWriter(createTestContainer());
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
+
+ NormalizedNode<?, ?> testContainer = createTestContainer();
+ writer.writeNormalizedNode(testContainer);
QName toaster = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","toaster");
QName darknessFactor = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","darknessFactor");
withNodeIdentifier(new NodeIdentifier(toaster)).
withChild(ImmutableNodes.leafNode(darknessFactor, "1000")).build();
- testNormalizedNodeStreamReaderWriter(Builders.containerBuilder().
+ ContainerNode toasterContainer = Builders.containerBuilder().
withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME)).
- withChild(toasterNode).build());
+ withChild(toasterNode).build();
+ writer.writeNormalizedNode(toasterContainer);
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
+
+ NormalizedNode<?,?> node = reader.readNormalizedNode();
+ Assert.assertEquals(testContainer, node);
+
+ node = reader.readNormalizedNode();
+ Assert.assertEquals(toasterContainer, node);
+
+ writer.close();
}
private NormalizedNode<?, ?> createTestContainer() {
build();
}
- private void testNormalizedNodeStreamReaderWriter(NormalizedNode<?, ?> input) throws IOException {
+ @Test
+ public void testYangInstanceIdentifierStreaming() throws IOException {
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).
+ node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
+ TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
- byte[] byteData = null;
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer =
+ new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
+ writer.writeYangInstanceIdentifier(path);
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
+
+ YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
+ Assert.assertEquals(path, newPath);
+
+ writer.close();
+ }
+
+ @Test
+ public void testNormalizedNodeAndYangInstanceIdentifierStreaming() throws IOException {
- try(ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
- NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream)) {
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream);
- NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
- normalizedNodeWriter.write(input);
- byteData = byteArrayOutputStream.toByteArray();
+ NormalizedNode<?, ?> testContainer = TestModel.createBaseTestContainerBuilder().build();
+ writer.writeNormalizedNode(testContainer);
- }
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).
+ node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
+ TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
+
+ writer.writeYangInstanceIdentifier(path);
NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
- new ByteArrayInputStream(byteData));
+ new ByteArrayInputStream(byteArrayOutputStream.toByteArray()));
NormalizedNode<?,?> node = reader.readNormalizedNode();
- Assert.assertEquals(input, node);
+ Assert.assertEquals(testContainer, node);
+
+ YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
+ Assert.assertEquals(path, newPath);
+
+ writer.close();
+ }
+
+ @Test(expected=InvalidNormalizedNodeStreamException.class, timeout=10000)
+ public void testInvalidNormalizedNodeStream() throws IOException {
+ byte[] protobufBytes = new NormalizedNodeToNodeCodec(null).encode(
+ TestModel.createBaseTestContainerBuilder().build()).getNormalizedNode().toByteArray();
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(protobufBytes));
+
+ reader.readNormalizedNode();
+ }
+
+ @Test(expected=InvalidNormalizedNodeStreamException.class, timeout=10000)
+ public void testInvalidYangInstanceIdentifierStream() throws IOException {
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH).build();
+
+ byte[] protobufBytes = ShardTransactionMessages.DeleteData.newBuilder().setInstanceIdentifierPathArguments(
+ InstanceIdentifierUtils.toSerializable(path)).build().toByteArray();
+
+ NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(protobufBytes));
+
+ reader.readYangInstanceIdentifier();
}
@Test
<type>xml</type>
<classifier>moduleconf</classifier>
</artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/datastore.cfg</file>
+ <type>cfg</type>
+ <classifier>datastore</classifier>
+ </artifact>
</artifacts>
</configuration>
</execution>
loggers = ["akka.event.slf4j.Slf4jLogger"]
actor {
-
provider = "akka.cluster.ClusterActorRefProvider"
serializers {
- java = "akka.serialization.JavaSerializer"
- proto = "akka.remote.serialization.ProtobufSerializer"
- }
+ java = "akka.serialization.JavaSerializer"
+ proto = "akka.remote.serialization.ProtobufSerializer"
+ }
- serialization-bindings {
- "com.google.protobuf.Message" = proto
+ serialization-bindings {
+ "com.google.protobuf.Message" = proto
+ }
+
+ default-dispatcher {
+ # Setting throughput to 1 makes the dispatcher fair. It processes 1 message from
+ # the mailbox before moving on to the next mailbox
+ throughput = 1
+ }
- }
+ default-mailbox {
+ # When not using a BalancingDispatcher it is recommended that we use the SingleConsumerOnlyUnboundedMailbox
+ # as it is the most efficient for multiple producer/single consumer use cases
+ mailbox-type="akka.dispatch.SingleConsumerOnlyUnboundedMailbox"
+ }
}
remote {
log-remote-lifecycle-events = off
cluster {
seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
- auto-down-unreachable-after = 10s
+ auto-down-unreachable-after = 300s
roles = [
"member-1"
netty.tcp {
hostname = "127.0.0.1"
port = 2551
+ maximum-frame-size = 419430400
+ send-buffer-size = 52428800
+ receive-buffer-size = 52428800
}
}
cluster {
seed-nodes = ["akka.tcp://odl-cluster-rpc@127.0.0.1:2551"]
- auto-down-unreachable-after = 10s
+ auto-down-unreachable-after = 300s
}
}
}
--- /dev/null
+# This file specifies property settings for the clustered data store to control its behavior. A
+# property may be applied to every data store type ("config" and "operational") or can be customized
+# differently for each data store type by prefixing the data store type + '.'. For example, specifying
+# the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
+# "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
+# specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+
+# The multiplication factor to be used to determine shard election timeout. The shard election timeout
+# is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
+#shard-election-timeout-factor=2
+
+# The interval at which a shard will send a heart beat message to its remote shard.
+#shard-heartbeat-interval-in-millis=500
+
+# The maximum amount of time to wait for a shard to elect a leader before failing an operation (e.g. transaction create).
+#shard-leader-election-timeout-in-seconds=30
+
+# Enable or disable data persistence.
+#persistent=true
+
+# Disable persistence for the operational data store by default.
+operational.persistent=false
+
+# The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
+#shard-transaction-idle-timeout-in-minutes=10
+
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the
+# next messages before it aborts the transaction.
+#shard-transaction-commit-timeout-in-seconds=30
+
+# The maximum allowed capacity for each shard's transaction commit queue.
+#shard-transaction-commit-queue-capacity=20000
+
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before
+# failing an operation (e.g. transaction create and change listener registration).
+#shard-initialization-timeout-in-seconds=300
+
+# The maximum number of journal log entries to batch on recovery for applying to the data store.
+#shard-journal-recovery-log-batch-size=1000
+
+# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-batch-count=20000
+
+# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-data-threshold-percentage=12
+
+# The interval at which the shard leader checks whether a majority of its followers are still
+# active; if not, it deems itself isolated.
+#shard-isolated-leader-check-interval-in-millis=5000
+
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance as fewer modification messages are sent to the
+# actor, which reduces the chance that the transaction actor's mailbox queue fills up.
+#shard-batched-modification-count=100
+
+# The maximum amount of time for akka operations (remote or local) to complete before failing.
+#operation-timeout-in-seconds=5
+
+# The initial number of transactions per second that are allowed before the data store should begin
+# applying back pressure. This number is only used as an initial guidance, subsequently the datastore
+# measures the latency for a commit and auto-adjusts the rate limit.
+#transaction-creation-initial-rate-limit=100
+
+# The maximum thread pool size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-pool-size=20
+
+# The maximum queue size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-queue-size=1000
+
+# The maximum queue size for each shard's data store data change listener.
+#max-shard-data-change-listener-queue-size=1000
+
+# The maximum queue size for each shard's data store executor.
+#max-shard-data-store-executor-queue-size=5000
+
import java.util.Iterator;
import java.util.Map;
-import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
normalizedPath), e);
}
}
-
- // Write Augmentation data resolution
- if (legacyData.getValue().size() == 1) {
- final DataNormalizationOperation<?> potentialOp;
-
- try {
- final QName childType = legacyData.getValue().get(0).getNodeType();
- potentialOp = currentOp.getChild(childType);
- } catch (DataNormalizationException e) {
- throw new IllegalArgumentException(String.format("Failed to get child operation for %s", legacyData), e);
- }
-
- if (potentialOp.getIdentifier() instanceof AugmentationIdentifier) {
- currentOp = potentialOp;
- normalizedPath = normalizedPath.node(potentialOp.getIdentifier());
- }
- }
-
Preconditions.checkArgument(currentOp != null,
"Instance Identifier %s does not reference correct schema Node.", normalizedPath);
return new AbstractMap.SimpleEntry<YangInstanceIdentifier, NormalizedNode<?, ?>>(normalizedPath,
YangInstanceIdentifier.create(Lists.newArrayList(new NodeIdentifier(TEST_QNAME), new NodeIdentifier(
OUTER_CONTAINER_QNAME))), outerContBuilder.toInstance()));
- verifyNormalizedInstanceIdentifier(normalizedNodeEntry.getKey(), TEST_QNAME, OUTER_CONTAINER_QNAME,
- Sets.newHashSet(AUGMENTED_LEAF_QNAME));
-
- verifyNormalizedNode(normalizedNodeEntry.getValue(), expAugmentation);
}
@Test
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+
</dependencies>
<build>
package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.AbstractListeningExecutorService;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
@Override
public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction,
- Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
- final int cohortSize = Iterables.size(cohorts);
final AsyncNotifyingSettableFuture clientSubmitFuture =
new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
- doCanCommit(clientSubmitFuture, transaction, cohorts, cohortSize);
+ doCanCommit(clientSubmitFuture, transaction, cohorts);
return MappingCheckedFuture.create(clientSubmitFuture,
TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
final long startTime = System.nanoTime();
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
if (result == null || !result) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize,
+ handleException(clientSubmitFuture, transaction, cohorts,
CAN_COMMIT, TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER,
new TransactionCommitFailedException(
"Can Commit failed, no detailed cause available."));
} else {
if(remaining.decrementAndGet() == 0) {
// All cohorts completed successfully - we can move on to the preCommit phase
- doPreCommit(startTime, clientSubmitFuture, transaction, cohorts, cohortSize);
+ doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
}
}
}
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT,
+ handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT,
TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER, t);
}
};
private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
@Override
public void onSuccess(Void notUsed) {
if(remaining.decrementAndGet() == 0) {
// All cohorts completed successfully - we can move on to the commit phase
- doCommit(startTime, clientSubmitFuture, transaction, cohorts, cohortSize);
+ doCommit(startTime, clientSubmitFuture, transaction, cohorts);
}
}
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, PRE_COMMIT,
+ handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT,
TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER, t);
}
};
private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
// Not using Futures.allAsList here to avoid its internal overhead.
- final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ final AtomicInteger remaining = new AtomicInteger(cohorts.size());
FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
@Override
public void onSuccess(Void notUsed) {
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, COMMIT,
+ handleException(clientSubmitFuture, transaction, cohorts, COMMIT,
TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER, t);
}
};
private void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, int cohortSize,
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
final String phase, final TransactionCommitFailedExceptionMapper exMapper,
final Throwable t) {
// Transaction failed - tell all cohorts to abort.
@SuppressWarnings("unchecked")
- ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohortSize];
+ ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohorts.size()];
int i = 0;
for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
canCommitFutures[i++] = cohort.abort();
import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
import akka.dispatch.OnComplete;
+import com.google.common.annotations.VisibleForTesting;
import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
import scala.concurrent.Future;
/**
public void init(final YangInstanceIdentifier path, final AsyncDataBroker.DataChangeScope scope) {
dataChangeListenerActor = actorContext.getActorSystem().actorOf(
- DataChangeListener.props(listener));
+ DataChangeListener.props(listener).withDispatcher(actorContext.getNotificationDispatcherPath()));
Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
findFuture.onComplete(new OnComplete<ActorRef>() {
doRegistration(shard, path, scope);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
private void doRegistration(ActorRef shard, final YangInstanceIdentifier path,
reply.getListenerRegistrationPath()));
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
@Override
import akka.util.Timeout;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.text.WordUtils;
import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
import org.opendaylight.controller.cluster.datastore.config.FileConfigurationReader;
import org.opendaylight.controller.cluster.raft.ConfigParams;
*/
public class DatastoreContext {
- private final InMemoryDOMDataStoreConfigProperties dataStoreProperties;
- private final Duration shardTransactionIdleTimeout;
- private final int operationTimeoutInSeconds;
- private final String dataStoreMXBeanType;
- private final ConfigParams shardRaftConfig;
- private final int shardTransactionCommitTimeoutInSeconds;
- private final int shardTransactionCommitQueueCapacity;
- private final Timeout shardInitializationTimeout;
- private final Timeout shardLeaderElectionTimeout;
- private final boolean persistent;
- private final ConfigurationReader configurationReader;
- private final long shardElectionTimeoutFactor;
-
- private DatastoreContext(InMemoryDOMDataStoreConfigProperties dataStoreProperties,
- ConfigParams shardRaftConfig, String dataStoreMXBeanType, int operationTimeoutInSeconds,
- Duration shardTransactionIdleTimeout, int shardTransactionCommitTimeoutInSeconds,
- int shardTransactionCommitQueueCapacity, Timeout shardInitializationTimeout,
- Timeout shardLeaderElectionTimeout,
- boolean persistent, ConfigurationReader configurationReader, long shardElectionTimeoutFactor) {
- this.dataStoreProperties = dataStoreProperties;
- this.shardRaftConfig = shardRaftConfig;
- this.dataStoreMXBeanType = dataStoreMXBeanType;
- this.operationTimeoutInSeconds = operationTimeoutInSeconds;
- this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
- this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
- this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
- this.shardInitializationTimeout = shardInitializationTimeout;
- this.shardLeaderElectionTimeout = shardLeaderElectionTimeout;
- this.persistent = persistent;
- this.configurationReader = configurationReader;
- this.shardElectionTimeoutFactor = shardElectionTimeoutFactor;
+ public static final Duration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = Duration.create(10, TimeUnit.MINUTES);
+ public static final int DEFAULT_OPERATION_TIMEOUT_IN_SECONDS = 5;
+ public static final int DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS = 30;
+ public static final int DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE = 1000;
+ public static final int DEFAULT_SNAPSHOT_BATCH_COUNT = 20000;
+ public static final int DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS = 500;
+ public static final int DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS = DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS * 10;
+ public static final int DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY = 20000;
+ public static final Timeout DEFAULT_SHARD_INITIALIZATION_TIMEOUT = new Timeout(5, TimeUnit.MINUTES);
+ public static final Timeout DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT = new Timeout(30, TimeUnit.SECONDS);
+ public static final boolean DEFAULT_PERSISTENT = true;
+ public static final FileConfigurationReader DEFAULT_CONFIGURATION_READER = new FileConfigurationReader();
+ public static final int DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE = 12;
+ public static final int DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR = 2;
+ public static final int DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT = 100;
+ public static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
+ public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT= 100;
+
+ private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
+ private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+ private int operationTimeoutInSeconds = DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+ private String dataStoreMXBeanType;
+ private int shardTransactionCommitTimeoutInSeconds = DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+ private int shardTransactionCommitQueueCapacity = DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
+ private Timeout shardInitializationTimeout = DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+ private Timeout shardLeaderElectionTimeout = DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+ private boolean persistent = DEFAULT_PERSISTENT;
+ private ConfigurationReader configurationReader = DEFAULT_CONFIGURATION_READER;
+ private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
+ private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
+ private String dataStoreType = UNKNOWN_DATA_STORE_TYPE;
+ private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
+
+ private DatastoreContext() {
+ setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
+ setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
+ setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
+ setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
+ setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
+ setElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR);
+ }
+
+ private DatastoreContext(DatastoreContext other) {
+ this.dataStoreProperties = other.dataStoreProperties;
+ this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
+ this.operationTimeoutInSeconds = other.operationTimeoutInSeconds;
+ this.dataStoreMXBeanType = other.dataStoreMXBeanType;
+ this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
+ this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
+ this.shardInitializationTimeout = other.shardInitializationTimeout;
+ this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
+ this.persistent = other.persistent;
+ this.configurationReader = other.configurationReader;
+ this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
+ this.dataStoreType = other.dataStoreType;
+ this.shardBatchedModificationCount = other.shardBatchedModificationCount;
+
+ setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
+ setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
+ setHeartbeatInterval(other.raftConfig.getHeartBeatInterval().toMillis());
+ setIsolatedLeaderCheckInterval(other.raftConfig.getIsolatedCheckIntervalInMillis());
+ setSnapshotDataThresholdPercentage(other.raftConfig.getSnapshotDataThresholdPercentage());
+ setElectionTimeoutFactor(other.raftConfig.getElectionTimeoutFactor());
}
public static Builder newBuilder() {
- return new Builder();
+ return new Builder(new DatastoreContext());
+ }
+
+ public static Builder newBuilderFrom(DatastoreContext context) {
+ return new Builder(new DatastoreContext(context));
}
public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
}
public ConfigParams getShardRaftConfig() {
- return shardRaftConfig;
+ return raftConfig;
}
public int getShardTransactionCommitTimeoutInSeconds() {
}
public long getShardElectionTimeoutFactor(){
- return this.shardElectionTimeoutFactor;
+ return raftConfig.getElectionTimeoutFactor();
+ }
+
+ public String getDataStoreType(){
+ return dataStoreType;
+ }
+
+ public long getTransactionCreationInitialRateLimit() {
+ return transactionCreationInitialRateLimit;
+ }
+
+ private void setHeartbeatInterval(long shardHeartbeatIntervalInMillis){
+ raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
+ TimeUnit.MILLISECONDS));
+ }
+
+ private void setShardJournalRecoveryLogBatchSize(int shardJournalRecoveryLogBatchSize){
+ raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
+ }
+
+
+ private void setIsolatedLeaderCheckInterval(long shardIsolatedLeaderCheckIntervalInMillis) {
+ raftConfig.setIsolatedLeaderCheckInterval(
+ new FiniteDuration(shardIsolatedLeaderCheckIntervalInMillis, TimeUnit.MILLISECONDS));
+ }
+
+ private void setElectionTimeoutFactor(long shardElectionTimeoutFactor) {
+ raftConfig.setElectionTimeoutFactor(shardElectionTimeoutFactor);
+ }
+
+ private void setSnapshotDataThresholdPercentage(int shardSnapshotDataThresholdPercentage) {
+ raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
+ }
+
+ private void setSnapshotBatchCount(long shardSnapshotBatchCount) {
+ raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
+ }
+
+ public int getShardBatchedModificationCount() {
+ return shardBatchedModificationCount;
}
public static class Builder {
- private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
- private Duration shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
- private int operationTimeoutInSeconds = 5;
- private String dataStoreMXBeanType;
- private int shardTransactionCommitTimeoutInSeconds = 30;
- private int shardJournalRecoveryLogBatchSize = 1000;
- private int shardSnapshotBatchCount = 20000;
- private int shardHeartbeatIntervalInMillis = 500;
- private int shardTransactionCommitQueueCapacity = 20000;
- private Timeout shardInitializationTimeout = new Timeout(5, TimeUnit.MINUTES);
- private Timeout shardLeaderElectionTimeout = new Timeout(30, TimeUnit.SECONDS);
- private boolean persistent = true;
- private ConfigurationReader configurationReader = new FileConfigurationReader();
- private int shardIsolatedLeaderCheckIntervalInMillis = shardHeartbeatIntervalInMillis * 10;
- private int shardSnapshotDataThresholdPercentage = 12;
- private long shardElectionTimeoutFactor = 2;
-
- public Builder shardTransactionIdleTimeout(Duration shardTransactionIdleTimeout) {
- this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ private final DatastoreContext datastoreContext;
+ private int maxShardDataChangeExecutorPoolSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE;
+ private int maxShardDataChangeExecutorQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE;
+ private int maxShardDataChangeListenerQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE;
+ private int maxShardDataStoreExecutorQueueSize =
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE;
+
+ private Builder(DatastoreContext datastoreContext) {
+ this.datastoreContext = datastoreContext;
+
+ if(datastoreContext.getDataStoreProperties() != null) {
+ maxShardDataChangeExecutorPoolSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
+ maxShardDataChangeExecutorQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
+ maxShardDataChangeListenerQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
+ maxShardDataStoreExecutorQueueSize =
+ datastoreContext.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
+ }
+ }
+
+ public Builder boundedMailboxCapacity(int boundedMailboxCapacity) {
+ // TODO - this is defined in the yang DataStoreProperties but not currently used.
return this;
}
- public Builder operationTimeoutInSeconds(int operationTimeoutInSeconds) {
- this.operationTimeoutInSeconds = operationTimeoutInSeconds;
+ public Builder enableMetricCapture(boolean enableMetricCapture) {
+ // TODO - this is defined in the yang DataStoreProperties but not currently used.
return this;
}
- public Builder dataStoreMXBeanType(String dataStoreMXBeanType) {
- this.dataStoreMXBeanType = dataStoreMXBeanType;
+
+ public Builder shardTransactionIdleTimeout(long timeout, TimeUnit unit) {
+ datastoreContext.shardTransactionIdleTimeout = Duration.create(timeout, unit);
return this;
}
- public Builder dataStoreProperties(InMemoryDOMDataStoreConfigProperties dataStoreProperties) {
- this.dataStoreProperties = dataStoreProperties;
+ public Builder shardTransactionIdleTimeoutInMinutes(long timeout) {
+ return shardTransactionIdleTimeout(timeout, TimeUnit.MINUTES);
+ }
+
+ public Builder operationTimeoutInSeconds(int operationTimeoutInSeconds) {
+ datastoreContext.operationTimeoutInSeconds = operationTimeoutInSeconds;
+ return this;
+ }
+
+ public Builder dataStoreMXBeanType(String dataStoreMXBeanType) {
+ datastoreContext.dataStoreMXBeanType = dataStoreMXBeanType;
return this;
}
public Builder shardTransactionCommitTimeoutInSeconds(int shardTransactionCommitTimeoutInSeconds) {
- this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
+ datastoreContext.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
return this;
}
public Builder shardJournalRecoveryLogBatchSize(int shardJournalRecoveryLogBatchSize) {
- this.shardJournalRecoveryLogBatchSize = shardJournalRecoveryLogBatchSize;
+ datastoreContext.setShardJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
return this;
}
public Builder shardSnapshotBatchCount(int shardSnapshotBatchCount) {
- this.shardSnapshotBatchCount = shardSnapshotBatchCount;
+ datastoreContext.setSnapshotBatchCount(shardSnapshotBatchCount);
return this;
}
public Builder shardSnapshotDataThresholdPercentage(int shardSnapshotDataThresholdPercentage) {
- this.shardSnapshotDataThresholdPercentage = shardSnapshotDataThresholdPercentage;
+ datastoreContext.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
return this;
}
-
public Builder shardHeartbeatIntervalInMillis(int shardHeartbeatIntervalInMillis) {
- this.shardHeartbeatIntervalInMillis = shardHeartbeatIntervalInMillis;
+ datastoreContext.setHeartbeatInterval(shardHeartbeatIntervalInMillis);
return this;
}
public Builder shardTransactionCommitQueueCapacity(int shardTransactionCommitQueueCapacity) {
- this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
+ datastoreContext.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
return this;
}
public Builder shardInitializationTimeout(long timeout, TimeUnit unit) {
- this.shardInitializationTimeout = new Timeout(timeout, unit);
+ datastoreContext.shardInitializationTimeout = new Timeout(timeout, unit);
return this;
}
+ public Builder shardInitializationTimeoutInSeconds(long timeout) {
+ return shardInitializationTimeout(timeout, TimeUnit.SECONDS);
+ }
+
public Builder shardLeaderElectionTimeout(long timeout, TimeUnit unit) {
- this.shardLeaderElectionTimeout = new Timeout(timeout, unit);
+ datastoreContext.shardLeaderElectionTimeout = new Timeout(timeout, unit);
return this;
}
+ public Builder shardLeaderElectionTimeoutInSeconds(long timeout) {
+ return shardLeaderElectionTimeout(timeout, TimeUnit.SECONDS);
+ }
+
public Builder configurationReader(ConfigurationReader configurationReader){
- this.configurationReader = configurationReader;
+ datastoreContext.configurationReader = configurationReader;
return this;
}
public Builder persistent(boolean persistent){
- this.persistent = persistent;
+ datastoreContext.persistent = persistent;
return this;
}
public Builder shardIsolatedLeaderCheckIntervalInMillis(int shardIsolatedLeaderCheckIntervalInMillis) {
- this.shardIsolatedLeaderCheckIntervalInMillis = shardIsolatedLeaderCheckIntervalInMillis;
+ datastoreContext.setIsolatedLeaderCheckInterval(shardIsolatedLeaderCheckIntervalInMillis);
return this;
}
public Builder shardElectionTimeoutFactor(long shardElectionTimeoutFactor){
- this.shardElectionTimeoutFactor = shardElectionTimeoutFactor;
+ datastoreContext.setElectionTimeoutFactor(shardElectionTimeoutFactor);
return this;
}
+ public Builder transactionCreationInitialRateLimit(long initialRateLimit){
+ datastoreContext.transactionCreationInitialRateLimit = initialRateLimit;
+ return this;
+ }
- public DatastoreContext build() {
- DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
- raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
- TimeUnit.MILLISECONDS));
- raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
- raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
- raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
- raftConfig.setElectionTimeoutFactor(shardElectionTimeoutFactor);
- raftConfig.setIsolatedLeaderCheckInterval(
- new FiniteDuration(shardIsolatedLeaderCheckIntervalInMillis, TimeUnit.MILLISECONDS));
+ public Builder dataStoreType(String dataStoreType){
+ datastoreContext.dataStoreType = dataStoreType;
+ datastoreContext.dataStoreMXBeanType = "Distributed" + WordUtils.capitalize(dataStoreType) + "Datastore";
+ return this;
+ }
- return new DatastoreContext(dataStoreProperties, raftConfig, dataStoreMXBeanType,
- operationTimeoutInSeconds, shardTransactionIdleTimeout,
- shardTransactionCommitTimeoutInSeconds, shardTransactionCommitQueueCapacity,
- shardInitializationTimeout, shardLeaderElectionTimeout,
- persistent, configurationReader, shardElectionTimeoutFactor);
+ public Builder shardBatchedModificationCount(int shardBatchedModificationCount) {
+ datastoreContext.shardBatchedModificationCount = shardBatchedModificationCount;
+ return this;
+ }
+
+ public Builder maxShardDataChangeExecutorPoolSize(int maxShardDataChangeExecutorPoolSize) {
+ this.maxShardDataChangeExecutorPoolSize = maxShardDataChangeExecutorPoolSize;
+ return this;
+ }
+
+ public Builder maxShardDataChangeExecutorQueueSize(int maxShardDataChangeExecutorQueueSize) {
+ this.maxShardDataChangeExecutorQueueSize = maxShardDataChangeExecutorQueueSize;
+ return this;
+ }
+
+ public Builder maxShardDataChangeListenerQueueSize(int maxShardDataChangeListenerQueueSize) {
+ this.maxShardDataChangeListenerQueueSize = maxShardDataChangeListenerQueueSize;
+ return this;
+ }
+
+ public Builder maxShardDataStoreExecutorQueueSize(int maxShardDataStoreExecutorQueueSize) {
+ this.maxShardDataStoreExecutorQueueSize = maxShardDataStoreExecutorQueueSize;
+ return this;
+ }
+
+ public DatastoreContext build() {
+ datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
+ maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
+ maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
+ return datastoreContext;
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.io.IOException;
+import java.util.Dictionary;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class that overlays DatastoreContext settings with settings obtained from an OSGi Config Admin
+ * service.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextConfigAdminOverlay implements AutoCloseable {
+ public static final String CONFIG_ID = "org.opendaylight.controller.cluster.datastore";
+
+ private static final Logger LOG = LoggerFactory.getLogger(DatastoreContextConfigAdminOverlay.class);
+
+ private final DatastoreContextIntrospector introspector;
+ private final BundleContext bundleContext;
+
+ public DatastoreContextConfigAdminOverlay(DatastoreContextIntrospector introspector, BundleContext bundleContext) {
+ this.introspector = introspector;
+ this.bundleContext = bundleContext;
+
+ ServiceReference<ConfigurationAdmin> configAdminServiceReference =
+ bundleContext.getServiceReference(ConfigurationAdmin.class);
+ if(configAdminServiceReference == null) {
+ LOG.warn("No ConfigurationAdmin service found");
+ } else {
+ overlaySettings(configAdminServiceReference);
+ }
+ }
+
+ private void overlaySettings(ServiceReference<ConfigurationAdmin> configAdminServiceReference) {
+ try {
+ ConfigurationAdmin configAdmin = bundleContext.getService(configAdminServiceReference);
+
+ Configuration config = configAdmin.getConfiguration(CONFIG_ID);
+ if(config != null) {
+ Dictionary<String, Object> properties = config.getProperties();
+
+ LOG.debug("Overlaying settings: {}", properties);
+
+ introspector.update(properties);
+ } else {
+ LOG.debug("No Configuration found for {}", CONFIG_ID);
+ }
+ } catch (IOException e) {
+ LOG.error("Error obtaining Configuration for pid {}", CONFIG_ID, e);
+ } catch(IllegalStateException e) {
+ // Ignore - indicates the bundleContext has been closed.
+ } finally {
+ try {
+ bundleContext.ungetService(configAdminServiceReference);
+ } catch (Exception e) {
+ LOG.debug("Error from ungetService", e);
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.primitives.Primitives;
+import java.beans.BeanInfo;
+import java.beans.ConstructorProperties;
+import java.beans.IntrospectionException;
+import java.beans.Introspector;
+import java.beans.MethodDescriptor;
+import java.beans.PropertyDescriptor;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Dictionary;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.text.WordUtils;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Introspects on a DatastoreContext instance to set its properties via reflection.
+ * i
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextIntrospector {
+ private static final Logger LOG = LoggerFactory.getLogger(DatastoreContextIntrospector.class);
+
+ private static final Map<String, Class<?>> dataStorePropTypes = new HashMap<>();
+
+ private static final Map<Class<?>, Constructor<?>> constructors = new HashMap<>();
+
+ private static final Map<Class<?>, Method> yangTypeGetters = new HashMap<>();
+
+ private static final Map<String, Method> builderSetters = new HashMap<>();
+
+ static {
+ try {
+ introspectDatastoreContextBuilder();
+ introspectDataStoreProperties();
+ introspectPrimitiveTypes();
+ } catch (IntrospectionException e) {
+ LOG.error("Error initializing DatastoreContextIntrospector", e);
+ }
+ }
+
+ /**
+ * Introspects each primitive wrapper (ie Integer, Long etc) and String type to find the
+ * constructor that takes a single String argument. For primitive wrappers, this constructor
+ * converts from a String representation.
+ */
+ private static void introspectPrimitiveTypes() {
+
+ Set<Class<?>> primitives = ImmutableSet.<Class<?>>builder().addAll(
+ Primitives.allWrapperTypes()).add(String.class).build();
+ for(Class<?> primitive: primitives) {
+ try {
+ processPropertyType(primitive);
+ } catch (Exception e) {
+ // Ignore primitives that can't be constructed from a String, eg Character and Void.
+ }
+ }
+ }
+
+ /**
+ * Introspects the DatastoreContext.Builder class to find all its setter methods that we will
+ * invoke via reflection. We can't use the bean Introspector here as the Builder setters don't
+ * follow the bean property naming convention, ie setter prefixed with "set", so look for all
+ * the methods that return Builder.
+ */
+ private static void introspectDatastoreContextBuilder() {
+ for(Method method: Builder.class.getMethods()) {
+ if(Builder.class.equals(method.getReturnType())) {
+ builderSetters.put(method.getName(), method);
+ }
+ }
+ }
+
+ /**
+ * Introspects the DataStoreProperties interface that is generated from the DataStoreProperties
+ * yang grouping. We use the bean Introspector to find the types of all the properties defined
+ * in the interface (this is the type returned from the getter method). For each type, we find
+ * the appropriate constructor that we will use.
+ */
+ private static void introspectDataStoreProperties() throws IntrospectionException {
+ BeanInfo beanInfo = Introspector.getBeanInfo(DataStoreProperties.class);
+ for(PropertyDescriptor desc: beanInfo.getPropertyDescriptors()) {
+ processDataStoreProperty(desc.getName(), desc.getPropertyType());
+ }
+
+ // Getter methods that return Boolean and start with "is" instead of "get" aren't recognized as
+ // properties and thus aren't returned from getPropertyDescriptors. A getter starting with
+ // "is" is only supported if it returns primitive boolean. So we'll check for these via
+ // getMethodDescriptors.
+ for(MethodDescriptor desc: beanInfo.getMethodDescriptors()) {
+ String methodName = desc.getName();
+ if(Boolean.class.equals(desc.getMethod().getReturnType()) && methodName.startsWith("is")) {
+ String propertyName = WordUtils.uncapitalize(methodName.substring(2));
+ processDataStoreProperty(propertyName, Boolean.class);
+ }
+ }
+ }
+
+ /**
+ * Processes a property defined on the DataStoreProperties interface.
+ */
+ private static void processDataStoreProperty(String name, Class<?> propertyType) {
+ Preconditions.checkArgument(builderSetters.containsKey(name), String.format(
+ "DataStoreProperties property \"%s\" does not have corresponding setter in DatastoreContext.Builder", name));
+ try {
+ processPropertyType(propertyType);
+ dataStorePropTypes.put(name, propertyType);
+ } catch (Exception e) {
+ LOG.error("Error finding constructor for type {}", propertyType, e);
+ }
+ }
+
+ /**
+ * Finds the appropriate constructor for the specified type that we will use to construct
+ * instances.
+ */
+ private static void processPropertyType(Class<?> propertyType) throws Exception {
+ Class<?> wrappedType = Primitives.wrap(propertyType);
+ if(constructors.containsKey(wrappedType)) {
+ return;
+ }
+
+ // If the type is a primitive (or String type), we look for the constructor that takes a
+ // single String argument, which, for primitives, validates and converts from a String
+ // representation which is the form we get on ingress.
+ if(propertyType.isPrimitive() || Primitives.isWrapperType(propertyType) ||
+ propertyType.equals(String.class))
+ {
+ constructors.put(wrappedType, propertyType.getConstructor(String.class));
+ } else {
+ // This must be a yang-defined type. We need to find the constructor that takes a
+ // primitive as the only argument. This will be used to construct instances to perform
+ // validation (eg range checking). The yang-generated types have a couple single-argument
+ // constructors but the one we want has the bean ConstructorProperties annotation.
+ for(Constructor<?> ctor: propertyType.getConstructors()) {
+ ConstructorProperties ctorPropsAnnotation = ctor.getAnnotation(ConstructorProperties.class);
+ if(ctor.getParameterTypes().length == 1 && ctorPropsAnnotation != null) {
+ findYangTypeGetter(propertyType, ctorPropsAnnotation.value()[0]);
+ constructors.put(propertyType, ctor);
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * Finds the getter method on a yang-generated type for the specified property name.
+ */
+ private static void findYangTypeGetter(Class<?> type, String propertyName)
+ throws Exception {
+ for(PropertyDescriptor desc: Introspector.getBeanInfo(type).getPropertyDescriptors()) {
+ if(desc.getName().equals(propertyName)) {
+ yangTypeGetters.put(type, desc.getReadMethod());
+ return;
+ }
+ }
+
+ throw new IllegalArgumentException(String.format(
+ "Getter method for constructor property %s not found for YANG type %s",
+ propertyName, type));
+ }
+
+ private DatastoreContext context;
+
+ public DatastoreContextIntrospector(DatastoreContext context) {
+ this.context = context;
+ }
+
+ public DatastoreContext getContext() {
+ return context;
+ }
+
+ /**
+ * Applies the given properties to the cached DatastoreContext and yields a new DatastoreContext
+ * instance which can be obtained via {@link getContext}.
+ *
+ * @param properties the properties to apply
+ * @return true if the cached DatastoreContext was updated, false otherwise.
+ */
+ public boolean update(Dictionary<String, Object> properties) {
+ if(properties == null || properties.isEmpty()) {
+ return false;
+ }
+
+ Builder builder = DatastoreContext.newBuilderFrom(context);
+
+ final String dataStoreTypePrefix = context.getDataStoreType() + '.';
+
+ // Sort the property keys by putting the names prefixed with the data store type last. This
+ // is done so data store specific settings are applied after global settings.
+ ArrayList<String> keys = Collections.list(properties.keys());
+ Collections.sort(keys, new Comparator<String>() {
+ @Override
+ public int compare(String key1, String key2) {
+ return key1.startsWith(dataStoreTypePrefix) ? 1 :
+ key2.startsWith(dataStoreTypePrefix) ? -1 : key1.compareTo(key2);
+ }
+ });
+
+ boolean updated = false;
+ for(String key: keys) {
+ Object value = properties.get(key);
+ try {
+ // If the key is prefixed with the data store type, strip it off.
+ if(key.startsWith(dataStoreTypePrefix)) {
+ key = key.replaceFirst(dataStoreTypePrefix, "");
+ }
+
+ key = convertToCamelCase(key);
+
+ // Convert the value to the right type.
+ value = convertValue(key, value);
+ if(value == null) {
+ continue;
+ }
+
+ LOG.debug("Converted value for property {}: {} ({})",
+ key, value, value.getClass().getSimpleName());
+
+ // Call the setter method on the Builder instance.
+ Method setter = builderSetters.get(key);
+ setter.invoke(builder, constructorValueRecursively(
+ Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+
+ updated = true;
+
+ } catch (Exception e) {
+ LOG.error("Error converting value ({}) for property {}", value, key, e);
+ }
+ }
+
+ if(updated) {
+ context = builder.build();
+ }
+
+ return updated;
+ }
+
+ private String convertToCamelCase(String inString) {
+ String str = inString.trim();
+ if(StringUtils.contains(str, '-') || StringUtils.contains(str, ' ')) {
+ str = inString.replace('-', ' ');
+ str = WordUtils.capitalizeFully(str);
+ str = StringUtils.deleteWhitespace(str);
+ }
+
+ return StringUtils.uncapitalize(str);
+ }
+
+ private Object convertValue(String name, Object from) throws Exception {
+ Class<?> propertyType = dataStorePropTypes.get(name);
+ if(propertyType == null) {
+ LOG.debug("Property not found for {}", name);
+ return null;
+ }
+
+ LOG.debug("Type for property {}: {}, converting value {} ({})",
+ name, propertyType.getSimpleName(), from, from.getClass().getSimpleName());
+
+ // Recurse the chain of constructors depth-first to get the resulting value. Eg, if the
+ // property type is the yang-generated NonZeroUint32Type, it's constructor takes a Long so
+ // we have to first construct a Long instance from the input value.
+ Object converted = constructorValueRecursively(propertyType, from.toString());
+
+ // If the converted type is a yang-generated type, call the getter to obtain the actual value.
+ Method getter = yangTypeGetters.get(converted.getClass());
+ if(getter != null) {
+ converted = getter.invoke(converted);
+ }
+
+ return converted;
+ }
+
+ private Object constructorValueRecursively(Class<?> toType, Object fromValue) throws Exception {
+ LOG.debug("convertValueRecursively - toType: {}, fromValue {} ({})",
+ toType.getSimpleName(), fromValue, fromValue.getClass().getSimpleName());
+
+ Constructor<?> ctor = constructors.get(toType);
+
+ LOG.debug("Found {}", ctor);
+
+ if(ctor == null) {
+ throw new IllegalArgumentException(String.format("Constructor not found for type %s", toType));
+ }
+
+ Object value = fromValue;
+
+ // Since the original input type is a String, once we find a constructor that takes a String
+ // argument, we're done recursing.
+ if(!ctor.getParameterTypes()[0].equals(String.class)) {
+ value = constructorValueRecursively(ctor.getParameterTypes()[0], fromValue);
+ }
+
+ return ctor.newInstance(value);
+ }
+}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
private final ActorContext actorContext;
- public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster,
+ private AutoCloseable closeable;
+
+ private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
+
+ public DistributedDataStore(ActorSystem actorSystem, ClusterWrapper cluster,
Configuration configuration, DatastoreContext datastoreContext) {
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
- Preconditions.checkNotNull(type, "type should not be null");
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
+ String type = datastoreContext.getDataStoreType();
+
String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
LOG.info("Creating ShardManager : {}", shardManagerId);
+ String shardDispatcher =
+ new Dispatchers(actorSystem.dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
+
actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
- ShardManager.props(type, cluster, configuration, datastoreContext)
- .withMailbox(ActorContext.MAILBOX), shardManagerId ),
+ ShardManager.props(cluster, configuration, datastoreContext)
+ .withDispatcher(shardDispatcher).withMailbox(ActorContext.MAILBOX), shardManagerId ),
cluster, configuration, datastoreContext);
+
+ datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
+ datastoreConfigMXBean.setContext(datastoreContext);
+ datastoreConfigMXBean.registerMBean();
}
public DistributedDataStore(ActorContext actorContext) {
this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
}
+ public void setCloseable(AutoCloseable closeable) {
+ this.closeable = closeable;
+ }
+
@SuppressWarnings("unchecked")
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
+ actorContext.acquireTxCreationPermit();
return new TransactionProxy(actorContext, TransactionProxy.TransactionType.WRITE_ONLY);
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
+ actorContext.acquireTxCreationPermit();
return new TransactionProxy(actorContext, TransactionProxy.TransactionType.READ_WRITE);
}
}
@Override
- public void close() throws Exception {
+ public void close() {
+ datastoreConfigMXBean.unregisterMBean();
+
+ if(closeable != null) {
+ try {
+ closeable.close();
+ } catch (Exception e) {
+ LOG.debug("Error closing insance", e);
+ }
+ }
+
actorContext.shutdown();
}
private static volatile ActorSystem persistentActorSystem = null;
- public static DistributedDataStore createInstance(String name, SchemaService schemaService,
- DatastoreContext datastoreContext, BundleContext bundleContext) {
+ public static DistributedDataStore createInstance(SchemaService schemaService,
+ DatastoreContext datastoreContext, BundleContext bundleContext) {
+
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(datastoreContext);
+ DatastoreContextConfigAdminOverlay overlay = new DatastoreContextConfigAdminOverlay(
+ introspector, bundleContext);
ActorSystem actorSystem = getOrCreateInstance(bundleContext, datastoreContext.getConfigurationReader());
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
- final DistributedDataStore dataStore =
- new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
- config, datastoreContext);
+ final DistributedDataStore dataStore = new DistributedDataStore(actorSystem,
+ new ClusterWrapperImpl(actorSystem), config, introspector.getContext());
ShardStrategyFactory.setConfiguration(config);
schemaService.registerSchemaContextListener(dataStore);
+
+ dataStore.setCloseable(overlay);
return dataStore;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
+import org.opendaylight.controller.cluster.datastore.messages.MergeData;
+import org.opendaylight.controller.cluster.datastore.messages.WriteData;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Implementation of TransactionContextImpl used when talking to a pre-Lithium controller that doesn't
+ * support the BatchedModifications message.
+ *
+ * @author Thomas Pantelis
+ */
+class LegacyTransactionContextImpl extends TransactionContextImpl {
+
+ LegacyTransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
+ ActorContext actorContext, SchemaContext schemaContext, boolean isTxActorLocal,
+ short remoteTransactionVersion, OperationCompleter operationCompleter) {
+ super(transactionPath, actor, identifier, actorContext, schemaContext, isTxActorLocal,
+ remoteTransactionVersion, operationCompleter);
+ }
+
+ @Override
+ public void deleteData(YangInstanceIdentifier path) {
+ recordedOperationFutures.add(executeOperationAsync(
+ new DeleteData(path, getRemoteTransactionVersion())));
+ }
+
+ @Override
+ public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ recordedOperationFutures.add(executeOperationAsync(
+ new MergeData(path, data, getRemoteTransactionVersion())));
+ }
+
+ @Override
+ public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ recordedOperationFutures.add(executeOperationAsync(
+ new WriteData(path, data, getRemoteTransactionVersion())));
+ }
+}
import akka.dispatch.OnComplete;
import com.google.common.base.Preconditions;
import java.util.concurrent.Semaphore;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
final class OperationCompleter extends OnComplete<Object> {
private final Semaphore operationLimiter;
}
@Override
- public void onComplete(Throwable throwable, Object o){
- this.operationLimiter.release();
+ public void onComplete(Throwable throwable, Object message) {
+ if(message instanceof BatchedModificationsReply) {
+ this.operationLimiter.release(((BatchedModificationsReply)message).getNumBatched());
+ } else {
+ this.operationLimiter.release();
+ }
}
}
\ No newline at end of file
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
// The state of this Shard
private final InMemoryDOMDataStore store;
- private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
-
/// The name of this shard
private final ShardIdentifier name;
private final Optional<ActorRef> roleChangeNotifier;
+ private final MessageTracker appendEntriesReplyTracker;
+
/**
* Coordinates persistence recovery on startup.
*/
private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
+ private final String txnDispatcherPath;
+
protected Shard(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
super(name.toString(), mapPeerAddresses(peerAddresses),
this.name = name;
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
- this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
+ this.dataPersistenceProvider = (datastoreContext.isPersistent())
+ ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
+ this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
+ .getDispatcherPath(Dispatchers.DispatcherType.Transaction);
+
LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
// create a notifier actor for each cluster member
roleChangeNotifier = createRoleChangeNotifier(name.toString());
+
+ appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
+ getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
}
private static Map<String, String> mapPeerAddresses(
}
if (message instanceof RecoveryFailure){
- LOG.error(((RecoveryFailure) message).cause(), "{}: Recovery failed because of this cause",
- persistenceId());
+ LOG.error("{}: Recovery failed because of this cause",
+ persistenceId(), ((RecoveryFailure) message).cause());
// Even though recovery failed, we still need to finish our recovery, eg send the
// ActorInitialized message and start the txCommitTimeoutCheckSchedule.
onRecoveryComplete();
} else {
super.onReceiveRecover(message);
+ if(LOG.isTraceEnabled()) {
+ appendEntriesReplyTracker.begin();
+ }
}
}
@Override
public void onReceiveCommand(final Object message) throws Exception {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: onReceiveCommand: Received message {} from {}", persistenceId(), message, getSender());
- }
-
- if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
- handleCreateTransaction(message);
- } else if(message instanceof ForwardedReadyTransaction) {
- handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
- } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
- handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
- } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
- handleCommitTransaction(CommitTransaction.fromSerializable(message));
- } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
- handleAbortTransaction(AbortTransaction.fromSerializable(message));
- } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
- closeTransactionChain(CloseTransactionChain.fromSerializable(message));
- } else if (message instanceof RegisterChangeListener) {
- registerChangeListener((RegisterChangeListener) message);
- } else if (message instanceof UpdateSchemaContext) {
- updateSchemaContext((UpdateSchemaContext) message);
- } else if (message instanceof PeerAddressResolved) {
- PeerAddressResolved resolved = (PeerAddressResolved) message;
- setPeerAddress(resolved.getPeerId().toString(),
- resolved.getPeerAddress());
- } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
- handleTransactionCommitTimeoutCheck();
- } else {
- super.onReceiveCommand(message);
+
+ MessageTracker.Context context = appendEntriesReplyTracker.received(message);
+
+ if(context.error().isPresent()){
+ LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
+ context.error());
+ }
+
+ try {
+ if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
+ handleCreateTransaction(message);
+ } else if (message instanceof ForwardedReadyTransaction) {
+ handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
+ } else if (message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
+ handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+ } else if (message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
+ handleCommitTransaction(CommitTransaction.fromSerializable(message));
+ } else if (message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
+ handleAbortTransaction(AbortTransaction.fromSerializable(message));
+ } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)) {
+ closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+ } else if (message instanceof RegisterChangeListener) {
+ registerChangeListener((RegisterChangeListener) message);
+ } else if (message instanceof UpdateSchemaContext) {
+ updateSchemaContext((UpdateSchemaContext) message);
+ } else if (message instanceof PeerAddressResolved) {
+ PeerAddressResolved resolved = (PeerAddressResolved) message;
+ setPeerAddress(resolved.getPeerId().toString(),
+ resolved.getPeerAddress());
+ } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
+ handleTransactionCommitTimeoutCheck();
+ } else {
+ super.onReceiveCommand(message);
+ }
+ } finally {
+ context.done();
}
}
if(cohortEntry != null) {
long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
if(elapsed > transactionCommitTimeout) {
- LOG.warning("{}: Current transaction {} has timed out after {} ms - aborting",
+ LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);
doAbortTransaction(cohortEntry.getTransactionID(), null);
Shard.this.persistData(getSender(), transactionID,
new ModificationPayload(cohortEntry.getModification()));
}
- } catch (InterruptedException | ExecutionException | IOException e) {
- LOG.error(e, "{} An exception occurred while preCommitting transaction {}",
- persistenceId(), cohortEntry.getTransactionID());
+ } catch (Exception e) {
+ LOG.error("{} An exception occurred while preCommitting transaction {}",
+ persistenceId(), cohortEntry.getTransactionID(), e);
shardMBean.incrementFailedTransactionsCount();
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
- } catch (InterruptedException | ExecutionException e) {
+ } catch (Exception e) {
sender.tell(new akka.actor.Status.Failure(e), getSelf());
- LOG.error(e, "{}, An exception occurred while committing transaction {}", persistenceId(), transactionID);
+ LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
+ transactionID, e);
shardMBean.incrementFailedTransactionsCount();
+ } finally {
+ commitCoordinator.currentTransactionComplete(transactionID, true);
}
-
- commitCoordinator.currentTransactionComplete(transactionID, true);
}
private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
@Override
public void onFailure(final Throwable t) {
- LOG.error(t, "{}: An exception happened during abort", persistenceId());
+ LOG.error("{}: An exception happened during abort", persistenceId(), t);
if(sender != null) {
sender.tell(new akka.actor.Status.Failure(t), self);
shardMBean.incrementReadOnlyTransactionCount();
- return getContext().actorOf(
- ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
- schemaContext,datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion),
- transactionId.toString());
+ return createShardTransaction(factory.newReadOnlyTransaction(), transactionId, clientVersion);
} else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
shardMBean.incrementReadWriteTransactionCount();
- return getContext().actorOf(
- ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion),
- transactionId.toString());
-
+ return createShardTransaction(factory.newReadWriteTransaction(), transactionId, clientVersion);
} else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
shardMBean.incrementWriteOnlyTransactionCount();
- return getContext().actorOf(
- ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
- schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId(), clientVersion),
- transactionId.toString());
+ return createShardTransaction(factory.newWriteOnlyTransaction(), transactionId, clientVersion);
} else {
throw new IllegalArgumentException(
"Shard="+name + ":CreateTransaction message has unidentified transaction type="
}
}
+ // Creates a ShardTransaction child actor for the given backend transaction, named after the
+ // transaction id and placed on the dedicated transaction dispatcher (txnDispatcherPath) so
+ // transaction work does not compete with the actor system's default dispatcher.
+ private ActorRef createShardTransaction(DOMStoreTransaction transaction, ShardTransactionIdentifier transactionId,
+ short clientVersion){
+ return getContext().actorOf(
+ ShardTransaction.props(transaction, getSelf(),
+ schemaContext, datastoreContext, shardMBean,
+ transactionId.getRemoteTransactionId(), clientVersion)
+ .withDispatcher(txnDispatcherPath),
+ transactionId.toString());
+
+ }
+
private void createTransaction(CreateTransaction createTransaction) {
try {
ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
} catch (InterruptedException | ExecutionException e) {
shardMBean.incrementFailedTransactionsCount();
- LOG.error(e, "{}: Failed to commit", persistenceId());
+ LOG.error("{}: Failed to commit", persistenceId(), e);
}
}
try {
currentLogRecoveryBatch.add(((ModificationPayload) data).getModification());
} catch (ClassNotFoundException | IOException e) {
- LOG.error(e, "{}: Error extracting ModificationPayload", persistenceId());
+ LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
}
} else if (data instanceof CompositeModificationPayload) {
currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
shardMBean.incrementCommittedTransactionCount();
} catch (InterruptedException | ExecutionException e) {
shardMBean.incrementFailedTransactionsCount();
- LOG.error(e, "{}: Failed to commit", persistenceId());
+ LOG.error("{}: Failed to commit", persistenceId(), e);
}
}
}
try {
applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
} catch (ClassNotFoundException | IOException e) {
- LOG.error(e, "{}: Error extracting ModificationPayload", persistenceId());
+ LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
}
}
else if (data instanceof CompositeModificationPayload) {
transaction.write(DATASTORE_ROOT, node);
syncCommitTransaction(transaction);
} catch (InterruptedException | ExecutionException e) {
- LOG.error(e, "{}: An exception occurred when applying snapshot", persistenceId());
+ LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
} finally {
LOG.info("{}: Done applying snapshot", persistenceId());
}
import akka.actor.ActorRef;
import akka.actor.Status;
-import akka.event.LoggingAdapter;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.LinkedList;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.slf4j.Logger;
/**
* Coordinates commits for a shard ensuring only one concurrent 3-phase commit.
private final int queueCapacity;
- private final LoggingAdapter log;
+ private final Logger log;
private final String name;
- public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, LoggingAdapter log,
+ public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, Logger log,
String name) {
cohortCache = CacheBuilder.newBuilder().expireAfterAccess(
cacheExpiryTimeoutInSec, TimeUnit.SECONDS).build();
// Dequeue the next cohort entry waiting in the queue.
currentCohortEntry = queuedCohortEntries.poll();
if(currentCohortEntry != null) {
+ currentCohortEntry.updateLastAccessTime();
doCanCommit(currentCohortEntry);
}
}
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.cluster.ClusterEvent;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.japi.Function;
import akka.japi.Procedure;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
/**
* The ShardManager has the following jobs,
*/
public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ private final Logger LOG = LoggerFactory.getLogger(getClass());
// Stores a mapping between a member name and the address of the member
// Member names look like "member-1", "member-2" etc and are as specified
private final Configuration configuration;
+ private final String shardDispatcherPath;
+
private ShardManagerInfoMBean mBean;
private final DatastoreContext datastoreContext;
private final DataPersistenceProvider dataPersistenceProvider;
/**
- * @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
- * configuration or operational
*/
- protected ShardManager(String type, ClusterWrapper cluster, Configuration configuration,
+ protected ShardManager(ClusterWrapper cluster, Configuration configuration,
DatastoreContext datastoreContext) {
- this.type = Preconditions.checkNotNull(type, "type should not be null");
this.cluster = Preconditions.checkNotNull(cluster, "cluster should not be null");
this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
this.datastoreContext = datastoreContext;
this.dataPersistenceProvider = createDataPersistenceProvider(datastoreContext.isPersistent());
+ this.type = datastoreContext.getDataStoreType();
+ this.shardDispatcherPath =
+ new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
}
- public static Props props(final String type,
+ public static Props props(
final ClusterWrapper cluster,
final Configuration configuration,
final DatastoreContext datastoreContext) {
- Preconditions.checkNotNull(type, "type should not be null");
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
- return Props.create(new ShardManagerCreator(type, cluster, configuration, datastoreContext));
+ return Props.create(new ShardManagerCreator(cluster, configuration, datastoreContext));
}
@Override
knownModules = ImmutableSet.copyOf(msg.getModules());
} else if (message instanceof RecoveryFailure) {
RecoveryFailure failure = (RecoveryFailure) message;
- LOG.error(failure.cause(), "Recovery failed");
+ LOG.error("Recovery failed", failure.cause());
} else if (message instanceof RecoveryCompleted) {
LOG.info("Recovery complete : {}", persistenceId());
if(newModules.containsAll(knownModules)) {
- LOG.info("New SchemaContext has a super set of current knownModules - persisting info");
+ LOG.debug("New SchemaContext has a super set of current knownModules - persisting info");
knownModules = ImmutableSet.copyOf(newModules);
@Override
public void apply(SchemaContextModules param) throws Exception {
- LOG.info("Sending new SchemaContext to Shards");
+ LOG.debug("Sending new SchemaContext to Shards");
for (ShardInformation info : localShards.values()) {
if (info.getActor() == null) {
info.setActor(getContext().actorOf(Shard.props(info.getShardId(),
- info.getPeerAddresses(), datastoreContext, schemaContext),
- info.getShardId().toString()));
+ info.getPeerAddresses(), datastoreContext, schemaContext)
+ .withDispatcher(shardDispatcherPath), info.getShardId().toString()));
} else {
info.getActor().tell(message, getSelf());
}
});
} else {
- LOG.info("Rejecting schema context update because it is not a super set of previously known modules");
+ LOG.debug("Rejecting schema context update - not a super set of previously known modules:\nUPDATE: {}\nKNOWN: {}",
+ newModules, knownModules);
}
}
new Function<Throwable, SupervisorStrategy.Directive>() {
@Override
public SupervisorStrategy.Directive apply(Throwable t) {
- StringBuilder sb = new StringBuilder();
- for(StackTraceElement element : t.getStackTrace()) {
- sb.append("\n\tat ")
- .append(element.toString());
- }
- LOG.warning("Supervisor Strategy of resume applied {}",sb.toString());
+ // Pass the Throwable as the trailing argument so SLF4J prints the full stack trace,
+ // replacing the hand-rolled StringBuilder formatting; the failed child is resumed.
+ LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
return SupervisorStrategy.resume();
}
}
private static class ShardManagerCreator implements Creator<ShardManager> {
private static final long serialVersionUID = 1L;
- final String type;
final ClusterWrapper cluster;
final Configuration configuration;
final DatastoreContext datastoreContext;
- ShardManagerCreator(String type, ClusterWrapper cluster,
+ ShardManagerCreator(ClusterWrapper cluster,
Configuration configuration, DatastoreContext datastoreContext) {
- this.type = type;
this.cluster = cluster;
this.configuration = configuration;
this.datastoreContext = datastoreContext;
@Override
public ShardManager create() throws Exception {
- return new ShardManager(type, cluster, configuration, datastoreContext);
+ return new ShardManager(cluster, configuration, datastoreContext);
}
}
static class SchemaContextModules implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -8884620101025936590L;
+
private final Set<String> modules;
SchemaContextModules(Set<String> modules){
protected DOMStoreTransaction getDOMStoreTransaction() {
return transaction;
}
+
+ @Override
+ protected boolean returnCloseTransactionReply() {
+ // NOTE(review): suppresses the CloseTransactionReply the base class would send on
+ // close — presumably no sender awaits a reply for this transaction type; confirm
+ // against the callers of closeTransaction().
+ return false;
+ }
}
*/
package org.opendaylight.controller.cluster.datastore;
-import akka.event.LoggingAdapter;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
/**
* Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
private final SchemaContext schemaContext;
private final String shardName;
private final ExecutorService executor;
- private final LoggingAdapter log;
+ private final Logger log;
private final String name;
- ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, LoggingAdapter log,
+ ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, Logger log,
String name) {
this.schemaContext = schemaContext;
this.shardName = shardName;
* The ShardTransaction Actor delegates all actions to DOMDataReadWriteTransaction
* </p>
* <p>
- * Even though the DOMStore and the DOMStoreTransactionChain implement multiple types of transactions
- * the ShardTransaction Actor only works with read-write transactions. This is just to keep the logic simple. At this
- * time there are no known advantages for creating a read-only or write-only transaction which may change over time
- * at which point we can optimize things in the distributed store as well.
- * </p>
- * <p>
* Handles Messages <br/>
* ---------------- <br/>
* <li> {@link org.opendaylight.controller.cluster.datastore.messages.ReadData}
}
}
+ // Default: a CloseTransactionReply is sent when the transaction is closed with
+ // sendReply=true. Subclasses override to suppress the reply.
+ protected boolean returnCloseTransactionReply() {
+ return true;
+ }
+
private void closeTransaction(boolean sendReply) {
getDOMStoreTransaction().close();
- if(sendReply) {
+ if(sendReply && returnCloseTransactionReply()) {
getSender().tell(CloseTransactionReply.INSTANCE.toSerializable(), getSelf());
}
protected void readData(DOMStoreReadTransaction transaction, ReadData message,
final boolean returnSerialized) {
- final ActorRef sender = getSender();
- final ActorRef self = getSelf();
- final YangInstanceIdentifier path = message.getPath();
- final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
- transaction.read(path);
- future.addListener(new Runnable() {
- @Override
- public void run() {
- try {
- Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
- ReadDataReply readDataReply = new ReadDataReply(optional.orNull());
-
- sender.tell((returnSerialized ? readDataReply.toSerializable(clientTxVersion):
- readDataReply), self);
+ // Resolve the read future inline with checkedGet() instead of the previous async
+ // listener on the dispatcher; the reply (or a Status.Failure) goes straight back
+ // to the sender on this actor's thread.
+ final YangInstanceIdentifier path = message.getPath();
+ try {
+ final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future = transaction.read(path);
+ Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
+ ReadDataReply readDataReply = new ReadDataReply(optional.orNull(), clientTxVersion);
- } catch (Exception e) {
- shardStats.incrementFailedReadTransactionsCount();
- sender.tell(new akka.actor.Status.Failure(e), self);
- }
+ sender().tell((returnSerialized ? readDataReply.toSerializable(): readDataReply), self());
- }
- }, getContext().dispatcher());
+ } catch (Exception e) {
+ // Failures are counted in shard stats and propagated to the caller as a Failure message.
+ LOG.debug(String.format("Unexpected error reading path %s", path), e);
+ shardStats.incrementFailedReadTransactionsCount();
+ sender().tell(new akka.actor.Status.Failure(e), self());
+ }
}
protected void dataExists(DOMStoreReadTransaction transaction, DataExists message,
import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
*/
public class ShardWriteTransaction extends ShardTransaction {
- private final MutableCompositeModification modification = new MutableCompositeModification();
+ private final MutableCompositeModification compositeModification = new MutableCompositeModification();
private final DOMStoreWriteTransaction transaction;
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
@Override
public void handleReceive(Object message) throws Exception {
- if (message instanceof WriteData) {
- writeData(transaction, (WriteData) message, !SERIALIZED_REPLY);
-
- } else if (message instanceof MergeData) {
- mergeData(transaction, (MergeData) message, !SERIALIZED_REPLY);
-
- } else if (message instanceof DeleteData) {
- deleteData(transaction, (DeleteData) message, !SERIALIZED_REPLY);
-
+ // Unbatched WriteData/MergeData/DeleteData handling is replaced by the single
+ // BatchedModifications message; the serialized legacy forms below are kept for
+ // backwards compatibility with older clients.
+ if (message instanceof BatchedModifications) {
+ batchedModifications((BatchedModifications)message);
} else if (message instanceof ReadyTransaction) {
readyTransaction(transaction, !SERIALIZED_REPLY);
-
+ } else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ readyTransaction(transaction, SERIALIZED_REPLY);
} else if(WriteData.isSerializedType(message)) {
writeData(transaction, WriteData.fromSerializable(message), SERIALIZED_REPLY);
} else if(DeleteData.isSerializedType(message)) {
deleteData(transaction, DeleteData.fromSerializable(message), SERIALIZED_REPLY);
- } else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, SERIALIZED_REPLY);
-
} else if (message instanceof GetCompositedModification) {
// This is here for testing only
- getSender().tell(new GetCompositeModificationReply(modification), getSelf());
+ getSender().tell(new GetCompositeModificationReply(compositeModification), getSelf());
} else {
super.handleReceive(message);
}
}
+ // Applies each modification in the batch to the backing write transaction while also
+ // recording it in compositeModification for later replication/persistence, then replies
+ // with the count applied, or a Status.Failure on error.
+ // NOTE(review): if a modification mid-batch fails, earlier ones have already been applied
+ // and recorded — presumably the sender aborts the transaction on Failure; confirm.
+ private void batchedModifications(BatchedModifications batched) {
+ try {
+ for(Modification modification: batched.getModifications()) {
+ compositeModification.addModification(modification);
+ modification.apply(transaction);
+ }
+
+ getSender().tell(new BatchedModificationsReply(batched.getModifications().size()), getSelf());
+ } catch (Exception e) {
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+ }
+
private void writeData(DOMStoreWriteTransaction transaction, WriteData message,
boolean returnSerialized) {
LOG.debug("writeData at path : {}", message.getPath());
- modification.addModification(
+ compositeModification.addModification(
new WriteModification(message.getPath(), message.getData()));
try {
transaction.write(message.getPath(), message.getData());
boolean returnSerialized) {
LOG.debug("mergeData at path : {}", message.getPath());
- modification.addModification(
+ compositeModification.addModification(
new MergeModification(message.getPath(), message.getData()));
try {
boolean returnSerialized) {
LOG.debug("deleteData at path : {}", message.getPath());
- modification.addModification(new DeleteModification(message.getPath()));
+ compositeModification.addModification(new DeleteModification(message.getPath()));
try {
transaction.delete(message.getPath());
DeleteDataReply deleteDataReply = DeleteDataReply.INSTANCE;
DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
getShardActor().forward(new ForwardedReadyTransaction(transactionID, getClientTxVersion(),
- cohort, modification, returnSerialized), getContext());
+ cohort, compositeModification, returnSerialized), getContext());
// The shard will handle the commit from here so we're no longer needed - self-destruct.
getSelf().tell(PoisonPill.getInstance(), getSelf());
import akka.actor.Terminated;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.datastore.messages.Monitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TerminationMonitor extends UntypedActor{
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ private static final Logger LOG = LoggerFactory.getLogger(TerminationMonitor.class);
public TerminationMonitor(){
- LOG.info("Created TerminationMonitor");
+ LOG.debug("Created TerminationMonitor");
}
@Override public void onReceive(Object message) throws Exception {
import akka.actor.ActorSelection;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
private final List<Future<ActorSelection>> cohortFutures;
private volatile List<ActorSelection> cohorts;
private final String transactionId;
+ // Do-nothing callback used when a commit has no cohorts, so there is no timing or
+ // rate-limit bookkeeping to perform.
+ private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
+ @Override
+ public void run() {
+ }
+
+ @Override
+ public void success() {
+ }
+
+ @Override
+ public void failure() {
+ }
+ };
public ThreePhaseCommitCohortProxy(ActorContext actorContext,
List<Future<ActorSelection>> cohortFutures, String transactionId) {
private Future<Void> buildCohortList() {
Future<Iterable<ActorSelection>> combinedFutures = Futures.sequence(cohortFutures,
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
return combinedFutures.transform(new AbstractFunction1<Iterable<ActorSelection>, Void>() {
@Override
}
return null;
}
- }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
+ }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
}
@Override
finishCanCommit(returnFuture);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
return returnFuture;
}
}
returnFuture.set(Boolean.valueOf(result));
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
private Future<Iterable<Object>> invokeCohorts(Object message) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, cohort);
}
-
- futureList.add(actorContext.executeOperationAsync(cohort, message));
+ futureList.add(actorContext.executeOperationAsync(cohort, message, actorContext.getTransactionCommitOperationTimeout()));
}
- return Futures.sequence(futureList, actorContext.getActorSystem().dispatcher());
+ return Futures.sequence(futureList, actorContext.getClientDispatcher());
}
@Override
@Override
public ListenableFuture<Void> commit() {
- return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
- CommitTransactionReply.SERIALIZABLE_CLASS, true);
+ // Use the metrics-driven CommitCallback only when there are cohorts to commit;
+ // an empty cohort list has nothing to time, so skip the rate-limit bookkeeping.
+ OperationCallback operationCallback = cohortFutures.isEmpty() ? NO_OP_CALLBACK :
+ new CommitCallback(actorContext);
+
+ return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
+ CommitTransactionReply.SERIALIZABLE_CLASS, true, operationCallback);
+ }
+
+ // Convenience overload: perform the operation with no timing/metrics callback.
+ private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
+ final Class<?> expectedResponseClass, final boolean propagateException) {
+ return voidOperation(operationName, message, expectedResponseClass, propagateException, NO_OP_CALLBACK);
+ }
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
- final Class<?> expectedResponseClass, final boolean propagateException) {
+ final Class<?> expectedResponseClass, final boolean propagateException, final OperationCallback callback) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} {}", transactionId, operationName);
if(cohorts != null) {
finishVoidOperation(operationName, message, expectedResponseClass, propagateException,
- returnFuture);
+ returnFuture, callback);
} else {
buildCohortList().onComplete(new OnComplete<Void>() {
@Override
}
} else {
finishVoidOperation(operationName, message, expectedResponseClass,
- propagateException, returnFuture);
+ propagateException, returnFuture, callback);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
return returnFuture;
}
private void finishVoidOperation(final String operationName, final Object message,
- final Class<?> expectedResponseClass, final boolean propagateException,
- final SettableFuture<Void> returnFuture) {
+ final Class<?> expectedResponseClass, final boolean propagateException,
+ final SettableFuture<Void> returnFuture, final OperationCallback callback) {
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} finish {}", transactionId, operationName);
}
+
+ callback.run();
+
Future<Iterable<Object>> combinedFuture = invokeCohorts(message);
combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
}
if(exceptionToPropagate != null) {
+
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
operationName, exceptionToPropagate);
}
returnFuture.set(null);
}
+
+ callback.failure();
} else {
+
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
}
returnFuture.set(null);
+
+ callback.success();
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
@VisibleForTesting
List<Future<ActorSelection>> getCohortFutures() {
return Collections.unmodifiableList(cohortFutures);
}
+
+ // Lifecycle hooks around a 3PC void operation: run() is invoked before the cohort
+ // messages are sent, then exactly one of success() or failure() when they complete.
+ private static interface OperationCallback {
+ void run();
+ void success();
+ void failure();
+ }
+
+ // Times each commit with a codahale Timer and, on success, derives a new transaction
+ // creation rate limit from the timer's 95th-percentile commit latency.
+ private static class CommitCallback implements OperationCallback{
+
+ private static final Logger LOG = LoggerFactory.getLogger(CommitCallback.class);
+ private static final String COMMIT = "commit";
+
+ private final Timer commitTimer;
+ private final ActorContext actorContext;
+ private Timer.Context timerContext;
+
+ CommitCallback(ActorContext actorContext){
+ this.actorContext = actorContext;
+ commitTimer = actorContext.getOperationTimer(COMMIT);
+ }
+
+ @Override
+ public void run() {
+ // Start timing; success() stops the Timer.Context (failure() leaves it open).
+ timerContext = commitTimer.time();
+ }
+
+ @Override
+ public void success() {
+ timerContext.stop();
+
+ Snapshot timerSnapshot = commitTimer.getSnapshot();
+ double allowedLatencyInNanos = timerSnapshot.get95thPercentile();
+
+ long commitTimeoutInSeconds = actorContext.getDatastoreContext()
+ .getShardTransactionCommitTimeoutInSeconds();
+ long commitTimeoutInNanos = TimeUnit.SECONDS.toNanos(commitTimeoutInSeconds);
+
+ // Here we are trying to find out how many transactions per second are allowed
+ // NOTE(review): with no recorded samples get95thPercentile() may return 0, making
+ // newRateLimit infinite — confirm setTxCreationLimit guards against that.
+ double newRateLimit = ((double) commitTimeoutInNanos / allowedLatencyInNanos) / commitTimeoutInSeconds;
+
+ LOG.debug("Data Store {} commit rateLimit adjusted to {} allowedLatencyInNanos = {}",
+ actorContext.getDataStoreType(), newRateLimit, allowedLatencyInNanos);
+
+ actorContext.setTxCreationLimit(newRateLimit);
+ }
+
+ @Override
+ public void failure() {
+ // This would mean we couldn't get a transaction completed in 30 seconds which is
+ // the default transaction commit timeout. Using the timeout information to figure out the rate limit is
+ // not going to be useful - so we leave it as it is
+ }
+ }
+
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
+ actorContext.acquireTxCreationPermit();
return allocateWriteTransaction(TransactionProxy.TransactionType.READ_WRITE);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
+ actorContext.acquireTxCreationPermit();
return allocateWriteTransaction(TransactionProxy.TransactionType.WRITE_ONLY);
}
import com.google.common.util.concurrent.SettableFuture;
import java.util.List;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
-import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
-import org.opendaylight.controller.cluster.datastore.messages.VersionedSerializableMessage;
-import org.opendaylight.controller.cluster.datastore.messages.WriteData;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-final class TransactionContextImpl extends AbstractTransactionContext {
+class TransactionContextImpl extends AbstractTransactionContext {
private static final Logger LOG = LoggerFactory.getLogger(TransactionContextImpl.class);
private final ActorContext actorContext;
private final ActorSelection actor;
private final boolean isTxActorLocal;
private final short remoteTransactionVersion;
- private final OperationCompleter operationCompleter;
+ private final OperationCompleter operationCompleter;
+ private BatchedModifications batchedModifications;
TransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
ActorContext actorContext, SchemaContext schemaContext,
}
private Future<Object> completeOperation(Future<Object> operationFuture){
- operationFuture.onComplete(this.operationCompleter, actorContext.getActorSystem().dispatcher());
+ operationFuture.onComplete(this.operationCompleter, actorContext.getClientDispatcher());
return operationFuture;
}
return actor;
}
- private Future<Object> executeOperationAsync(SerializableMessage msg) {
- return completeOperation(actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg : msg.toSerializable()));
+ protected short getRemoteTransactionVersion() {
+ return remoteTransactionVersion;
}
- private Future<Object> executeOperationAsync(VersionedSerializableMessage msg) {
- return completeOperation(actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg :
- msg.toSerializable(remoteTransactionVersion)));
+ protected Future<Object> executeOperationAsync(SerializableMessage msg) {
+ return completeOperation(actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg : msg.toSerializable()));
}
@Override
LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
identifier, recordedOperationFutures.size());
+ // Send the remaining batched modifications if any.
+
+ sendBatchedModifications();
+
// Send the ReadyTransaction message to the Tx actor.
final Future<Object> replyFuture = executeOperationAsync(ReadyTransaction.INSTANCE);
futureList.add(replyFuture);
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
// Transform the combined Future into a Future that returns the cohort actor path from
// the ReadyTransactionReply. That's the end result of the ready operation.
serializedReadyReply.getClass()));
}
}
- }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
+ }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
+ }
+
+ private void batchModification(Modification modification) {
+ if(batchedModifications == null) {
+ batchedModifications = new BatchedModifications(remoteTransactionVersion);
+ }
+
+ batchedModifications.addModification(modification);
+
+ if(batchedModifications.getModifications().size() >=
+ actorContext.getDatastoreContext().getShardBatchedModificationCount()) {
+ sendBatchedModifications();
+ }
+ }
+
+ private void sendBatchedModifications() {
+ if(batchedModifications != null) {
+ LOG.debug("Tx {} sending {} batched modifications", identifier,
+ batchedModifications.getModifications().size());
+
+ recordedOperationFutures.add(executeOperationAsync(batchedModifications));
+ batchedModifications = null;
+ }
}
@Override
public void deleteData(YangInstanceIdentifier path) {
LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- recordedOperationFutures.add(executeOperationAsync(new DeleteData(path)));
+ batchModification(new DeleteModification(path));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- recordedOperationFutures.add(executeOperationAsync(new MergeData(path, data)));
+ batchModification(new MergeModification(path, data));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
LOG.debug("Tx {} writeData called path = {}", identifier, path);
- recordedOperationFutures.add(executeOperationAsync(new WriteData(path, data)));
+ batchModification(new WriteModification(path, data));
}
@Override
LOG.debug("Tx {} readData called path = {}", identifier, path);
+ // Send the remaining batched modifications if any.
+
+ sendBatchedModifications();
+
// If there were any previous recorded put/merge/delete operation reply Futures then we
// must wait for them to successfully complete. This is necessary to honor the read
// uncommitted semantics of the public API contract. If any one fails then fail the read.
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
Lists.newArrayList(recordedOperationFutures),
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
}
};
- combinedFutures.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
}
}
Future<Object> readFuture = executeOperationAsync(new ReadData(path));
- readFuture.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ readFuture.onComplete(onComplete, actorContext.getClientDispatcher());
}
@Override
LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ // Send the remaining batched modifications if any.
+
+ sendBatchedModifications();
+
// If there were any previous recorded put/merge/delete operation reply Futures then we
// must wait for them to successfully complete. This is necessary to honor the read
// uncommitted semantics of the public API contract. If any one fails then fail this
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
Lists.newArrayList(recordedOperationFutures),
- actorContext.getActorSystem().dispatcher());
+ actorContext.getClientDispatcher());
OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
public void onComplete(Throwable failure, Iterable<Object> notUsed)
}
};
- combinedFutures.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
}
}
Future<Object> future = executeOperationAsync(new DataExists(path));
- future.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
+ future.onComplete(onComplete, actorContext.getClientDispatcher());
}
}
private void throttleOperation(int acquirePermits) {
try {
- if(!operationLimiter.tryAcquire(acquirePermits, actorContext.getDatastoreContext().getOperationTimeoutInSeconds(), TimeUnit.SECONDS)){
+ if(!operationLimiter.tryAcquire(acquirePermits,
+ actorContext.getDatastoreContext().getOperationTimeoutInSeconds(), TimeUnit.SECONDS)){
LOG.warn("Failed to acquire operation permit for transaction {}", getIdentifier());
}
} catch (InterruptedException e) {
newTxFutureCallback.setPrimaryShard(primaryShard);
}
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
}
return txFutureCallback;
TransactionProxy.this.transactionType.ordinal(),
getTransactionChainId()).toSerializable());
- createTxFuture.onComplete(this, actorContext.getActorSystem().dispatcher());
+ createTxFuture.onComplete(this, actorContext.getClientDispatcher());
}
@Override
public void run() {
tryCreateTransaction();
}
- }, actorContext.getActorSystem().dispatcher());
+ }, actorContext.getClientDispatcher());
return;
}
}
private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
String transactionPath = reply.getTransactionPath();
- LOG.debug("Tx {} Received transaction actor path {}", identifier, transactionPath);
+ LOG.debug("Tx {} Received {}", identifier, reply);
ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
// Check if TxActor is created in the same node
boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
- return new TransactionContextImpl(transactionPath, transactionActor, identifier,
- actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
+ if(reply.getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
+ return new TransactionContextImpl(transactionPath, transactionActor, identifier,
+ actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
+ } else {
+ return new LegacyTransactionContextImpl(transactionPath, transactionActor, identifier,
+ actorContext, schemaContext, isTxActorLocal, reply.getVersion(), operationCompleter);
+ }
}
}
}
*/
package org.opendaylight.controller.cluster.datastore.compat;
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.japi.Creator;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
-import akka.japi.Creator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An actor to maintain backwards compatibility for the base Helium version where the 3-phase commit
*/
public class BackwardsCompatibleThreePhaseCommitCohort extends AbstractUntypedActor {
- private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
+ private static final Logger LOG = LoggerFactory.getLogger(BackwardsCompatibleThreePhaseCommitCohort.class);
private final String transactionId;
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+
+/**
+ * MXBean interface for data store configuration.
+ *
+ * Exposes the DatastoreContext settings (transaction timeouts, shard raft
+ * tuning, queue capacities, etc.) as read-only JMX attributes. Each getter
+ * mirrors one configuration value; see DatastoreConfigurationMXBeanImpl for
+ * the backing DatastoreContext accessor of each attribute.
+ *
+ * @author Thomas Pantelis
+ */
+public interface DatastoreConfigurationMXBean {
+ long getShardTransactionIdleTimeoutInSeconds();
+
+ long getOperationTimeoutInSeconds();
+
+ long getShardHeartbeatIntervalInMillis();
+
+ int getShardJournalRecoveryLogBatchSize();
+
+ long getShardIsolatedLeaderCheckIntervalInMillis();
+
+ long getShardElectionTimeoutFactor();
+
+ int getShardSnapshotDataThresholdPercentage();
+
+ long getShardSnapshotBatchCount();
+
+ long getShardTransactionCommitTimeoutInSeconds();
+
+ int getShardTransactionCommitQueueCapacity();
+
+ long getShardInitializationTimeoutInSeconds();
+
+ long getShardLeaderElectionTimeoutInSeconds();
+
+ boolean isPersistent();
+
+ long getTransactionCreationInitialRateLimit();
+
+ int getMaxShardDataChangeExecutorPoolSize();
+
+ int getMaxShardDataChangeExecutorQueueSize();
+
+ int getMaxShardDataChangeListenerQueueSize();
+
+ int getMaxShardDataStoreExecutorQueueSize();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+
+/**
+ * Implementation of DatastoreConfigurationMXBean.
+ *
+ * A thin JMX adapter: every getter simply delegates to the injected
+ * DatastoreContext (directly, or via its embedded shard raft config or
+ * data store properties).
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
+ public static final String JMX_CATEGORY_CONFIGURATION = "Configuration";
+
+ // Source of all exposed values; null until setContext() is called.
+ // NOTE(review): getters dereference this without a null check, so they
+ // will NPE if invoked before setContext() — confirm the registration
+ // order guarantees the context is set first.
+ private DatastoreContext context;
+
+ public DatastoreConfigurationMXBeanImpl(String mxBeanType) {
+ super("Datastore", mxBeanType, JMX_CATEGORY_CONFIGURATION);
+ }
+
+ // Injects the DatastoreContext whose settings this bean exposes.
+ public void setContext(DatastoreContext context) {
+ this.context = context;
+ }
+
+ @Override
+ public long getShardTransactionIdleTimeoutInSeconds() {
+ // Duration -> seconds conversion done here so JMX sees a plain long.
+ return context.getShardTransactionIdleTimeout().toSeconds();
+ }
+
+ @Override
+ public long getOperationTimeoutInSeconds() {
+ return context.getOperationTimeoutInSeconds();
+ }
+
+ @Override
+ public long getShardHeartbeatIntervalInMillis() {
+ // Raft tuning values live on the nested shard raft config.
+ return context.getShardRaftConfig().getHeartBeatInterval().toMillis();
+ }
+
+ @Override
+ public int getShardJournalRecoveryLogBatchSize() {
+ return context.getShardRaftConfig().getJournalRecoveryLogBatchSize();
+ }
+
+ @Override
+ public long getShardIsolatedLeaderCheckIntervalInMillis() {
+ return context.getShardRaftConfig().getIsolatedCheckIntervalInMillis();
+ }
+
+ @Override
+ public long getShardElectionTimeoutFactor() {
+ return context.getShardRaftConfig().getElectionTimeoutFactor();
+ }
+
+ @Override
+ public int getShardSnapshotDataThresholdPercentage() {
+ return context.getShardRaftConfig().getSnapshotDataThresholdPercentage();
+ }
+
+ @Override
+ public long getShardSnapshotBatchCount() {
+ return context.getShardRaftConfig().getSnapshotBatchCount();
+ }
+
+ @Override
+ public long getShardTransactionCommitTimeoutInSeconds() {
+ return context.getShardTransactionCommitTimeoutInSeconds();
+ }
+
+ @Override
+ public int getShardTransactionCommitQueueCapacity() {
+ return context.getShardTransactionCommitQueueCapacity();
+ }
+
+ @Override
+ public long getShardInitializationTimeoutInSeconds() {
+ // Timeout is an Akka-style wrapper; unwrap to a scala Duration first.
+ return context.getShardInitializationTimeout().duration().toSeconds();
+ }
+
+ @Override
+ public long getShardLeaderElectionTimeoutInSeconds() {
+ return context.getShardLeaderElectionTimeout().duration().toSeconds();
+ }
+
+ @Override
+ public boolean isPersistent() {
+ return context.isPersistent();
+ }
+
+ @Override
+ public long getTransactionCreationInitialRateLimit() {
+ return context.getTransactionCreationInitialRateLimit();
+ }
+
+ @Override
+ public int getMaxShardDataChangeExecutorPoolSize() {
+ // Executor/queue sizing values live on the nested data store properties.
+ return context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
+ }
+
+ @Override
+ public int getMaxShardDataChangeExecutorQueueSize() {
+ return context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
+ }
+
+ @Override
+ public int getMaxShardDataChangeListenerQueueSize() {
+ return context.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
+ }
+
+ @Override
+ public int getMaxShardDataStoreExecutorQueueSize() {
+ return context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+
+/**
+ * Message used to batch write, merge, delete modification operations to the ShardTransaction actor.
+ *
+ * Extends MutableCompositeModification so the batched Modification list and
+ * the version-aware Externalizable wire format are inherited; this class only
+ * adds the SerializableMessage marker so it can be sent as-is.
+ *
+ * @author Thomas Pantelis
+ */
+public class BatchedModifications extends MutableCompositeModification implements SerializableMessage {
+ private static final long serialVersionUID = 1L;
+
+ // No-arg constructor required for Externalizable deserialization;
+ // defaults the version via the superclass no-arg constructor.
+ public BatchedModifications() {
+ }
+
+ // Creates a batch targeted at a peer speaking the given DataStoreVersions version.
+ public BatchedModifications(short version) {
+ super(version);
+ }
+
+ @Override
+ public Object toSerializable() {
+ // The class itself is the wire form - no protobuf conversion needed.
+ return this;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * The reply for the BatchedModifications message.
+ *
+ * @author Thomas Pantelis
+ */
+public class BatchedModificationsReply extends VersionedExternalizableMessage {
+ private static final long serialVersionUID = 1L;
+
+ // Count reported back to the sender; presumably the number of
+ // modifications applied from the batch — confirm against the
+ // ShardTransaction actor that constructs this reply.
+ private int numBatched;
+
+ // No-arg constructor required for Externalizable deserialization.
+ public BatchedModificationsReply() {
+ }
+
+ public BatchedModificationsReply(int numBatched) {
+ this.numBatched = numBatched;
+ }
+
+
+ public int getNumBatched() {
+ return numBatched;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ // Superclass reads the version short first; field order must match writeExternal.
+ super.readExternal(in);
+ numBatched = in.readInt();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ super.writeExternal(out);
+ out.writeInt(numBatched);
+ }
+
+ @Override
+ public Object toSerializable() {
+ // The class itself is the wire form - no protobuf conversion needed.
+ return this;
+ }
+}
(short)o.getMessageVersion());
}
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("CreateTransactionReply [transactionPath=").append(transactionPath).append(", transactionId=")
+ .append(transactionId).append(", version=").append(version).append("]");
+ return builder.toString();
+ }
}
package org.opendaylight.controller.cluster.datastore.messages;
-import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-public class DeleteData implements VersionedSerializableMessage, Externalizable {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public class DeleteData extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
public static final Class<DeleteData> SERIALIZABLE_CLASS = DeleteData.class;
private YangInstanceIdentifier path;
- private short version;
public DeleteData() {
}
- public DeleteData(final YangInstanceIdentifier path) {
+ public DeleteData(final YangInstanceIdentifier path, short version) {
+ super(version);
this.path = path;
}
return path;
}
- public short getVersion() {
- return version;
- }
-
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- version = in.readShort(); // Read the version - don't need to do anything with it now
+ super.readExternal(in);
path = SerializationUtils.deserializePath(in);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(version);
+ super.writeExternal(out);
SerializationUtils.serializePath(path, out);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- version = toVersion;
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
// To base or R1 Helium version
} else {
// From base or R1 Helium version
ShardTransactionMessages.DeleteData o = (ShardTransactionMessages.DeleteData) serializable;
- return new DeleteData(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
+ return new DeleteData(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+/**
+ * @deprecated Replaced by BatchedModificationsReply.
+ */
+@Deprecated
public class DeleteDataReply extends EmptyReply {
+ private static final long serialVersionUID = 1L;
private static final Object LEGACY_SERIALIZED_INSTANCE =
ShardTransactionMessages.DeleteDataReply.newBuilder().build();
*
* @author Thomas Pantelis
*/
-public abstract class EmptyReply extends EmptyExternalizable implements VersionedSerializableMessage {
+public abstract class EmptyReply extends EmptyExternalizable {
private final Object legacySerializedInstance;
this.legacySerializedInstance = legacySerializedInstance;
}
- @Override
public Object toSerializable(short toVersion) {
return toVersion >= DataStoreVersions.LITHIUM_VERSION ? this : legacySerializedInstance;
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class MergeData extends ModifyData implements VersionedSerializableMessage {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public class MergeData extends ModifyData {
private static final long serialVersionUID = 1L;
public static final Class<MergeData> SERIALIZABLE_CLASS = MergeData.class;
public MergeData() {
}
- public MergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- super(path, data);
+ public MergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, short version) {
+ super(path, data, version);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- setVersion(toVersion);
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
// To base or R1 Helium version
ShardTransactionMessages.MergeData o = (ShardTransactionMessages.MergeData) serializable;
Decoded decoded = new NormalizedNodeToNodeCodec(null).decode(
o.getInstanceIdentifierPathArguments(), o.getNormalizedNode());
- return new MergeData(decoded.getDecodedPath(), decoded.getDecodedNode());
+ return new MergeData(decoded.getDecodedPath(), decoded.getDecodedNode(),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+/**
+ * @deprecated Replaced by BatchedModificationsReply.
+ */
+@Deprecated
public class MergeDataReply extends EmptyReply {
private static final long serialVersionUID = 1L;
package org.opendaylight.controller.cluster.datastore.messages;
-import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public abstract class ModifyData implements Externalizable {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public abstract class ModifyData extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
private YangInstanceIdentifier path;
private NormalizedNode<?, ?> data;
- private short version;
protected ModifyData() {
}
- protected ModifyData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ protected ModifyData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, short version) {
+ super(version);
this.path = path;
this.data = data;
}
return data;
}
- public short getVersion() {
- return version;
- }
-
- protected void setVersion(short version) {
- this.version = version;
- }
-
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- version = in.readShort();
+ super.readExternal(in);
SerializationUtils.deserializePathAndNode(in, this, APPLIER);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(version);
+ super.writeExternal(out);
SerializationUtils.serializePathAndNode(path, data, out);
}
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.protobuf.ByteString;
-import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class ReadDataReply implements VersionedSerializableMessage, Externalizable {
+public class ReadDataReply extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
public static final Class<ReadDataReply> SERIALIZABLE_CLASS = ReadDataReply.class;
private NormalizedNode<?, ?> normalizedNode;
- private short version;
public ReadDataReply() {
}
- public ReadDataReply(NormalizedNode<?, ?> normalizedNode) {
+ public ReadDataReply(NormalizedNode<?, ?> normalizedNode, short version) {
+ super(version);
this.normalizedNode = normalizedNode;
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- version = in.readShort();
+ super.readExternal(in);
normalizedNode = SerializationUtils.deserializeNormalizedNode(in);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(version);
+ super.writeExternal(out);
SerializationUtils.serializeNormalizedNode(normalizedNode, out);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- version = toVersion;
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
return toSerializableReadDataReply(normalizedNode);
} else {
ShardTransactionMessages.ReadDataReply o =
(ShardTransactionMessages.ReadDataReply) serializable;
- return new ReadDataReply(new NormalizedNodeToNodeCodec(null).decode(o.getNormalizedNode()));
+ return new ReadDataReply(new NormalizedNodeToNodeCodec(null).decode(o.getNormalizedNode()),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Abstract base class for a versioned Externalizable message.
+ *
+ * Serializes a leading version short ahead of subclass payload data, so
+ * subclasses that override readExternal/writeExternal must invoke the
+ * super implementation first (as DeleteData, ModifyData, ReadDataReply
+ * and BatchedModificationsReply do) to keep the stream layout consistent.
+ *
+ * @author Thomas Pantelis
+ */
+public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage {
+ private static final long serialVersionUID = 1L;
+
+ // DataStoreVersions value of the message recipient/sender protocol;
+ // defaults to 0 when constructed via the no-arg deserialization path
+ // until readExternal populates it.
+ private short version;
+
+ // No-arg constructor required for Externalizable deserialization.
+ public VersionedExternalizableMessage() {
+ }
+
+ public VersionedExternalizableMessage(short version) {
+ this.version = version;
+ }
+
+ public short getVersion() {
+ return version;
+ }
+
+ @Override
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+ version = in.readShort();
+ }
+
+ @Override
+ public void writeExternal(ObjectOutput out) throws IOException {
+ out.writeShort(version);
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-/**
- * Interface for a Serializable message with versioning.
- *
- * @author Thomas Pantelis
- */
-public interface VersionedSerializableMessage {
- Object toSerializable(short toVersion);
-}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-public class WriteData extends ModifyData implements VersionedSerializableMessage {
+/**
+ * @deprecated Replaced by BatchedModifications.
+ */
+@Deprecated
+public class WriteData extends ModifyData {
private static final long serialVersionUID = 1L;
public static final Class<WriteData> SERIALIZABLE_CLASS = WriteData.class;
public WriteData() {
}
- public WriteData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- super(path, data);
+ public WriteData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, short version) {
+ super(path, data, version);
}
@Override
- public Object toSerializable(short toVersion) {
- if(toVersion >= DataStoreVersions.LITHIUM_VERSION) {
- setVersion(toVersion);
+ public Object toSerializable() {
+ if(getVersion() >= DataStoreVersions.LITHIUM_VERSION) {
return this;
} else {
// To base or R1 Helium version
ShardTransactionMessages.WriteData o = (ShardTransactionMessages.WriteData) serializable;
Decoded decoded = new NormalizedNodeToNodeCodec(null).decode(
o.getInstanceIdentifierPathArguments(), o.getNormalizedNode());
- return new WriteData(decoded.getDecodedPath(), decoded.getDecodedNode());
+ return new WriteData(decoded.getDecodedPath(), decoded.getDecodedNode(),
+ DataStoreVersions.HELIUM_2_VERSION);
}
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+/**
+ * @deprecated Replaced by BatchedModificationsReply.
+ */
+@Deprecated
public class WriteDataReply extends EmptyReply {
private static final long serialVersionUID = 1L;
public abstract class AbstractModification implements Modification {
private YangInstanceIdentifier path;
+ private short version;
- protected AbstractModification() {
+ protected AbstractModification(short version) {
+ this.version = version;
}
protected AbstractModification(YangInstanceIdentifier path) {
public YangInstanceIdentifier getPath() {
return path;
}
+
+ public short getVersion() {
+ return version;
+ }
}
private static final long serialVersionUID = 1L;
public DeleteModification() {
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public DeleteModification(short version) {
+ super(version);
}
public DeleteModification(YangInstanceIdentifier path) {
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- in.readShort();
setPath(SerializationUtils.deserializePath(in));
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(DataStoreVersions.CURRENT_VERSION);
SerializationUtils.serializePath(getPath(), out);
}
return new DeleteModification(InstanceIdentifierUtils.fromSerializable(o.getPath()));
}
- public static DeleteModification fromStream(ObjectInput in) throws ClassNotFoundException, IOException {
- DeleteModification mod = new DeleteModification();
+ public static DeleteModification fromStream(ObjectInput in, short version)
+ throws ClassNotFoundException, IOException {
+ DeleteModification mod = new DeleteModification(version);
mod.readExternal(in);
return mod;
}
import java.io.IOException;
import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Decoded;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
private static final long serialVersionUID = 1L;
public MergeModification() {
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public MergeModification(short version) {
+ super(version);
}
public MergeModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
return new MergeModification(decoded.getDecodedPath(), decoded.getDecodedNode());
}
- public static MergeModification fromStream(ObjectInput in) throws ClassNotFoundException, IOException {
- MergeModification mod = new MergeModification();
+ public static MergeModification fromStream(ObjectInput in, short version)
+ throws ClassNotFoundException, IOException {
+ MergeModification mod = new MergeModification(version);
mod.readExternal(in);
return mod;
}
public class MutableCompositeModification implements CompositeModification {
private static final long serialVersionUID = 1L;
- private final List<Modification> modifications;
+ private final List<Modification> modifications = new ArrayList<>();
+ private short version;
public MutableCompositeModification() {
- modifications = new ArrayList<>();
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public MutableCompositeModification(short version) {
+ this.version = version;
}
@Override
return COMPOSITE;
}
+ public short getVersion() {
+ return version;
+ }
+
+ public void setVersion(short version) {
+ this.version = version;
+ }
+
/**
* Add a new Modification to the list of Modifications represented by this
* composite
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- in.readShort();
+ version = in.readShort();
int size = in.readInt();
byte type = in.readByte();
switch(type) {
case Modification.WRITE:
- modifications.add(WriteModification.fromStream(in));
+ modifications.add(WriteModification.fromStream(in, version));
break;
case Modification.MERGE:
- modifications.add(MergeModification.fromStream(in));
+ modifications.add(MergeModification.fromStream(in, version));
break;
case Modification.DELETE:
- modifications.add(DeleteModification.fromStream(in));
+ modifications.add(DeleteModification.fromStream(in, version));
break;
}
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(DataStoreVersions.CURRENT_VERSION);
+ out.writeShort(version);
out.writeInt(modifications.size());
builder.setTimeStamp(System.nanoTime());
for (Modification m : modifications) {
- builder.addModification(
- (PersistentMessages.Modification) m.toSerializable());
+ builder.addModification((PersistentMessages.Modification) m.toSerializable());
}
return builder.build();
private NormalizedNode<?, ?> data;
public WriteModification() {
+ this(DataStoreVersions.CURRENT_VERSION);
+ }
+
+ public WriteModification(short version) {
+ super(version);
}
public WriteModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
- in.readShort(); // version
-
SerializationUtils.deserializePathAndNode(in, this, APPLIER);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- out.writeShort(DataStoreVersions.CURRENT_VERSION);
SerializationUtils.serializePathAndNode(getPath(), data, out);
}
return new WriteModification(decoded.getDecodedPath(), decoded.getDecodedNode());
}
- public static WriteModification fromStream(ObjectInput in) throws ClassNotFoundException, IOException {
- WriteModification mod = new WriteModification();
+ public static WriteModification fromStream(ObjectInput in, short version)
+ throws ClassNotFoundException, IOException {
+ WriteModification mod = new WriteModification(version);
mod.readExternal(in);
return mod;
}
import akka.dispatch.Mapper;
import akka.pattern.AskTimeoutException;
import akka.util.Timeout;
+import com.codahale.metrics.JmxReporter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
+import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.Await;
+import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
* but should not be passed to actors especially remote actors
*/
public class ActorContext {
- private static final Logger
- LOG = LoggerFactory.getLogger(ActorContext.class);
-
- public static final String MAILBOX = "bounded-mailbox";
-
+ private static final Logger LOG = LoggerFactory.getLogger(ActorContext.class);
+ private static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
+ private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
+ private static final String METRIC_RATE = "rate";
+ private static final String DOMAIN = "org.opendaylight.controller.cluster.datastore";
private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
new Mapper<Throwable, Throwable>() {
@Override
return actualFailure;
}
};
+ public static final String MAILBOX = "bounded-mailbox";
private final ActorSystem actorSystem;
private final ActorRef shardManager;
private final ClusterWrapper clusterWrapper;
private final Configuration configuration;
private final DatastoreContext datastoreContext;
- private volatile SchemaContext schemaContext;
private final FiniteDuration operationDuration;
private final Timeout operationTimeout;
private final String selfAddressHostPort;
+ private final RateLimiter txRateLimiter;
+ private final MetricRegistry metricRegistry = new MetricRegistry();
+ private final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(DOMAIN).build();
private final int transactionOutstandingOperationLimit;
+ private final Timeout transactionCommitOperationTimeout;
+ private final Dispatchers dispatchers;
+
+ private volatile SchemaContext schemaContext;
public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
ClusterWrapper clusterWrapper, Configuration configuration) {
this.clusterWrapper = clusterWrapper;
this.configuration = configuration;
this.datastoreContext = datastoreContext;
+ this.txRateLimiter = RateLimiter.create(datastoreContext.getTransactionCreationInitialRateLimit());
+ this.dispatchers = new Dispatchers(actorSystem.dispatchers());
- operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(),
- TimeUnit.SECONDS);
+ operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(), TimeUnit.SECONDS);
operationTimeout = new Timeout(operationDuration);
+ transactionCommitOperationTimeout = new Timeout(Duration.create(getDatastoreContext().getShardTransactionCommitTimeoutInSeconds(),
+ TimeUnit.SECONDS));
+
Address selfAddress = clusterWrapper.getSelfAddress();
if (selfAddress != null && !selfAddress.host().isEmpty()) {
}
transactionOutstandingOperationLimit = new CommonConfig(this.getActorSystem().settings().config()).getMailBoxCapacity();
+ jmxReporter.start();
+
}
public DatastoreContext getDatastoreContext() {
throw new UnknownMessageException(String.format(
"FindPrimary returned unkown response: %s", response));
}
- }, FIND_PRIMARY_FAILURE_TRANSFORMER, getActorSystem().dispatcher());
+ }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
}
/**
throw new UnknownMessageException(String.format(
"FindLocalShard returned unkown response: %s", response));
}
- }, getActorSystem().dispatcher());
+ }, getClientDispatcher());
}
private String findPrimaryPathOrNull(String shardName) {
public int getTransactionOutstandingOperationLimit(){
return transactionOutstandingOperationLimit;
}
+
+ /**
+ * This is a utility method that lets us get a Timer object for any operation. This is a little open-ended to allow
+ * us to create a timer for pretty much anything.
+ *
+ * @param operationName the name of the operation to be timed
+ * @return a Timer from the metrics registry for the given operation
+ */
+ public Timer getOperationTimer(String operationName){
+ final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, datastoreContext.getDataStoreType(), operationName, METRIC_RATE);
+ return metricRegistry.timer(rate);
+ }
+
+ /**
+ * Get the type of the data store to which this ActorContext belongs
+ *
+ * @return the data store type, e.g. "config" or "operational"
+ */
+ public String getDataStoreType() {
+ return datastoreContext.getDataStoreType();
+ }
+
+ /**
+ * Set the number of transaction creation permits that are to be allowed
+ *
+ * @param permitsPerSecond the number of transaction creation permits allowed per second
+ */
+ public void setTxCreationLimit(double permitsPerSecond){
+ txRateLimiter.setRate(permitsPerSecond);
+ }
+
+ /**
+ * Get the current transaction creation rate limit
+ * @return the current number of transaction creation permits per second
+ */
+ public double getTxCreationLimit(){
+ return txRateLimiter.getRate();
+ }
+
+ /**
+ * Try to acquire a transaction creation permit. Will block if no permits are available.
+ */
+ public void acquireTxCreationPermit(){
+ txRateLimiter.acquire();
+ }
+
+ /**
+ * Return the operation timeout to be used when committing transactions
+ * @return the Timeout used when committing transactions
+ */
+ public Timeout getTransactionCommitOperationTimeout(){
+ return transactionCommitOperationTimeout;
+ }
+
+ /**
+ * An akka dispatcher that is meant to be used when processing ask Futures which were triggered by client
+ * code on the datastore
+ * @return the ExecutionContext for processing client-initiated ask Futures
+ */
+ public ExecutionContext getClientDispatcher() {
+ return this.dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
+ }
+
+ public String getNotificationDispatcherPath(){
+ return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
+ }
+
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Preconditions;
+import scala.concurrent.ExecutionContext;
+
+public class Dispatchers {
+ public static final String DEFAULT_DISPATCHER_PATH = "akka.actor.default-dispatcher";
+ public static final String CLIENT_DISPATCHER_PATH = "client-dispatcher";
+ public static final String TXN_DISPATCHER_PATH = "txn-dispatcher";
+ public static final String SHARD_DISPATCHER_PATH = "shard-dispatcher";
+ public static final String NOTIFICATION_DISPATCHER_PATH = "notification-dispatcher";
+
+ private final akka.dispatch.Dispatchers dispatchers;
+
+ public static enum DispatcherType {
+ Client(CLIENT_DISPATCHER_PATH),
+ Transaction(TXN_DISPATCHER_PATH),
+ Shard(SHARD_DISPATCHER_PATH),
+ Notification(NOTIFICATION_DISPATCHER_PATH);
+
+ private final String path;
+ private DispatcherType(String path){
+ this.path = path;
+ }
+ private String path(akka.dispatch.Dispatchers dispatchers){
+ if(dispatchers.hasDispatcher(path)){
+ return path;
+ }
+ return DEFAULT_DISPATCHER_PATH;
+ }
+
+ private ExecutionContext dispatcher(akka.dispatch.Dispatchers dispatchers){
+ if(dispatchers.hasDispatcher(path)){
+ return dispatchers.lookup(path);
+ }
+ return dispatchers.defaultGlobalDispatcher();
+ }
+ }
+
+ public Dispatchers(akka.dispatch.Dispatchers dispatchers){
+ Preconditions.checkNotNull(dispatchers, "dispatchers should not be null");
+ this.dispatchers = dispatchers;
+ }
+
+ public ExecutionContext getDispatcher(DispatcherType dispatcherType){
+ return dispatcherType.dispatcher(this.dispatchers);
+ }
+
+ public String getDispatcherPath(DispatcherType dispatcherType){
+ return dispatcherType.path(this.dispatchers);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.ImmutableList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * MessageTracker is a diagnostic utility class to be used for figuring out why a certain message which was
+ * expected to arrive in a given time interval does not arrive. It attempts to keep track of all the messages that
+ * were received between the arrival of two instances of the same message and the amount of time it took to process each
+ * of those messages.
+ * <br/>
+ * Usage of the API is as follows,
+ * <pre>
+ *
+ * // Track the Foo class, Here we expect to see a message of type Foo come in every 10 millis
+ * MessageTracker tracker = new MessageTracker(Foo.class, 10);
+ *
+ * // Begin the tracking process. If this is not called then calling received and done on the resultant Context
+ * // will do nothing
+ * tracker.begin();
+ *
+ * .....
+ *
+ * MessageTracker.Context context = tracker.received(message);
+ *
+ * if(context.error().isPresent()){
+ * LOG.error("{}", context.error().get());
+ * }
+ *
+ * // Some custom processing
+ * process(message);
+ *
+ * context.done();
+ *
+ * </pre>
+ */
+public class MessageTracker {
+
+ private static final Context NO_OP_CONTEXT = new NoOpContext();
+
+ private final Class expectedMessageClass;
+
+ private final long expectedArrivalInterval;
+
+ private final List<MessageProcessingTime> messagesSinceLastExpectedMessage = new LinkedList<>();
+
+ private Stopwatch expectedMessageWatch;
+
+ private boolean enabled = false;
+
+ private Object lastExpectedMessage;
+
+ private Object currentMessage;
+
+ private final CurrentMessageContext currentMessageContext = new CurrentMessageContext();
+
+ /**
+ *
+ * @param expectedMessageClass The class of the message to track
+ * @param expectedArrivalIntervalInMillis The expected arrival interval between two instances of the expected
+ * message
+ */
+ public MessageTracker(Class expectedMessageClass, long expectedArrivalIntervalInMillis){
+ this.expectedMessageClass = expectedMessageClass;
+ this.expectedArrivalInterval = expectedArrivalIntervalInMillis;
+ }
+
+ public void begin(){
+ if(enabled) {
+ return;
+ }
+ enabled = true;
+ expectedMessageWatch = Stopwatch.createStarted();
+ }
+
+ public Context received(Object message){
+ if(!enabled) {
+ return NO_OP_CONTEXT;
+ }
+ this.currentMessage = message;
+ if(expectedMessageClass.isInstance(message)){
+ long actualElapsedTime = expectedMessageWatch.elapsed(TimeUnit.MILLISECONDS);
+ if(actualElapsedTime > expectedArrivalInterval){
+ return new ErrorContext(message, Optional.of(new FailedExpectation(lastExpectedMessage, message,
+ ImmutableList.copyOf(messagesSinceLastExpectedMessage), expectedArrivalInterval,
+ actualElapsedTime)));
+ }
+ this.lastExpectedMessage = message;
+ this.messagesSinceLastExpectedMessage.clear();
+ }
+
+ currentMessageContext.reset();
+ return currentMessageContext;
+ }
+
+ private void processed(Object message, long messageElapseTimeInNanos){
+ if(!enabled) {
+ return;
+ }
+ if(!expectedMessageClass.isInstance(message)){
+ this.messagesSinceLastExpectedMessage.add(new MessageProcessingTime(message.getClass(), messageElapseTimeInNanos));
+ }
+ }
+
+ public List<MessageProcessingTime> getMessagesSinceLastExpectedMessage(){
+ return ImmutableList.copyOf(this.messagesSinceLastExpectedMessage);
+ }
+
+ public static class MessageProcessingTime {
+ private final Class messageClass;
+ private final long elapsedTimeInNanos;
+
+ MessageProcessingTime(Class messageClass, long elapsedTimeInNanos){
+ this.messageClass = messageClass;
+ this.elapsedTimeInNanos = elapsedTimeInNanos;
+ }
+
+ @Override
+ public String toString() {
+ return "MessageProcessingTime{" +
+ "messageClass=" + messageClass.getSimpleName() +
+ ", elapsedTimeInMillis=" + TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos) +
+ '}';
+ }
+
+ public Class getMessageClass() {
+ return messageClass;
+ }
+
+ public long getElapsedTimeInNanos() {
+ return elapsedTimeInNanos;
+ }
+ }
+
+ public interface Error {
+ Object getLastExpectedMessage();
+ Object getCurrentExpectedMessage();
+ List<MessageProcessingTime> getMessageProcessingTimesSinceLastExpectedMessage();
+ }
+
+ private class FailedExpectation implements Error {
+
+ private final Object lastExpectedMessage;
+ private final Object currentExpectedMessage;
+ private final List<MessageProcessingTime> messagesSinceLastExpectedMessage;
+ private final long expectedTimeInMillis;
+ private final long actualTimeInMillis;
+
+ public FailedExpectation(Object lastExpectedMessage, Object message, List<MessageProcessingTime> messagesSinceLastExpectedMessage, long expectedTimeInMillis, long actualTimeInMillis) {
+ this.lastExpectedMessage = lastExpectedMessage;
+ this.currentExpectedMessage = message;
+ this.messagesSinceLastExpectedMessage = messagesSinceLastExpectedMessage;
+ this.expectedTimeInMillis = expectedTimeInMillis;
+ this.actualTimeInMillis = actualTimeInMillis;
+ }
+
+ public Object getLastExpectedMessage() {
+ return lastExpectedMessage;
+ }
+
+ public Object getCurrentExpectedMessage() {
+ return currentExpectedMessage;
+ }
+
+ public List<MessageProcessingTime> getMessageProcessingTimesSinceLastExpectedMessage() {
+ return messagesSinceLastExpectedMessage;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("\n> Last Expected Message = " + lastExpectedMessage);
+ builder.append("\n> Current Expected Message = " + currentExpectedMessage);
+ builder.append("\n> Expected time in between messages = " + expectedTimeInMillis);
+ builder.append("\n> Actual time in between messages = " + actualTimeInMillis);
+ for (MessageProcessingTime time : messagesSinceLastExpectedMessage) {
+ builder.append("\n\t> ").append(time.toString());
+ }
+ return builder.toString();
+ }
+
+ }
+
+ public interface Context {
+ Context done();
+ Optional<? extends Error> error();
+ }
+
+ private static class NoOpContext implements Context {
+
+ @Override
+ public Context done() {
+ return this;
+ }
+
+ @Override
+ public Optional<Error> error() {
+ return Optional.absent();
+ }
+ }
+
+ private class CurrentMessageContext implements Context {
+ Stopwatch stopwatch = Stopwatch.createStarted();
+ boolean done = true;
+
+ public void reset(){
+ Preconditions.checkState(done);
+ done = false;
+ stopwatch.reset().start();
+ }
+
+ @Override
+ public Context done() {
+ processed(currentMessage, stopwatch.elapsed(TimeUnit.NANOSECONDS));
+ done = true;
+ return this;
+ }
+
+ @Override
+ public Optional<? extends Error> error() {
+ return Optional.absent();
+ }
+ }
+
+ private class ErrorContext implements Context {
+ Object message;
+ private final Optional<? extends Error> error;
+ Stopwatch stopwatch;
+
+ ErrorContext(Object message, Optional<? extends Error> error){
+ this.message = message;
+ this.error = error;
+ this.stopwatch = Stopwatch.createStarted();
+ }
+
+ @Override
+ public Context done(){
+ processed(message, this.stopwatch.elapsed(TimeUnit.NANOSECONDS));
+ this.stopwatch.stop();
+ return this;
+ }
+
+ @Override
+ public Optional<? extends Error> error() {
+ return error;
+ }
+ }
+}
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.InvalidNormalizedNodeStreamException;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputStreamReader;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeOutputStreamWriter;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
}
public static NormalizedNode<?, ?> deserializeNormalizedNode(DataInput in) {
- try {
- boolean present = in.readBoolean();
- if(present) {
- NormalizedNodeInputStreamReader streamReader = streamReader(in);
- return streamReader.readNormalizedNode();
- }
- } catch (IOException e) {
- throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
- }
+ try {
+ return tryDeserializeNormalizedNode(in);
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
+ }
+ }
+
+ private static NormalizedNode<?, ?> tryDeserializeNormalizedNode(DataInput in) throws IOException {
+ boolean present = in.readBoolean();
+ if(present) {
+ NormalizedNodeInputStreamReader streamReader = streamReader(in);
+ return streamReader.readNormalizedNode();
+ }
return null;
}
public static NormalizedNode<?, ?> deserializeNormalizedNode(byte [] bytes) {
NormalizedNode<?, ?> node = null;
try {
- node = deserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
- } catch(Exception e) {
- }
-
- if(node == null) {
- // Must be from legacy protobuf serialization - try that.
+ node = tryDeserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
+ } catch(InvalidNormalizedNodeStreamException e) {
+ // Probably from legacy protobuf serialization - try that.
try {
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(bytes);
node = new NormalizedNodeToNodeCodec(null).decode(serializedNode);
- } catch (InvalidProtocolBufferException e) {
+ } catch (InvalidProtocolBufferException e2) {
throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
}
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
}
return node;
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
-import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.osgi.framework.BundleContext;
-import scala.concurrent.duration.Duration;
-
public class DistributedConfigDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedConfigDataStoreProviderModule {
private BundleContext bundleContext;
}
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
- .dataStoreMXBeanType("DistributedConfigDatastore")
- .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
- .shardTransactionIdleTimeout(Duration.create(
- props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .dataStoreType("config")
+ .maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
+ .maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
+ .maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
+ .maxShardDataStoreExecutorQueueSize(props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue())
+ .shardTransactionIdleTimeoutInMinutes(props.getShardTransactionIdleTimeoutInMinutes().getValue())
.operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
.shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardSnapshotDataThresholdPercentage(props.getShardSnapshotDataThresholdPercentage().getValue().intValue())
- .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
- .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
- .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
+ .shardHeartbeatIntervalInMillis(props.getShardHeartbeatIntervalInMillis().getValue())
+ .shardInitializationTimeoutInSeconds(props.getShardInitializationTimeoutInSeconds().getValue())
+ .shardLeaderElectionTimeoutInSeconds(props.getShardLeaderElectionTimeoutInSeconds().getValue())
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
+ .transactionCreationInitialRateLimit(props.getTransactionCreationInitialRateLimit().getValue())
+ .shardBatchedModificationCount(props.getShardBatchedModificationCount().getValue().intValue())
.build();
- return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
+ return DistributedDataStoreFactory.createInstance(getConfigSchemaServiceDependency(),
datastoreContext, bundleContext);
}
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
-import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.osgi.framework.BundleContext;
-import scala.concurrent.duration.Duration;
-
public class DistributedOperationalDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedOperationalDataStoreProviderModule {
private BundleContext bundleContext;
}
DatastoreContext datastoreContext = DatastoreContext.newBuilder()
- .dataStoreMXBeanType("DistributedOperationalDatastore")
- .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
- .shardTransactionIdleTimeout(Duration.create(
- props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .dataStoreType("operational")
+ .maxShardDataChangeExecutorPoolSize(props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue())
+ .maxShardDataChangeExecutorQueueSize(props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue())
+ .maxShardDataChangeListenerQueueSize(props.getMaxShardDataChangeListenerQueueSize().getValue().intValue())
+ .maxShardDataStoreExecutorQueueSize(props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue())
+ .shardTransactionIdleTimeoutInMinutes(props.getShardTransactionIdleTimeoutInMinutes().getValue())
.operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
.shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardSnapshotDataThresholdPercentage(props.getShardSnapshotDataThresholdPercentage().getValue().intValue())
- .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
- .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
- .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
- TimeUnit.SECONDS)
+ .shardHeartbeatIntervalInMillis(props.getShardHeartbeatIntervalInMillis().getValue())
+ .shardInitializationTimeoutInSeconds(props.getShardInitializationTimeoutInSeconds().getValue())
+ .shardLeaderElectionTimeoutInSeconds(props.getShardLeaderElectionTimeoutInSeconds().getValue())
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
.shardIsolatedLeaderCheckIntervalInMillis(
props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.shardElectionTimeoutFactor(props.getShardElectionTimeoutFactor().getValue())
+ .transactionCreationInitialRateLimit(props.getTransactionCreationInitialRateLimit().getValue())
+ .shardBatchedModificationCount(props.getShardBatchedModificationCount().getValue().intValue())
.build();
- return DistributedDataStoreFactory.createInstance("operational",
- getOperationalSchemaServiceDependency(), datastoreContext, bundleContext);
+ return DistributedDataStoreFactory.createInstance(getOperationalSchemaServiceDependency(),
+ datastoreContext, bundleContext);
}
public void setBundleContext(BundleContext bundleContext) {
}
- leaf shard-hearbeat-interval-in-millis {
+ leaf shard-heartbeat-interval-in-millis {
default 500;
type heartbeat-interval-type;
description "The interval at which a shard will send a heart beat message to its remote shard.";
an operation (eg transaction create).";
}
+ leaf shard-batched-modification-count {
+ default 100;
+ type non-zero-uint32-type;
+ description "The number of transaction modification operations (put, merge, delete) to
+ batch before sending to the shard transaction actor. Batching improves
+ performance as fewer modification messages are sent to the actor and thus
+ lessens the chance that the transaction actor's mailbox queue could get full.";
+ }
+
leaf enable-metric-capture {
default false;
type boolean;
description "The interval at which the leader of the shard will check if its majority
followers are active and term itself as isolated";
}
+
+ leaf transaction-creation-initial-rate-limit {
+ default 100;
+ type non-zero-uint32-type;
+ description "The initial number of transactions per second that are allowed before the data store
+ should begin applying back pressure. This number is only used as initial guidance;
+ subsequently the datastore measures the latency for a commit and auto-adjusts the rate limit";
+ }
}
// Augments the 'configuration' choice node under modules/module.
package org.opendaylight.controller.cluster.datastore;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.SerializationUtils;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertTrue;
+
@Deprecated
public class CompositeModificationByteStringPayloadTest {
entries.add(new ReplicatedLogImplEntry(0, 1, payload));
- assertNotNull(new AppendEntries(10, "foobar", 10, 10, entries, 10).toSerializable());
+ assertNotNull(new AppendEntries(10, "foobar", 10, 10, entries, 10, -1).toSerializable());
}
}
});
AppendEntries appendEntries =
- new AppendEntries(1, "member-1", 0, 100, entries, 1);
+ new AppendEntries(1, "member-1", 0, 100, entries, 1, -1);
AppendEntriesMessages.AppendEntries o = (AppendEntriesMessages.AppendEntries)
appendEntries.toSerializable(RaftVersions.HELIUM_VERSION);
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(
MoreExecutors.sameThreadExecutor());
- doReturn(executor).when(mockActorSystem).dispatcher();
+
ActorContext actorContext = mock(ActorContext.class);
+ doReturn(executor).when(actorContext).getClientDispatcher();
+
String shardName = "shard-1";
final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
shardName, actorContext, mockListener);
shardName, actorContext, mockListener);
doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(Dispatchers.DEFAULT_DISPATCHER_PATH).when(actorContext).getNotificationDispatcherPath();
doReturn(getSystem().actorSelection(getRef().path())).
when(actorContext).actorSelection(getRef().path());
doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import java.io.IOException;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import org.junit.Test;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
+
+/**
+ * Unit tests for DatastoreContextConfigAdminOverlay.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextConfigAdminOverlayTest {
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void test() throws IOException {
+ BundleContext mockBundleContext = mock(BundleContext.class);
+ ServiceReference<ConfigurationAdmin> mockServiceRef = mock(ServiceReference.class);
+ ConfigurationAdmin mockConfigAdmin = mock(ConfigurationAdmin.class);
+ Configuration mockConfig = mock(Configuration.class);
+ DatastoreContextIntrospector mockIntrospector = mock(DatastoreContextIntrospector.class);
+
+ doReturn(mockServiceRef).when(mockBundleContext).getServiceReference(ConfigurationAdmin.class);
+ doReturn(mockConfigAdmin).when(mockBundleContext).getService(mockServiceRef);
+
+ doReturn(mockConfig).when(mockConfigAdmin).getConfiguration(DatastoreContextConfigAdminOverlay.CONFIG_ID);
+
+ doReturn(DatastoreContextConfigAdminOverlay.CONFIG_ID).when(mockConfig).getPid();
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("property", "value");
+ doReturn(properties).when(mockConfig).getProperties();
+
+ try(DatastoreContextConfigAdminOverlay overlay = new DatastoreContextConfigAdminOverlay(
+ mockIntrospector, mockBundleContext)) {
+ }
+
+ verify(mockIntrospector).update(properties);
+
+ verify(mockBundleContext).ungetService(mockServiceRef);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+
+/**
+ * Unit tests for DatastoreContextIntrospector.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreContextIntrospectorTest {
+
+ @Test
+ public void testUpdate() {
+ DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context );
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "31");
+ properties.put("operation-timeout-in-seconds", "26");
+ properties.put("shard-transaction-commit-timeout-in-seconds", "100");
+ properties.put("shard-journal-recovery-log-batch-size", "199");
+ properties.put("shard-snapshot-batch-count", "212");
+ properties.put("shard-heartbeat-interval-in-millis", "101");
+ properties.put("shard-transaction-commit-queue-capacity", "567");
+ properties.put("shard-initialization-timeout-in-seconds", "82");
+ properties.put("shard-leader-election-timeout-in-seconds", "66");
+ properties.put("shard-isolated-leader-check-interval-in-millis", "123");
+ properties.put("shard-snapshot-data-threshold-percentage", "100");
+ properties.put("shard-election-timeout-factor", "21");
+ properties.put("shard-batched-modification-count", "901");
+ properties.put("transactionCreationInitialRateLimit", "200");
+ properties.put("MaxShardDataChangeExecutorPoolSize", "41");
+ properties.put("Max-Shard-Data-Change Executor-Queue Size", "1111");
+ properties.put(" max shard data change listener queue size", "2222");
+ properties.put("mAx-shaRd-data-STORE-executor-quEUe-size", "3333");
+ properties.put("persistent", "false");
+
+ boolean updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(31, context.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(26, context.getOperationTimeoutInSeconds());
+ assertEquals(100, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(101, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
+ assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+ assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(21, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(901, context.getShardBatchedModificationCount());
+ assertEquals(200, context.getTransactionCreationInitialRateLimit());
+ assertEquals(41, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(3333, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ assertEquals(false, context.isPersistent());
+
+ properties.put("shard-transaction-idle-timeout-in-minutes", "32");
+ properties.put("operation-timeout-in-seconds", "27");
+ properties.put("shard-heartbeat-interval-in-millis", "102");
+ properties.put("shard-election-timeout-factor", "22");
+ properties.put("max-shard-data-change-executor-pool-size", "42");
+ properties.put("max-shard-data-store-executor-queue-size", "4444");
+ properties.put("persistent", "true");
+
+ updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(32, context.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(27, context.getOperationTimeoutInSeconds());
+ assertEquals(100, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(102, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
+ assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+ assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(22, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(200, context.getTransactionCreationInitialRateLimit());
+ assertEquals(42, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(4444, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ assertEquals(true, context.isPersistent());
+
+ updated = introspector.update(null);
+ assertEquals("updated", false, updated);
+
+ updated = introspector.update(new Hashtable<String, Object>());
+ assertEquals("updated", false, updated);
+ }
+
+
+ @Test
+ public void testUpdateWithInvalidValues() {
+ DatastoreContext context = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector introspector = new DatastoreContextIntrospector(context );
+
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "0"); // bad - must be > 0
+ properties.put("shard-journal-recovery-log-batch-size", "199");
+ properties.put("shard-transaction-commit-timeout-in-seconds", "bogus"); // bad - NaN
+ properties.put("shard-snapshot-batch-count", "212"); // good
+ properties.put("operation-timeout-in-seconds", "4"); // bad - must be >= 5
+ properties.put("shard-heartbeat-interval-in-millis", "99"); // bad - must be >= 100
+ properties.put("shard-transaction-commit-queue-capacity", "567"); // good
+ properties.put("shard-snapshot-data-threshold-percentage", "101"); // bad - must be 0-100
+ properties.put("shard-initialization-timeout-in-seconds", "-1"); // bad - must be > 0
+ properties.put("max-shard-data-change-executor-pool-size", "bogus"); // bad - NaN
+ properties.put("unknownProperty", "1"); // bad - invalid property name
+
+ boolean updated = introspector.update(properties);
+ assertEquals("updated", true, updated);
+ context = introspector.getContext();
+
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT, context.getShardTransactionIdleTimeout());
+ assertEquals(199, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(212, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(567, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
+ context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT, context.getShardInitializationTimeout());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ }
+
+ @Test
+ public void testUpdateWithDatastoreTypeSpecificProperties() {
+ Dictionary<String, Object> properties = new Hashtable<>();
+ properties.put("shard-transaction-idle-timeout-in-minutes", "22"); // global setting
+ properties.put("operational.shard-transaction-idle-timeout-in-minutes", "33"); // operational override
+ properties.put("config.shard-transaction-idle-timeout-in-minutes", "44"); // config override
+
+ properties.put("max-shard-data-change-executor-pool-size", "222"); // global setting
+ properties.put("operational.max-shard-data-change-executor-pool-size", "333"); // operational override
+ properties.put("config.max-shard-data-change-executor-pool-size", "444"); // config override
+
+ properties.put("persistent", "false"); // global setting
+ properties.put("operational.Persistent", "true"); // operational override
+
+ DatastoreContext operContext = DatastoreContext.newBuilder().dataStoreType("operational").build();
+ DatastoreContextIntrospector operIntrospector = new DatastoreContextIntrospector(operContext);
+ boolean updated = operIntrospector.update(properties);
+ assertEquals("updated", true, updated);
+ operContext = operIntrospector.getContext();
+
+ assertEquals(33, operContext.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(true, operContext.isPersistent());
+ assertEquals(333, operContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+
+ DatastoreContext configContext = DatastoreContext.newBuilder().dataStoreType("config").build();
+ DatastoreContextIntrospector configIntrospector = new DatastoreContextIntrospector(configContext);
+ updated = configIntrospector.update(properties);
+ assertEquals("updated", true, updated);
+ configContext = configIntrospector.getContext();
+
+ assertEquals(44, configContext.getShardTransactionIdleTimeout().toMinutes());
+ assertEquals(false, configContext.isPersistent());
+ assertEquals(444, configContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ }
+}
--- /dev/null
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_CONFIGURATION_READER;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_PERSISTENT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SNAPSHOT_BATCH_COUNT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+
+public class DatastoreContextTest {
+
+ @Test
+ public void testNewBuilderWithDefaultSettings() {
+ DatastoreContext context = DatastoreContext.newBuilder().build();
+
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT, context.getShardTransactionIdleTimeout());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS, context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE, context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis(),
+ context.getShardInitializationTimeout().duration().toMillis());
+ assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis(),
+ context.getShardLeaderElectionTimeout().duration().toMillis());
+ assertEquals(DEFAULT_PERSISTENT, context.isPersistent());
+ assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
+ assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, context.getTransactionCreationInitialRateLimit());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT, context.getShardBatchedModificationCount());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE,
+ context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ }
+
+ @Test
+ public void testNewBuilderWithCustomSettings() {
+ DatastoreContext.Builder builder = DatastoreContext.newBuilder();
+
+ builder.shardTransactionIdleTimeout(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.operationTimeoutInSeconds(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS + 1);
+ builder.shardTransactionCommitTimeoutInSeconds(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1);
+ builder.shardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1);
+ builder.shardSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT + 1);
+ builder.shardHeartbeatIntervalInMillis(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1);
+ builder.shardTransactionCommitQueueCapacity(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1);
+ builder.shardInitializationTimeout(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.
+ duration().toMillis() + 1, TimeUnit.MILLISECONDS);
+ builder.shardInitializationTimeout(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.shardLeaderElectionTimeout(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
+ TimeUnit.MILLISECONDS);
+ builder.persistent(!DEFAULT_PERSISTENT);
+ builder.shardIsolatedLeaderCheckIntervalInMillis(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1);
+ builder.shardSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1);
+ builder.shardElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1);
+ builder.transactionCreationInitialRateLimit(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1);
+ builder.shardBatchedModificationCount(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1);
+ builder.maxShardDataChangeExecutorPoolSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1);
+ builder.maxShardDataChangeExecutorQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1);
+ builder.maxShardDataChangeListenerQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1);
+ builder.maxShardDataStoreExecutorQueueSize(
+ InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1);
+
+ DatastoreContext context = builder.build();
+
+ verifyCustomSettings(context);
+
+ builder = DatastoreContext.newBuilderFrom(context);
+
+ DatastoreContext newContext = builder.build();
+
+ verifyCustomSettings(newContext);
+
+ Assert.assertNotSame(context, newContext);
+ }
+
+ private void verifyCustomSettings(DatastoreContext context) {
+ assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
+ context.getShardTransactionIdleTimeout().toMillis());
+ assertEquals(DEFAULT_OPERATION_TIMEOUT_IN_SECONDS + 1, context.getOperationTimeoutInSeconds());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1,
+ context.getShardTransactionCommitTimeoutInSeconds());
+ assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1,
+ context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
+ assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT + 1, context.getShardRaftConfig().getSnapshotBatchCount());
+ assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1,
+ context.getShardRaftConfig().getHeartBeatInterval().length());
+ assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1, context.getShardTransactionCommitQueueCapacity());
+ assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT.duration().toMillis() + 1,
+ context.getShardInitializationTimeout().duration().toMillis());
+ assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
+ context.getShardLeaderElectionTimeout().duration().toMillis());
+ assertEquals(!DEFAULT_PERSISTENT, context.isPersistent());
+ assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
+ assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1,
+ context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
+ assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1,
+ context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+ assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1, context.getShardRaftConfig().getElectionTimeoutFactor());
+ assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1, context.getTransactionCreationInitialRateLimit());
+ assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1,
+ context.getShardBatchedModificationCount());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
+ assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1,
+ context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
+ }
+}
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
ShardStrategyFactory.setConfiguration(config);
+ datastoreContextBuilder.dataStoreType(typeName);
+
DatastoreContext datastoreContext = datastoreContextBuilder.build();
- DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
+
+ DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
config, datastoreContext);
SchemaContext schemaContext = SchemaContextHelper.full();
--- /dev/null
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class DistributedDataStoreTest extends AbstractActorTest {
+
+ private SchemaContext schemaContext;
+
+ @Mock
+ private ActorContext actorContext;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+
+ schemaContext = TestModel.createTestContext();
+
+ doReturn(schemaContext).when(actorContext).getSchemaContext();
+ doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+ }
+
+ @Test
+ public void testRateLimitingUsedInReadWriteTxCreation(){
+ DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext);
+
+ distributedDataStore.newReadWriteTransaction();
+
+ verify(actorContext, times(1)).acquireTxCreationPermit();
+ }
+
+ @Test
+ public void testRateLimitingUsedInWriteOnlyTxCreation(){
+ DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext);
+
+ distributedDataStore.newWriteOnlyTransaction();
+
+ verify(actorContext, times(1)).acquireTxCreationPermit();
+ }
+
+
+ @Test
+ public void testRateLimitingNotUsedInReadOnlyTxCreation(){
+ DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext);
+
+ distributedDataStore.newReadOnlyTransaction();
+ distributedDataStore.newReadOnlyTransaction();
+ distributedDataStore.newReadOnlyTransaction();
+
+ verify(actorContext, times(0)).acquireTxCreationPermit();
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import java.util.concurrent.Semaphore;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
+
+/**
+ * Unit tests for OperationCompleter.
+ *
+ * @author Thomas Pantelis
+ */
+public class OperationCompleterTest {
+
+ @Test
+ public void testOnComplete() throws Exception {
+ int permits = 10;
+ Semaphore operationLimiter = new Semaphore(permits);
+ operationLimiter.acquire(permits);
+ int availablePermits = 0;
+
+ OperationCompleter completer = new OperationCompleter(operationLimiter );
+
+ completer.onComplete(null, new DataExistsReply(true));
+ assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
+
+ completer.onComplete(null, new DataExistsReply(true));
+ assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
+
+ completer.onComplete(null, new IllegalArgumentException());
+ assertEquals("availablePermits", ++availablePermits, operationLimiter.availablePermits());
+
+ completer.onComplete(null, new BatchedModificationsReply(4));
+ availablePermits += 4;
+ assertEquals("availablePermits", availablePermits, operationLimiter.availablePermits());
+ }
+}
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import scala.concurrent.Await;
import scala.concurrent.Future;
-import java.net.URI;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
public class ShardManagerTest extends AbstractActorTest {
private static int ID_COUNTER = 1;
}
private Props newShardMgrProps() {
- return ShardManager.props(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(),
- DatastoreContext.newBuilder().build());
+
+ DatastoreContext.Builder builder = DatastoreContext.newBuilder();
+ builder.dataStoreType(shardMrgIDSuffix);
+ return ShardManager.props(new MockClusterWrapper(), new MockConfiguration(), builder.build());
}
@Test
public void testRecoveryApplicable(){
new JavaTestKit(getSystem()) {
{
- final Props persistentProps = ShardManager.props(shardMrgIDSuffix,
- new MockClusterWrapper(),
- new MockConfiguration(),
- DatastoreContext.newBuilder().persistent(true).build());
+ final Props persistentProps = ShardManager.props(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(true).dataStoreType(shardMrgIDSuffix).build());
final TestActorRef<ShardManager> persistentShardManager =
TestActorRef.create(getSystem(), persistentProps);
assertTrue("Recovery Applicable", dataPersistenceProvider1.isRecoveryApplicable());
- final Props nonPersistentProps = ShardManager.props(shardMrgIDSuffix,
- new MockClusterWrapper(),
- new MockConfiguration(),
- DatastoreContext.newBuilder().persistent(false).build());
+ final Props nonPersistentProps = ShardManager.props(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(false).dataStoreType(shardMrgIDSuffix).build());
final TestActorRef<ShardManager> nonPersistentShardManager =
TestActorRef.create(getSystem(), nonPersistentProps);
private static final long serialVersionUID = 1L;
@Override
public ShardManager create() throws Exception {
- return new ShardManager(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build()) {
+ return new ShardManager(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().dataStoreType(shardMrgIDSuffix).build()) {
@Override
protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
DataPersistenceProviderMonitor dataPersistenceProviderMonitor
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
TestShardManager(String shardMrgIDSuffix) {
- super(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(),
- DatastoreContext.newBuilder().build());
+ super(new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().dataStoreType(shardMrgIDSuffix).build());
}
@Override
NormalizedNode<?,?> expectedRoot = readStore(shard, YangInstanceIdentifier.builder().build());
- CaptureSnapshot capture = new CaptureSnapshot(-1, -1, -1, -1);
+ CaptureSnapshot capture = new CaptureSnapshot(-1, -1, -1, -1, -1, -1);
shard.tell(capture, getRef());
assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
// Write data to the Tx
txActor.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)).toSerializable(
- DataStoreVersions.BASE_HELIUM_VERSION), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.BASE_HELIUM_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration, ShardTransactionMessages.WriteDataReply.class);
// Write data to the Tx
txActor.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ DataStoreVersions.BASE_HELIUM_VERSION).toSerializable(), getRef());
- expectMsgClass(duration, WriteDataReply.class);
+ expectMsgClass(duration, WriteDataReply.INSTANCE.toSerializable(
+ DataStoreVersions.BASE_HELIUM_VERSION).getClass());
// Ready the Tx
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.ShardWriteTransaction.GetCompositeModificationReply;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.Duration;
public class ShardTransactionTest extends AbstractActorTest {
"testOnReceiveWriteData");
transaction.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)).toSerializable(
- DataStoreVersions.HELIUM_2_VERSION), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.HELIUM_2_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ShardTransactionMessages.WriteDataReply.class);
// unserialized write
transaction.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)),
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.CURRENT_VERSION),
getRef());
expectMsgClass(duration("5 seconds"), WriteDataReply.class);
"testMergeData");
transaction.tell(new MergeData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)).toSerializable(
- DataStoreVersions.HELIUM_2_VERSION), getRef());
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.HELIUM_2_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ShardTransactionMessages.MergeDataReply.class);
//unserialized merge
transaction.tell(new MergeData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME)),
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), DataStoreVersions.CURRENT_VERSION),
getRef());
expectMsgClass(duration("5 seconds"), MergeDataReply.class);
final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
"testDeleteData");
- transaction.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(
- DataStoreVersions.HELIUM_2_VERSION), getRef());
+ transaction.tell(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.HELIUM_2_VERSION).
+ toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DeleteDataReply.class);
assertModification(transaction, DeleteModification.class);
//unserialized
- transaction.tell(new DeleteData(TestModel.TEST_PATH), getRef());
+ transaction.tell(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION), getRef());
expectMsgClass(duration("5 seconds"), DeleteDataReply.class);
}};
}
+ @Test
+ public void testOnReceiveBatchedModifications() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ DOMStoreWriteTransaction mockWriteTx = Mockito.mock(DOMStoreWriteTransaction.class);
+ final ActorRef transaction = newTransactionActor(mockWriteTx, "testOnReceiveBatchedModifications");
+
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+
+ YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
+
+ BatchedModifications batched = new BatchedModifications(DataStoreVersions.CURRENT_VERSION);
+ batched.addModification(new WriteModification(writePath, writeData));
+ batched.addModification(new MergeModification(mergePath, mergeData));
+ batched.addModification(new DeleteModification(deletePath));
+
+ transaction.tell(batched, getRef());
+
+ BatchedModificationsReply reply = expectMsgClass(duration("5 seconds"), BatchedModificationsReply.class);
+ assertEquals("getNumBatched", 3, reply.getNumBatched());
+
+ JavaTestKit verification = new JavaTestKit(getSystem());
+ transaction.tell(new ShardWriteTransaction.GetCompositedModification(), verification.getRef());
+
+ CompositeModification compositeModification = verification.expectMsgClass(duration("5 seconds"),
+ GetCompositeModificationReply.class).getModification();
+
+ assertEquals("CompositeModification size", 3, compositeModification.getModifications().size());
+
+ WriteModification write = (WriteModification)compositeModification.getModifications().get(0);
+ assertEquals("getPath", writePath, write.getPath());
+ assertEquals("getData", writeData, write.getData());
+
+ MergeModification merge = (MergeModification)compositeModification.getModifications().get(1);
+ assertEquals("getPath", mergePath, merge.getPath());
+ assertEquals("getData", mergeData, merge.getData());
+
+ DeleteModification delete = (DeleteModification)compositeModification.getModifications().get(2);
+ assertEquals("getPath", deletePath, delete.getPath());
+
+ InOrder inOrder = Mockito.inOrder(mockWriteTx);
+ inOrder.verify(mockWriteTx).write(writePath, writeData);
+ inOrder.verify(mockWriteTx).merge(mergePath, mergeData);
+ inOrder.verify(mockWriteTx).delete(deletePath);
+ }};
+ }
@Test
public void testOnReceiveReadyTransaction() throws Exception {
}
@Test
- public void testOnReceiveCloseTransaction() throws Exception {
+ public void testReadWriteTxOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
- "testCloseTransaction");
+ "testReadWriteTxOnReceiveCloseTransaction");
watch(transaction);
}};
}
+ @Test
+ public void testWriteOnlyTxOnReceiveCloseTransaction() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef transaction = newTransactionActor(store.newWriteOnlyTransaction(),
+ "testWriteTxOnReceiveCloseTransaction");
+
+ watch(transaction);
+
+ transaction.tell(new CloseTransaction().toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), CloseTransactionReply.SERIALIZABLE_CLASS);
+ expectTerminated(duration("3 seconds"), transaction);
+ }};
+ }
+
+ @Test
+ public void testReadOnlyTxOnReceiveCloseTransaction() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef transaction = newTransactionActor(store.newReadOnlyTransaction(),
+ "testReadOnlyTxOnReceiveCloseTransaction");
+
+ watch(transaction);
+
+ transaction.tell(new CloseTransaction().toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), Terminated.class);
+ }};
+ }
+
@Test(expected=UnknownMessageException.class)
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
DataStoreVersions.CURRENT_VERSION);
final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
- transaction.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(
- DataStoreVersions.CURRENT_VERSION), ActorRef.noSender());
+ transaction.receive(new DeleteData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION).
+ toSerializable(), ActorRef.noSender());
}
@Test
public void testShardTransactionInactivity() {
datastoreContext = DatastoreContext.newBuilder().shardTransactionIdleTimeout(
- Duration.create(500, TimeUnit.MILLISECONDS)).build();
+ 500, TimeUnit.MILLISECONDS).build();
new JavaTestKit(getSystem()) {{
final ActorRef transaction = newTransactionActor(store.newReadWriteTransaction(),
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import akka.actor.ActorPath;
import akka.actor.ActorSelection;
import akka.actor.Props;
import akka.dispatch.Futures;
+import akka.util.Timeout;
+import com.codahale.metrics.Snapshot;
+import com.codahale.metrics.Timer;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
@Mock
private ActorContext actorContext;
+ @Mock
+ private DatastoreContext datastoreContext;
+
+ @Mock
+ private Timer commitTimer;
+
+ @Mock
+ private Timer.Context commitTimerContext;
+
+ @Mock
+ private Snapshot commitSnapshot;
+
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
+ doReturn(datastoreContext).when(actorContext).getDatastoreContext();
+ doReturn(100).when(datastoreContext).getShardTransactionCommitTimeoutInSeconds();
+ doReturn(commitTimer).when(actorContext).getOperationTimer("commit");
+ doReturn(commitTimerContext).when(commitTimer).time();
+ doReturn(commitSnapshot).when(commitTimer).getSnapshot();
+ doReturn(TimeUnit.MILLISECONDS.toNanos(2000) * 1.0).when(commitSnapshot).get95thPercentile();
+ doReturn(10.0).when(actorContext).getTxCreationLimit();
}
private Future<ActorSelection> newCohort() {
}
stubber.when(actorContext).executeOperationAsync(any(ActorSelection.class),
- isA(requestType));
+ isA(requestType), any(Timeout.class));
}
private void verifyCohortInvocations(int nCohorts, Class<?> requestType) {
verify(actorContext, times(nCohorts)).executeOperationAsync(
- any(ActorSelection.class), isA(requestType));
+ any(ActorSelection.class), isA(requestType), any(Timeout.class));
}
private void propagateExecutionExceptionCause(ListenableFuture<?> future) throws Throwable {
try {
propagateExecutionExceptionCause(proxy.commit());
} finally {
+
+ verify(actorContext, never()).setTxCreationLimit(anyLong());
verifyCohortInvocations(0, CommitTransaction.SERIALIZABLE_CLASS);
}
+
}
@Test
setupMockActorContext(CommitTransaction.SERIALIZABLE_CLASS,
new CommitTransactionReply(), new CommitTransactionReply());
+ assertEquals(10.0, actorContext.getTxCreationLimit(), 1e-15);
+
proxy.canCommit().get(5, TimeUnit.SECONDS);
proxy.preCommit().get(5, TimeUnit.SECONDS);
proxy.commit().get(5, TimeUnit.SECONDS);
verifyCohortInvocations(2, CanCommitTransaction.SERIALIZABLE_CLASS);
verifyCohortInvocations(2, CommitTransaction.SERIALIZABLE_CLASS);
+
+ // Verify that the creation limit was changed to 0.5 (based on setup)
+ verify(actorContext, timeout(5000)).setTxCreationLimit(0.5);
+ }
+
+ @Test
+ public void testDoNotChangeTxCreationLimitWhenCommittingEmptyTxn() throws Exception {
+
+ ThreePhaseCommitCohortProxy proxy = setupProxy(0);
+
+ assertEquals(10.0, actorContext.getTxCreationLimit(), 1e-15);
+
+ proxy.canCommit().get(5, TimeUnit.SECONDS);
+ proxy.preCommit().get(5, TimeUnit.SECONDS);
+ proxy.commit().get(5, TimeUnit.SECONDS);
+
+ verify(actorContext, never()).setTxCreationLimit(anyLong());
}
}
package org.opendaylight.controller.cluster.datastore;
import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
ActorContext actorContext = null;
SchemaContext schemaContext = mock(SchemaContext.class);
+ @Mock
+ ActorContext mockActorContext;
+
@Before
public void setUp() {
+ MockitoAnnotations.initMocks(this);
+
actorContext = new MockActorContext(getSystem());
actorContext.setSchemaContext(schemaContext);
+
+ doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ doReturn(DatastoreContext.newBuilder().build()).when(mockActorContext).getDatastoreContext();
}
@SuppressWarnings("resource")
Assert.assertNotEquals(one.getTransactionChainId(), two.getTransactionChainId());
}
+
+ @Test
+ public void testRateLimitingUsedInReadWriteTxCreation(){
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ txChainProxy.newReadWriteTransaction();
+
+ verify(mockActorContext, times(1)).acquireTxCreationPermit();
+ }
+
+ @Test
+ public void testRateLimitingUsedInWriteOnlyTxCreation(){
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ txChainProxy.newWriteOnlyTransaction();
+
+ verify(mockActorContext, times(1)).acquireTxCreationPermit();
+ }
+
+
+ @Test
+ public void testRateLimitingNotUsedInReadOnlyTxCreation(){
+ TransactionChainProxy txChainProxy = new TransactionChainProxy(mockActorContext);
+
+ txChainProxy.newReadOnlyTransaction();
+
+ verify(mockActorContext, times(0)).acquireTxCreationPermit();
+ }
}
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
+import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
+import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
@Mock
private ClusterWrapper mockClusterWrapper;
- String memberName = "mock-member";
+ private final String memberName = "mock-member";
+
+ private final Builder dataStoreContextBuilder = DatastoreContext.newBuilder().operationTimeoutInSeconds(2).
+ shardBatchedModificationCount(1);
@BeforeClass
public static void setUpClass() throws IOException {
schemaContext = TestModel.createTestContext();
- DatastoreContext dataStoreContext = DatastoreContext.newBuilder().operationTimeoutInSeconds(2).build();
-
doReturn(getSystem()).when(mockActorContext).getActorSystem();
+ doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
doReturn(memberName).when(mockActorContext).getCurrentMemberName();
doReturn(schemaContext).when(mockActorContext).getSchemaContext();
doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
- doReturn(dataStoreContext).when(mockActorContext).getDatastoreContext();
+ doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
doReturn(10).when(mockActorContext).getTransactionOutstandingOperationLimit();
ShardStrategyFactory.setConfiguration(configuration);
}
private ReadData eqSerializedReadData() {
+ return eqSerializedReadData(TestModel.TEST_PATH);
+ }
+
+ private ReadData eqSerializedReadData(final YangInstanceIdentifier path) {
ArgumentMatcher<ReadData> matcher = new ArgumentMatcher<ReadData>() {
@Override
public boolean matches(Object argument) {
return ReadData.SERIALIZABLE_CLASS.equals(argument.getClass()) &&
- ReadData.fromSerializable(argument).getPath().equals(TestModel.TEST_PATH);
+ ReadData.fromSerializable(argument).getPath().equals(path);
}
};
return argThat(matcher);
}
- private WriteData eqSerializedWriteData(final NormalizedNode<?, ?> nodeToWrite) {
- return eqSerializedWriteData(nodeToWrite, DataStoreVersions.CURRENT_VERSION);
- }
-
- private WriteData eqSerializedWriteData(final NormalizedNode<?, ?> nodeToWrite,
- final int transactionVersion) {
+ private WriteData eqLegacyWriteData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
@Override
public boolean matches(Object argument) {
- if((transactionVersion >= DataStoreVersions.LITHIUM_VERSION &&
- WriteData.SERIALIZABLE_CLASS.equals(argument.getClass())) ||
- (transactionVersion < DataStoreVersions.LITHIUM_VERSION &&
- ShardTransactionMessages.WriteData.class.equals(argument.getClass()))) {
-
+ if(ShardTransactionMessages.WriteData.class.equals(argument.getClass())) {
WriteData obj = WriteData.fromSerializable(argument);
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
+ return obj.getPath().equals(TestModel.TEST_PATH) && obj.getData().equals(nodeToWrite);
}
return false;
return argThat(matcher);
}
- private WriteData eqWriteData(final NormalizedNode<?, ?> nodeToWrite) {
- ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
- @Override
- public boolean matches(Object argument) {
- if(argument instanceof WriteData) {
- WriteData obj = (WriteData) argument;
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
- }
- return false;
- }
- };
-
- return argThat(matcher);
- }
-
- private MergeData eqSerializedMergeData(final NormalizedNode<?, ?> nodeToWrite) {
- return eqSerializedMergeData(nodeToWrite, DataStoreVersions.CURRENT_VERSION);
- }
-
- private MergeData eqSerializedMergeData(final NormalizedNode<?, ?> nodeToWrite,
- final int transactionVersion) {
+ private MergeData eqLegacyMergeData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
@Override
public boolean matches(Object argument) {
- if((transactionVersion >= DataStoreVersions.LITHIUM_VERSION &&
- MergeData.SERIALIZABLE_CLASS.equals(argument.getClass())) ||
- (transactionVersion < DataStoreVersions.LITHIUM_VERSION &&
- ShardTransactionMessages.MergeData.class.equals(argument.getClass()))) {
-
+ if(ShardTransactionMessages.MergeData.class.equals(argument.getClass())) {
MergeData obj = MergeData.fromSerializable(argument);
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
+ return obj.getPath().equals(TestModel.TEST_PATH) && obj.getData().equals(nodeToWrite);
}
return false;
return argThat(matcher);
}
- private MergeData eqMergeData(final NormalizedNode<?, ?> nodeToWrite) {
- ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
- @Override
- public boolean matches(Object argument) {
- if(argument instanceof MergeData) {
- MergeData obj = ((MergeData) argument);
- return obj.getPath().equals(TestModel.TEST_PATH) &&
- obj.getData().equals(nodeToWrite);
- }
-
- return false;
- }
- };
-
- return argThat(matcher);
- }
-
- private DeleteData eqSerializedDeleteData() {
- ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
- @Override
- public boolean matches(Object argument) {
- return DeleteData.SERIALIZABLE_CLASS.equals(argument.getClass()) &&
- DeleteData.fromSerializable(argument).getPath().equals(TestModel.TEST_PATH);
- }
- };
-
- return argThat(matcher);
- }
-
- private DeleteData eqDeleteData() {
+ private DeleteData eqLegacyDeleteData(final YangInstanceIdentifier expPath) {
ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
@Override
public boolean matches(Object argument) {
- return argument instanceof DeleteData &&
- ((DeleteData)argument).getPath().equals(TestModel.TEST_PATH);
+ return ShardTransactionMessages.DeleteData.class.equals(argument.getClass()) &&
+ DeleteData.fromSerializable(argument).getPath().equals(expPath);
}
};
private Future<Object> readSerializedDataReply(NormalizedNode<?, ?> data,
short transactionVersion) {
- return Futures.successful(new ReadDataReply(data).toSerializable(transactionVersion));
+ return Futures.successful(new ReadDataReply(data, transactionVersion).toSerializable());
}
private Future<Object> readSerializedDataReply(NormalizedNode<?, ?> data) {
}
private Future<ReadDataReply> readDataReply(NormalizedNode<?, ?> data) {
- return Futures.successful(new ReadDataReply(data));
+ return Futures.successful(new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION));
}
private Future<Object> dataExistsSerializedReply(boolean exists) {
return Futures.successful(new DataExistsReply(exists));
}
- private Future<Object> writeSerializedDataReply(short version) {
- return Futures.successful(new WriteDataReply().toSerializable(version));
- }
-
- private Future<Object> writeSerializedDataReply() {
- return writeSerializedDataReply(DataStoreVersions.CURRENT_VERSION);
- }
-
- private Future<WriteDataReply> writeDataReply() {
- return Futures.successful(new WriteDataReply());
- }
-
- private Future<Object> mergeSerializedDataReply(short version) {
- return Futures.successful(new MergeDataReply().toSerializable(version));
- }
-
- private Future<Object> mergeSerializedDataReply() {
- return mergeSerializedDataReply(DataStoreVersions.CURRENT_VERSION);
+ private Future<BatchedModificationsReply> batchedModificationsReply(int count) {
+ return Futures.successful(new BatchedModificationsReply(count));
}
private Future<Object> incompleteFuture(){
return mock(Future.class);
}
- private Future<MergeDataReply> mergeDataReply() {
- return Futures.successful(new MergeDataReply());
+ private ActorSelection actorSelection(ActorRef actorRef) {
+ return getSystem().actorSelection(actorRef.path());
+ }
+
+ private void expectBatchedModifications(ActorRef actorRef, int count) {
+ doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
- private Future<Object> deleteSerializedDataReply(short version) {
- return Futures.successful(new DeleteDataReply().toSerializable(version));
+ private void expectBatchedModifications(int count) {
+ doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(BatchedModifications.class));
}
- private Future<Object> deleteSerializedDataReply() {
- return deleteSerializedDataReply(DataStoreVersions.CURRENT_VERSION);
+ private void expectIncompleteBatchedModifications() {
+ doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(BatchedModifications.class));
}
- private Future<DeleteDataReply> deleteDataReply() {
- return Futures.successful(new DeleteDataReply());
+ private void expectReadyTransaction(ActorRef actorRef) {
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
}
- private ActorSelection actorSelection(ActorRef actorRef) {
- return getSystem().actorSelection(actorRef.path());
+ private void expectFailedBatchedModifications(ActorRef actorRef) {
+ doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
}
private CreateTransactionReply createTransactionReply(ActorRef actorRef, int transactionVersion){
public void testRead() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
doReturn(Futures.successful(new Object())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
transactionProxy.read(TestModel.TEST_PATH).checkedGet(5, TimeUnit.SECONDS);
}
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
}
@Test(expected = TestException.class)
public void testReadWithPriorRecordingOperationFailure() throws Throwable {
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
+ when(mockActorContext).getDatastoreContext();
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ expectFailedBatchedModifications(actorRef);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(expectedNode));
+ expectBatchedModifications(actorRef, 1);
doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.write(TestModel.TEST_PATH, expectedNode);
TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
-
assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
+
+ InOrder inOrder = Mockito.inOrder(mockActorContext);
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
}
@Test(expected=IllegalStateException.class)
public void testReadPreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
-
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.read(TestModel.TEST_PATH);
}
public void testExists() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDataExists());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
}
@Test(expected = TestException.class)
public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
+ when(mockActorContext).getDatastoreContext();
+
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
-
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ expectFailedBatchedModifications(actorRef);
doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedDataExists());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
assertEquals("Exists response", true, exists);
+
+ InOrder inOrder = Mockito.inOrder(mockActorContext);
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
}
@Test(expected=IllegalStateException.class)
public void testExistsPreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
-
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.exists(TestModel.TEST_PATH);
}
// Expected
}
} else {
- assertEquals("Recording operation Future result type", expResultType,
+ assertEquals(String.format("Recording operation %d Future result type", i +1 ), expResultType,
Await.result(future, Duration.create(5, TimeUnit.SECONDS)).getClass());
}
}
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class);
+ BatchedModificationsReply.class);
}
@Test
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
final TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
throw caughtEx.get();
}
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class);
+ BatchedModificationsReply.class);
}
@Test(expected=IllegalStateException.class)
public void testWritePreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
-
- transactionProxy.write(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
}
@Test(expected=IllegalStateException.class)
public void testWriteAfterReadyPreConditionCheck() {
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.ready();
- transactionProxy.write(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
}
@Test
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- MergeDataReply.class);
+ BatchedModificationsReply.class);
}
@Test
public void testDelete() throws Exception {
ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
- doReturn(deleteSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.delete(TestModel.TEST_PATH);
- verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedDeleteData());
+ // This sends the batched modification.
+ transactionProxy.ready();
+
+ verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- DeleteDataReply.class);
+ BatchedModificationsReply.class);
}
private void verifyCohortFutures(ThreePhaseCommitCohortProxy proxy,
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
+ expectReadyTransaction(actorRef);
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
-
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.read(TestModel.TEST_PATH);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class);
+ BatchedModificationsReply.class);
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+
+ verify(mockActorContext).executeOperationAsync(eq(actorSelection(actorRef)),
+ isA(BatchedModifications.class));
}
private ActorRef testCompatibilityWithHeliumVersion(short version) throws Exception {
doReturn(readSerializedDataReply(testNode, version)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- doReturn(writeSerializedDataReply(version)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(testNode, version));
+ doReturn(Futures.successful(new WriteDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyWriteData(testNode));
- doReturn(mergeSerializedDataReply(version)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(testNode, version));
+ doReturn(Futures.successful(new MergeDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyMergeData(testNode));
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ doReturn(Futures.successful(new DeleteDataReply().toSerializable(version))).when(mockActorContext).
+ executeOperationAsync(eq(actorSelection(actorRef)), eqLegacyDeleteData(TestModel.TEST_PATH));
+
+ expectReadyTransaction(actorRef);
doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
eq(actorRef.path().toString()));
transactionProxy.merge(TestModel.TEST_PATH, testNode);
+ transactionProxy.delete(TestModel.TEST_PATH);
+
DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- ShardTransactionMessages.WriteDataReply.class, ShardTransactionMessages.MergeDataReply.class);
+ ShardTransactionMessages.WriteDataReply.class, ShardTransactionMessages.MergeDataReply.class,
+ ShardTransactionMessages.DeleteDataReply.class);
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ expectFailedBatchedModifications(actorRef);
- doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
-
- doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ expectReadyTransaction(actorRef);
doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
verifyCohortFutures(proxy, TestException.class);
- verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- MergeDataReply.class, TestException.class);
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(), TestException.class);
}
@Test
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- MergeDataReply.class);
+ BatchedModificationsReply.class);
verifyCohortFutures(proxy, TestException.class);
}
doReturn(Futures.failed(new PrimaryNotFoundException("mock"))).when(
mockActorContext).findPrimaryShardAsync(anyString());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(actorRef, 1);
doReturn(Futures.successful(new Object())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), eqSerializedReadData());
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_WRITE);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
transactionProxy.read(TestModel.TEST_PATH);
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ .setTransactionId("txn-1").setTransactionActorPath(actorPath).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
}
@Test
- public void testLocalTxActorWrite() throws Exception {
+ public void testLocalTxActorReady() throws Exception {
ActorSystem actorSystem = getSystem();
ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
+ setTransactionId("txn-1").setTransactionActorPath(actorPath).
+ setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
- executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
eqCreateTransaction(memberName, WRITE_ONLY));
doReturn(true).when(mockActorContext).isPathLocal(actorPath);
- NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ doReturn(batchedModificationsReply(1)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(BatchedModifications.class));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
- transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
- verify(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
-
- //testing local merge
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToWrite));
-
- transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
- verify(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToWrite));
-
-
- //testing local delete
- doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
- transactionProxy.delete(TestModel.TEST_PATH);
-
- verify(mockActorContext).executeOperationAsync(any(ActorSelection.class), eqDeleteData());
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
- WriteDataReply.class, MergeDataReply.class, DeleteDataReply.class);
+ BatchedModificationsReply.class);
// testing ready
doReturn(readyTxReply(shardActorRef.path().toString())).when(mockActorContext).executeOperationAsync(
}
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
+ setTransactionId("txn-1").setTransactionActorPath(actorPath).
+ setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
- long start = System.currentTimeMillis();
+ long start = System.nanoTime();
operation.run(transactionProxy);
- long end = System.currentTimeMillis();
+ long end = System.nanoTime();
- Assert.assertTrue(String.format("took less time than expected %s was %s",
- mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()*1000,
- (end-start)), (end - start) > mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()*1000);
+ long expected = TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds());
+ Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
+ expected, (end-start)), (end - start) > expected);
}
}
String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
- CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
- .setTransactionId("txn-1")
- .setTransactionActorPath(actorPath)
- .build();
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder().
+ setTransactionId("txn-1").setTransactionActorPath(actorPath).
+ setMessageVersion(DataStoreVersions.CURRENT_VERSION).build();
doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
- long start = System.currentTimeMillis();
+ long start = System.nanoTime();
operation.run(transactionProxy);
- long end = System.currentTimeMillis();
+ long end = System.nanoTime();
- Assert.assertTrue(String.format("took more time than expected %s was %s",
- mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()*1000,
- (end-start)), (end - start) <= mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds()*1000);
+ long expected = TimeUnit.SECONDS.toNanos(mockActorContext.getDatastoreContext().getOperationTimeoutInSeconds());
+ Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
+ expected, (end-start)), (end - start) <= expected);
}
public void testWriteThrottling(boolean shardFound){
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectBatchedModifications(2);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectIncompleteBatchedModifications();
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
}
});
-
}
@Test
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectBatchedModifications(2);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqSerializedWriteData(nodeToWrite));
+ expectBatchedModifications(2);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
}
});
-
}
@Test
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToMerge));
+ expectIncompleteBatchedModifications();
transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToMerge));
+ expectBatchedModifications(2);
transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqMergeData(nodeToMerge));
+ expectBatchedModifications(2);
transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
throttleOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
+ expectIncompleteBatchedModifications();
transactionProxy.delete(TestModel.TEST_PATH);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
+ expectBatchedModifications(2);
transactionProxy.delete(TestModel.TEST_PATH);
completeOperation(new TransactionProxyOperation() {
@Override
public void run(TransactionProxy transactionProxy) {
- doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqDeleteData());
+ expectBatchedModifications(2);
transactionProxy.delete(TestModel.TEST_PATH);
public void run(TransactionProxy transactionProxy) {
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
+ expectBatchedModifications(1);
doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
any(ActorSelection.class), any(ReadyTransaction.class));
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
NormalizedNode<?, ?> carsNode = ImmutableNodes.containerNode(CarsModel.BASE_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(nodeToWrite));
-
- doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
- any(ActorSelection.class), eqWriteData(carsNode));
+ expectBatchedModifications(2);
doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
any(ActorSelection.class), any(ReadyTransaction.class));
}
}, 2, true);
}
+
+ @Test
+ public void testModificationOperationBatching() throws Throwable {
+ int shardBatchedModificationCount = 3;
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount).build()).
+ when(mockActorContext).getDatastoreContext();
+
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ expectBatchedModifications(actorRef, shardBatchedModificationCount);
+
+ expectReadyTransaction(actorRef);
+
+ YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ YangInstanceIdentifier writePath3 = TestModel.INNER_LIST_PATH;
+ NormalizedNode<?, ?> writeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
+
+ YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier mergePath2 = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ YangInstanceIdentifier mergePath3 = TestModel.INNER_LIST_PATH;
+ NormalizedNode<?, ?> mergeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
+
+ YangInstanceIdentifier deletePath1 = TestModel.TEST_PATH;
+ YangInstanceIdentifier deletePath2 = TestModel.OUTER_LIST_PATH;
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
+ transactionProxy.write(writePath1, writeNode1);
+ transactionProxy.write(writePath2, writeNode2);
+ transactionProxy.delete(deletePath1);
+ transactionProxy.merge(mergePath1, mergeNode1);
+ transactionProxy.merge(mergePath2, mergeNode2);
+ transactionProxy.write(writePath3, writeNode3);
+ transactionProxy.merge(mergePath3, mergeNode3);
+ transactionProxy.delete(deletePath2);
+
+ // This sends the last batch.
+ transactionProxy.ready();
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), new WriteModification(writePath1, writeNode1),
+ new WriteModification(writePath2, writeNode2), new DeleteModification(deletePath1));
+
+ verifyBatchedModifications(batchedModifications.get(1), new MergeModification(mergePath1, mergeNode1),
+ new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
+
+ verifyBatchedModifications(batchedModifications.get(2), new MergeModification(mergePath3, mergeNode3),
+ new DeleteModification(deletePath2));
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ }
+
+ @Test
+ public void testModificationOperationBatchingWithInterleavedReads() throws Throwable {
+ int shardBatchedModificationCount = 10;
+ doReturn(dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount).build()).
+ when(mockActorContext).getDatastoreContext();
+
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
+
+ expectBatchedModifications(actorRef, shardBatchedModificationCount);
+
+ YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
+
+ YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ YangInstanceIdentifier mergePath2 = TestModel.INNER_LIST_PATH;
+ NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
+
+ YangInstanceIdentifier deletePath = TestModel.OUTER_LIST_PATH;
+
+ doReturn(readSerializedDataReply(writeNode2)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(writePath2));
+
+ doReturn(readSerializedDataReply(mergeNode2)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(mergePath2));
+
+ doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
+
+ transactionProxy.write(writePath1, writeNode1);
+ transactionProxy.write(writePath2, writeNode2);
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(writePath2).
+ get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+ assertEquals("Response NormalizedNode", writeNode2, readOptional.get());
+
+ transactionProxy.merge(mergePath1, mergeNode1);
+ transactionProxy.merge(mergePath2, mergeNode2);
+
+ readOptional = transactionProxy.read(mergePath2).get(5, TimeUnit.SECONDS);
+
+ transactionProxy.delete(deletePath);
+
+ Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
+ assertEquals("Exists response", true, exists);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+ assertEquals("Response NormalizedNode", mergeNode2, readOptional.get());
+
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), new WriteModification(writePath1, writeNode1),
+ new WriteModification(writePath2, writeNode2));
+
+ verifyBatchedModifications(batchedModifications.get(1), new MergeModification(mergePath1, mergeNode1),
+ new MergeModification(mergePath2, mergeNode2));
+
+ verifyBatchedModifications(batchedModifications.get(2), new DeleteModification(deletePath));
+
+ InOrder inOrder = Mockito.inOrder(mockActorContext);
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(writePath2));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData(mergePath2));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(BatchedModifications.class));
+
+ inOrder.verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ BatchedModificationsReply.class, BatchedModificationsReply.class, BatchedModificationsReply.class);
+ }
+
+ private List<BatchedModifications> captureBatchedModifications(ActorRef actorRef) {
+ ArgumentCaptor<BatchedModifications> batchedModificationsCaptor =
+ ArgumentCaptor.forClass(BatchedModifications.class);
+ verify(mockActorContext, Mockito.atLeastOnce()).executeOperationAsync(
+ eq(actorSelection(actorRef)), batchedModificationsCaptor.capture());
+
+ List<BatchedModifications> batchedModifications = filterCaptured(
+ batchedModificationsCaptor, BatchedModifications.class);
+ return batchedModifications;
+ }
+
+ private <T> List<T> filterCaptured(ArgumentCaptor<T> captor, Class<T> type) {
+ List<T> captured = new ArrayList<>();
+ for(T c: captor.getAllValues()) {
+ if(type.isInstance(c)) {
+ captured.add(c);
+ }
+ }
+
+ return captured;
+ }
+
+ private void verifyOneBatchedModification(ActorRef actorRef, Modification expected) {
+ List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
+ assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
+
+ verifyBatchedModifications(batchedModifications.get(0), expected);
+ }
+
+ private void verifyBatchedModifications(Object message, Modification... expected) {
+ assertEquals("Message type", BatchedModifications.class, message.getClass());
+ BatchedModifications batchedModifications = (BatchedModifications)message;
+ assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
+ for(int i = 0; i < batchedModifications.getModifications().size(); i++) {
+ Modification actual = batchedModifications.getModifications().get(i);
+ assertEquals("Modification type", expected[i].getClass(), actual.getClass());
+ assertEquals("getPath", ((AbstractModification)expected[i]).getPath(),
+ ((AbstractModification)actual).getPath());
+ if(actual instanceof WriteModification) {
+ assertEquals("getData", ((WriteModification)expected[i]).getData(),
+ ((WriteModification)actual).getData());
+ }
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static org.junit.Assert.assertEquals;
+import java.io.Serializable;
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+
+/**
+ * Unit tests for BatchedModifications.
+ *
+ * @author Thomas Pantelis
+ */
+public class BatchedModificationsTest {
+
+ @Test
+ public void testSerialization() {
+ YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
+ withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+
+ YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
+ NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+
+ YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
+
+ BatchedModifications batched = new BatchedModifications(DataStoreVersions.CURRENT_VERSION);
+ batched.addModification(new WriteModification(writePath, writeData));
+ batched.addModification(new MergeModification(mergePath, mergeData));
+ batched.addModification(new DeleteModification(deletePath));
+
+ BatchedModifications clone = (BatchedModifications) SerializationUtils.clone(
+ (Serializable) batched.toSerializable());
+
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
+
+ assertEquals("getModifications size", 3, clone.getModifications().size());
+
+ WriteModification write = (WriteModification)clone.getModifications().get(0);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, write.getVersion());
+ assertEquals("getPath", writePath, write.getPath());
+ assertEquals("getData", writeData, write.getData());
+
+ MergeModification merge = (MergeModification)clone.getModifications().get(1);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, merge.getVersion());
+ assertEquals("getPath", mergePath, merge.getPath());
+ assertEquals("getData", mergeData, merge.getData());
+
+ DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, delete.getVersion());
+ assertEquals("getPath", deletePath, delete.getPath());
+ }
+
+ @Test
+ public void testBatchedModificationsReplySerialization() {
+ BatchedModificationsReply clone = (BatchedModificationsReply) SerializationUtils.clone(
+ (Serializable) new BatchedModificationsReply(100).toSerializable());
+ assertEquals("getNumBatched", 100, clone.getNumBatched());
+ }
+}
*
* @author Thomas Pantelis
*/
+@Deprecated
public class DeleteDataTest {
@Test
public void testSerialization() {
YangInstanceIdentifier path = TestModel.TEST_PATH;
- DeleteData expected = new DeleteData(path);
+ DeleteData expected = new DeleteData(path, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", DeleteData.class, serialized.getClass());
assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((DeleteData)serialized).getVersion());
Object clone = SerializationUtils.clone((Serializable) serialized);
- assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((DeleteData)clone).getVersion());
DeleteData actual = DeleteData.fromSerializable(clone);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getPath", expected.getPath(), actual.getPath());
}
public void testSerializationWithHeliumR1Version() throws Exception {
YangInstanceIdentifier path = TestModel.TEST_PATH;
- DeleteData expected = new DeleteData(path);
+ DeleteData expected = new DeleteData(path, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.DeleteData.class, serialized.getClass());
DeleteData actual = DeleteData.fromSerializable(SerializationUtils.clone((Serializable) serialized));
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+@Deprecated
public class MergeDataTest {
@Test
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- MergeData expected = new MergeData(path, data);
+ MergeData expected = new MergeData(path, data, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", MergeData.class, serialized.getClass());
assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((MergeData)serialized).getVersion());
Object clone = SerializationUtils.clone((Serializable) serialized);
- assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((MergeData)clone).getVersion());
MergeData actual = MergeData.fromSerializable(clone);
+ assertEquals("Version", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getPath", expected.getPath(), actual.getPath());
assertEquals("getData", expected.getData(), actual.getData());
}
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- MergeData expected = new MergeData(path, data);
+ MergeData expected = new MergeData(path, data, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.MergeData.class, serialized.getClass());
MergeData actual = MergeData.fromSerializable(SerializationUtils.clone((Serializable) serialized));
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- ReadDataReply expected = new ReadDataReply(data);
+ ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ReadDataReply.class, serialized.getClass());
ReadDataReply actual = ReadDataReply.fromSerializable(SerializationUtils.clone(
(Serializable) serialized));
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getNormalizedNode", expected.getNormalizedNode(), actual.getNormalizedNode());
}
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- ReadDataReply expected = new ReadDataReply(data);
+ ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.ReadDataReply.class, serialized.getClass());
ReadDataReply actual = ReadDataReply.fromSerializable(SerializationUtils.clone(
*
* @author Thomas Pantelis
*/
+@Deprecated
public class WriteDataTest {
@Test
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- WriteData expected = new WriteData(path, data);
+ WriteData expected = new WriteData(path, data, DataStoreVersions.CURRENT_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.CURRENT_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", WriteData.class, serialized.getClass());
assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((WriteData)serialized).getVersion());
Object clone = SerializationUtils.clone((Serializable) serialized);
- assertEquals("Version", DataStoreVersions.CURRENT_VERSION, ((WriteData)clone).getVersion());
WriteData actual = WriteData.fromSerializable(clone);
+ assertEquals("Version", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
assertEquals("getPath", expected.getPath(), actual.getPath());
assertEquals("getData", expected.getData(), actual.getData());
}
new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).
withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
- WriteData expected = new WriteData(path, data);
+ WriteData expected = new WriteData(path, data, DataStoreVersions.HELIUM_1_VERSION);
- Object serialized = expected.toSerializable(DataStoreVersions.HELIUM_1_VERSION);
+ Object serialized = expected.toSerializable();
assertEquals("Serialized type", ShardTransactionMessages.WriteData.class, serialized.getClass());
WriteData actual = WriteData.fromSerializable(SerializationUtils.clone((Serializable) serialized));
import org.apache.commons.lang.SerializationUtils;
import org.junit.Ignore;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.clone(compositeModification);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
+
assertEquals("getModifications size", 3, clone.getModifications().size());
WriteModification write = (WriteModification)clone.getModifications().get(0);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, write.getVersion());
assertEquals("getPath", writePath, write.getPath());
assertEquals("getData", writeData, write.getData());
MergeModification merge = (MergeModification)clone.getModifications().get(1);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, merge.getVersion());
assertEquals("getPath", mergePath, merge.getPath());
assertEquals("getData", mergeData, merge.getData());
DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
+ assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, delete.getVersion());
assertEquals("getPath", deletePath, delete.getPath());
}
compositeModification.addModification(new WriteModification(writePath, writeData));
}
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for(int i = 0; i < 1000; i++) {
new ModificationPayload(compositeModification);
}
package org.opendaylight.controller.cluster.datastore.utils;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
+import com.typesafe.config.ConfigFactory;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang.time.StopWatch;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.Configuration;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
assertEquals(expected, actual);
}
+ @Test
+ public void testRateLimiting(){
+ DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
+
+ doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
+ doReturn("config").when(mockDataStoreContext).getDataStoreType();
+
+ ActorContext actorContext =
+ new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class), mockDataStoreContext);
+
+ // Check that the initial value is being picked up from DataStoreContext
+ assertEquals(mockDataStoreContext.getTransactionCreationInitialRateLimit(), actorContext.getTxCreationLimit(), 1e-15);
+
+ actorContext.setTxCreationLimit(1.0);
+
+ assertEquals(1.0, actorContext.getTxCreationLimit(), 1e-15);
+
+
+ StopWatch watch = new StopWatch();
+
+ watch.start();
+
+ actorContext.acquireTxCreationPermit();
+ actorContext.acquireTxCreationPermit();
+ actorContext.acquireTxCreationPermit();
+
+ watch.stop();
+
+ assertTrue("did not take as much time as expected", watch.getTime() > 1000);
+ }
+
+ @Test
+ public void testClientDispatcherIsGlobalDispatcher(){
+
+ DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
+
+ doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
+ doReturn("config").when(mockDataStoreContext).getDataStoreType();
+
+ ActorContext actorContext =
+ new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class), mockDataStoreContext);
+
+ assertEquals(getSystem().dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
+
+ }
+
+ @Test
+ public void testClientDispatcherIsNotGlobalDispatcher(){
+
+ DatastoreContext mockDataStoreContext = mock(DatastoreContext.class);
+
+ doReturn(155L).when(mockDataStoreContext).getTransactionCreationInitialRateLimit();
+ doReturn("config").when(mockDataStoreContext).getDataStoreType();
+
+ ActorSystem actorSystem = ActorSystem.create("with-custom-dispatchers", ConfigFactory.load("application-with-custom-dispatchers.conf"));
+
+ ActorContext actorContext =
+ new ActorContext(actorSystem, mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class), mockDataStoreContext);
+
+ assertNotEquals(actorSystem.dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
+
+ actorSystem.shutdown();
+
+ }
+
}
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import akka.dispatch.MessageDispatcher;
+import org.junit.Test;
+
+public class DispatchersTest {
+
+ @Test
+ public void testGetDefaultDispatcherPath(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ doReturn(false).when(mockDispatchers).hasDispatcher(anyString());
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ for(Dispatchers.DispatcherType type : Dispatchers.DispatcherType.values()) {
+ assertEquals(Dispatchers.DEFAULT_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(type));
+ }
+
+ }
+
+ @Test
+ public void testGetDefaultDispatcher(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ MessageDispatcher mockGlobalDispatcher = mock(MessageDispatcher.class);
+ doReturn(false).when(mockDispatchers).hasDispatcher(anyString());
+ doReturn(mockGlobalDispatcher).when(mockDispatchers).defaultGlobalDispatcher();
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ for(Dispatchers.DispatcherType type : Dispatchers.DispatcherType.values()) {
+ assertEquals(mockGlobalDispatcher,
+ dispatchers.getDispatcher(type));
+ }
+
+ }
+
+ @Test
+ public void testGetDispatcherPath(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ doReturn(true).when(mockDispatchers).hasDispatcher(anyString());
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+ assertEquals(Dispatchers.CLIENT_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Client));
+
+ assertEquals(Dispatchers.TXN_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction));
+
+ assertEquals(Dispatchers.SHARD_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Shard));
+
+ assertEquals(Dispatchers.NOTIFICATION_DISPATCHER_PATH,
+ dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification));
+
+ }
+
+ @Test
+ public void testGetDispatcher(){
+ akka.dispatch.Dispatchers mockDispatchers = mock(akka.dispatch.Dispatchers.class);
+ MessageDispatcher mockDispatcher = mock(MessageDispatcher.class);
+ doReturn(true).when(mockDispatchers).hasDispatcher(anyString());
+ doReturn(mockDispatcher).when(mockDispatchers).lookup(anyString());
+ Dispatchers dispatchers = new Dispatchers(mockDispatchers);
+
+        assertEquals(mockDispatcher,
+                dispatchers.getDispatcher(Dispatchers.DispatcherType.Client));
+
+        assertEquals(mockDispatcher,
+                dispatchers.getDispatcher(Dispatchers.DispatcherType.Transaction));
+
+        assertEquals(mockDispatcher,
+                dispatchers.getDispatcher(Dispatchers.DispatcherType.Shard));
+
+        assertEquals(mockDispatcher,
+                dispatchers.getDispatcher(Dispatchers.DispatcherType.Notification));
+
+ }
+}
\ No newline at end of file
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MessageTrackerTest {
+
+ private final Logger LOG = LoggerFactory.getLogger(getClass());
+
+ private class Foo {}
+
+ @Test
+ public void testNoTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+ context2.done();
+
+ }
+
+ @Test
+ public void testFailedExpectationOnTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+ Assert.assertEquals(true, context2.error().isPresent());
+ Assert.assertEquals(0, context2.error().get().getMessageProcessingTimesSinceLastExpectedMessage().size());
+
+ }
+
+ @Test
+ public void testFailedExpectationOnTrackingWithMessagesInBetween(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ messageTracker.received("A").done();
+ messageTracker.received(Long.valueOf(10)).done();
+ MessageTracker.Context c = messageTracker.received(Integer.valueOf(100));
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ c.done();
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+
+ Assert.assertEquals(true, context2.error().isPresent());
+
+ MessageTracker.Error error = context2.error().get();
+
+ List<MessageTracker.MessageProcessingTime> messageProcessingTimes =
+ error.getMessageProcessingTimesSinceLastExpectedMessage();
+
+ Assert.assertEquals(3, messageProcessingTimes.size());
+
+ Assert.assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
+ Assert.assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
+ Assert.assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
+ Assert.assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > TimeUnit.MILLISECONDS.toNanos(10));
+ Assert.assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
+ Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+
+ LOG.error("An error occurred : {}" , error);
+
+ }
+
+
+ @Test
+ public void testMetExpectationOnTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(new Foo());
+ context1.done();
+
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context2 = messageTracker.received(new Foo());
+ Assert.assertEquals(false, context2.error().isPresent());
+
+ }
+
+ @Test
+ public void testIllegalStateExceptionWhenDoneIsNotCalledWhileTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ messageTracker.received(new Foo());
+
+ try {
+ messageTracker.received(new Foo());
+ fail("Expected an IllegalStateException");
+ } catch (IllegalStateException e){
+
+ }
+ }
+
+ @Test
+ public void testNoIllegalStateExceptionWhenDoneIsNotCalledWhileNotTracking(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+
+ messageTracker.received(new Foo());
+ messageTracker.received(new Foo());
+ }
+
+ @Test
+ public void testDelayInFirstExpectedMessageArrival(){
+
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ MessageTracker.Context context = messageTracker.received(new Foo());
+
+ Assert.assertEquals(true, context.error().isPresent());
+
+ MessageTracker.Error error = context.error().get();
+
+ Assert.assertEquals(null, error.getLastExpectedMessage());
+ Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+
+ String errorString = error.toString();
+ Assert.assertTrue(errorString.contains("Last Expected Message = null"));
+
+ LOG.error("An error occurred : {}", error);
+ }
+
+ @Test
+ public void testCallingBeginDoesNotResetWatch(){
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
+
+ messageTracker.begin();
+
+ MessageTracker.Context context = messageTracker.received(new Foo());
+
+ Assert.assertEquals(true, context.error().isPresent());
+
+ }
+
+ @Test
+ public void testMessagesSinceLastExpectedMessage(){
+
+ MessageTracker messageTracker = new MessageTracker(Foo.class, 10);
+ messageTracker.begin();
+
+ MessageTracker.Context context1 = messageTracker.received(Integer.valueOf(45)).done();
+
+ Assert.assertEquals(false, context1.error().isPresent());
+
+ MessageTracker.Context context2 = messageTracker.received(Long.valueOf(45)).done();
+
+ Assert.assertEquals(false, context2.error().isPresent());
+
+ List<MessageTracker.MessageProcessingTime> processingTimeList =
+ messageTracker.getMessagesSinceLastExpectedMessage();
+
+ Assert.assertEquals(2, processingTimeList.size());
+
+ assertEquals(Integer.class, processingTimeList.get(0).getMessageClass());
+ assertEquals(Long.class, processingTimeList.get(1).getMessageClass());
+
+ }
+
+}
\ No newline at end of file
private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
- public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).build();
+ public static final YangInstanceIdentifier INNER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).
+ node(OUTER_LIST_QNAME).node(INNER_LIST_QNAME).build();
public static final QName TWO_QNAME = QName.create(TEST_QNAME,"two");
public static final QName THREE_QNAME = QName.create(TEST_QNAME,"three");
}
});
- return new AppendEntries(1, "member-1", 0, 100, modification, 1);
+ return new AppendEntries(1, "member-1", 0, 100, modification, 1, -1);
}
public static AppendEntries keyValueAppendEntries() {
}
});
- return new AppendEntries(1, "member-1", 0, 100, modification, 1);
+ return new AppendEntries(1, "member-1", 0, 100, modification, 1, -1);
}
}
--- /dev/null
+akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+ persistence.journal.plugin = "in-memory-journal"
+
+ loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
+
+ actor {
+ serializers {
+ java = "akka.serialization.JavaSerializer"
+ proto = "akka.remote.serialization.ProtobufSerializer"
+ }
+
+ serialization-bindings {
+ "org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification" = java
+ "com.google.protobuf.Message" = proto
+
+ }
+ }
+}
+
+in-memory-journal {
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal"
+}
+
+in-memory-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
+bounded-mailbox {
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+ mailbox-capacity = 1000
+ mailbox-push-timeout-time = 100ms
+}
+
+client-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
+
+transaction-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
+
+shard-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
+
+notification-dispatcher {
+ # Dispatcher is the name of the event-based dispatcher
+ type = Dispatcher
+ # What kind of ExecutionService to use
+ executor = "fork-join-executor"
+ # Configuration for the fork join pool
+ fork-join-executor {
+ # Min number of threads to cap factor-based parallelism number to
+ parallelism-min = 2
+ # Parallelism (threads) ... ceil(available processors * factor)
+ parallelism-factor = 2.0
+ # Max number of threads to cap factor-based parallelism number to
+ parallelism-max = 10
+ }
+ # Throughput defines the maximum number of messages to be
+ # processed per actor before the thread jumps to the next actor.
+ # Set to 1 for as fair as possible.
+ throughput = 100
+}
\ No newline at end of file
--- /dev/null
+org.slf4j.simpleLogger.showDateTime=true
+org.slf4j.simpleLogger.dateTimeFormat=hh:mm:ss,S a
+org.slf4j.simpleLogger.logFile=System.out
+org.slf4j.simpleLogger.showShortLogName=true
+org.slf4j.simpleLogger.levelInBrackets=true
+org.slf4j.simpleLogger.log.org.opendaylight.controller.cluster.datastore=trace
\ No newline at end of file
<groupId>org.osgi</groupId>
<artifactId>org.osgi.core</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-test-model</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
<plugins>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import java.util.EventListener;
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Interface implemented by classes interested in receiving notifications about
+ * data tree changes. This interface differs from {@link DOMDataChangeListener}
+ * in that it provides a cursor-based view of the change, which has potentially
+ * lower overhead.
+ */
+public interface DOMDataTreeChangeListener extends EventListener {
+ /**
+ * Invoked when there was data change for the supplied path, which was used
+ * to register this listener.
+ *
+ * <p>
+ * This method may be also invoked during registration of the listener if
+ * there is any pre-existing data in the conceptual data tree for supplied
+ * path. This initial event will contain all pre-existing data as created.
+ *
+ * <p>
+ * A data change event may be triggered spuriously, e.g. such that data before
+ * and after compare as equal. Implementations of this interface are expected
+ * to recover from such events. Event producers are expected to exert reasonable
+ * effort to suppress such events.
+ *
+ * In other words, it is completely acceptable to observe
+ * a {@link org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode},
+ * which reports a {@link org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType}
+ * other than UNMODIFIED, while the before- and after- data items compare as
+ * equal.
+ *
+ * @param changes Collection of change events, may not be null or empty.
+ */
+ void onDataTreeChanged(@Nonnull Collection<DataTreeCandidate> changes);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A {@link DOMService} which allows users to register for changes to a
+ * subtree.
+ */
+public interface DOMDataTreeChangeService extends DOMService {
+ /**
+ * Registers a {@link DOMDataTreeChangeListener} to receive
+ * notifications when data changes under a given path in the conceptual data
+ * tree.
+ * <p>
+ * You are able to register for notifications for any node or subtree
+ * which can be represented using {@link DOMDataTreeIdentifier}.
+ * <p>
+ *
+ * You are able to register for data change notifications for a subtree or leaf
+ * even if it does not exist. You will receive notification once that node is
+ * created.
+ * <p>
+ * If there is any pre-existing data in the data tree for the path for which you are
+ * registering, you will receive an initial data change event, which will
+ * contain all pre-existing data, marked as created.
+ *
+ * <p>
+ * This method returns a {@link ListenerRegistration} object. To
+ * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
+ * method on the returned object.
+ * <p>
+ * You MUST explicitly unregister your listener when you no longer want to receive
+ * notifications. This is especially true in OSGi environments, where failure to
+ * do so during bundle shutdown can lead to stale listeners being still registered.
+ *
+ * @param treeId
+ * Data tree identifier of the subtree which should be watched for
+ * changes.
+ * @param listener
+ * Listener instance which is being registered
+ * @return Listener registration object, which may be used to unregister
+ * your listener using {@link ListenerRegistration#close()} to stop
+ * delivery of change events.
+ */
+ @Nonnull <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DOMDataTreeIdentifier treeId, @Nonnull L listener);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+import java.util.Iterator;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+
+/**
+ * A unique identifier for a particular subtree. It is composed of the logical
+ * data store type and the instance identifier of the root node.
+ */
+public final class DOMDataTreeIdentifier implements Immutable, Path<DOMDataTreeIdentifier>, Serializable, Comparable<DOMDataTreeIdentifier> {
+ private static final long serialVersionUID = 1L;
+ private final YangInstanceIdentifier rootIdentifier;
+ private final LogicalDatastoreType datastoreType;
+
+ public DOMDataTreeIdentifier(final LogicalDatastoreType datastoreType, final YangInstanceIdentifier rootIdentifier) {
+ this.datastoreType = Preconditions.checkNotNull(datastoreType);
+ this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
+ }
+
+ /**
+ * Return the logical data store type.
+ *
+ * @return Logical data store type. Guaranteed to be non-null.
+ */
+ public @Nonnull LogicalDatastoreType getDatastoreType() {
+ return datastoreType;
+ }
+
+ /**
+ * Return the {@link YangInstanceIdentifier} of the root node.
+ *
+ * @return Instance identifier corresponding to the root node.
+ */
+ public @Nonnull YangInstanceIdentifier getRootIdentifier() {
+ return rootIdentifier;
+ }
+
+ @Override
+ public boolean contains(final DOMDataTreeIdentifier other) {
+ return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + datastoreType.hashCode();
+ result = prime * result + rootIdentifier.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DOMDataTreeIdentifier)) {
+ return false;
+ }
+ DOMDataTreeIdentifier other = (DOMDataTreeIdentifier) obj;
+ if (datastoreType != other.datastoreType) {
+ return false;
+ }
+ return rootIdentifier.equals(other.rootIdentifier);
+ }
+
+ @Override
+ public int compareTo(final DOMDataTreeIdentifier o) {
+ int i = datastoreType.compareTo(o.datastoreType);
+ if (i != 0) {
+ return i;
+ }
+
+ final Iterator<PathArgument> mi = rootIdentifier.getPathArguments().iterator();
+ final Iterator<PathArgument> oi = o.rootIdentifier.getPathArguments().iterator();
+
+ while (mi.hasNext()) {
+ if (!oi.hasNext()) {
+ return 1;
+ }
+
+ final PathArgument ma = mi.next();
+ final PathArgument oa = oi.next();
+ i = ma.compareTo(oa);
+ if (i != 0) {
+ return i;
+ }
+ }
+
+ return oi.hasNext() ? -1 : 0;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.base.Preconditions;
+
+
+/**
+ * Failure reported when a data tree is no longer accessible.
+ */
+public class DOMDataTreeInaccessibleException extends DOMDataTreeListeningException {
+    private static final long serialVersionUID = 1L;
+
+    // Identifier of the data tree which became inaccessible; never null.
+    private final DOMDataTreeIdentifier treeIdentifier;
+
+    public DOMDataTreeInaccessibleException(final DOMDataTreeIdentifier treeIdentifier, final String message) {
+        super(message);
+        this.treeIdentifier = Preconditions.checkNotNull(treeIdentifier);
+    }
+
+    public DOMDataTreeInaccessibleException(final DOMDataTreeIdentifier treeIdentifier, final String message, final Throwable cause) {
+        // Propagate the cause to the superclass; it was previously dropped,
+        // losing the original failure's stack trace.
+        super(message, cause);
+        this.treeIdentifier = Preconditions.checkNotNull(treeIdentifier);
+    }
+
+    /**
+     * @return Identifier of the inaccessible data tree. Guaranteed to be non-null.
+     */
+    public final DOMDataTreeIdentifier getTreeIdentifier() {
+        return treeIdentifier;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import java.util.EventListener;
+import java.util.Map;
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Interface implemented by data consumers, e.g. processes wanting to act on data
+ * after it has been introduced to the conceptual data tree.
+ */
+public interface DOMDataTreeListener extends EventListener {
+ /**
+ * Invoked whenever one or more registered subtrees change. The logical changes are reported,
+ * as well as the roll up of new state for all subscribed subtrees.
+ *
+ * @param changes The set of changes being reported. Each subscribed subtree may be present
+ * at most once.
+ * @param subtrees Per-subtree state as visible after the reported changes have been applied.
+ * This includes all the subtrees this listener is subscribed to, even those
+ * which have not changed.
+ */
+ void onDataTreeChanged(@Nonnull Collection<DataTreeCandidate> changes, @Nonnull Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>> subtrees);
+
+ /**
+ * Invoked when a subtree listening failure occurs. This can be triggered, for example, when
+ * a connection to external subtree source is broken. The listener will not receive any other
+ * callbacks, but its registration still needs to be closed to prevent resource leak.
+ *
+ * @param causes Collection of failure causes, may not be null or empty.
+ */
+ void onDataTreeFailed(@Nonnull Collection<DOMDataTreeListeningException> causes);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+/**
+ * Base exception for the various reasons why a {@link DOMDataTreeListener}
+ * may be terminated by the {@link DOMDataTreeService} implementation.
+ */
+public class DOMDataTreeListeningException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public DOMDataTreeListeningException(final String message) {
+ super(message);
+ }
+
+ public DOMDataTreeListeningException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import javax.annotation.Nonnull;
+
+/**
+ * Exception thrown when a loop is detected in the way {@link DOMDataTreeListener}
+ * and {@link DOMDataTreeProducer} instances would be connected.
+ */
+public class DOMDataTreeLoopException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public DOMDataTreeLoopException(final @Nonnull String message) {
+ super(message);
+ }
+
+ public DOMDataTreeLoopException(final @Nonnull String message, final @Nonnull Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import javax.annotation.Nonnull;
+
+/**
+ * A data producer context. It allows transactions to be submitted to the subtrees
+ * specified at instantiation time. At any given time there may be a single transaction
+ * open. It needs to be either submitted or cancelled before another one can be open.
+ * Once a transaction is submitted, it will proceed to be committed asynchronously.
+ *
+ * Each instance has an upper bound on the number of transactions which can be in-flight,
+ * once that capacity is exceeded, an attempt to create a new transaction will block
+ * until some transactions complete.
+ *
+ * Each {@link DOMDataTreeProducer} can be in two logical states, bound and unbound,
+ * which define the lifecycle rules for when is it legal to create and submit transactions
+ * in relationship with {@link DOMDataTreeListener} callbacks.
+ *
+ * When a producer is first created, it is unbound. In this state the producer can be
+ * accessed by any application thread to allocate or submit transactions, as long as
+ * the 'single open transaction' rule is maintained. The producer and any transaction
+ * object MUST NOT be accessed, directly or indirectly, from a {@link DOMDataTreeListener}
+ * callback.
+ *
+ * When a producer is referenced in a call to {@link DOMDataTreeService#registerListener(DOMDataTreeListener, java.util.Collection, boolean, java.util.Collection)},
+ * an attempt will be made to bind the producer to the specified {@link DOMDataTreeListener}.
+ * Such an attempt will fail if the producer is already bound, or it has an open transaction.
+ * Once bound, the producer can only be accessed from within the {@link DOMDataTreeListener}
+ * callback on that particular instance. Any transaction which is not submitted by the
+ * time the callback returns will be implicitly cancelled. A producer becomes unbound
+ * when the listener it is bound to becomes unregistered.
+ */
+public interface DOMDataTreeProducer extends DOMDataTreeProducerFactory, AutoCloseable {
+ /**
+ * Allocate a new open transaction on this producer. Any and all transactions
+ * previously allocated must have been either submitted or cancelled by the
+ * time this method is invoked.
+ *
+ * @param isolated Indicates whether this transaction should be isolated (act as
+ * a barrier). An isolated transaction is processed separately from
+ * any preceding transactions. Non-isolated transactions may be
+ * merged and processed in a batch, such that any observers see the
+ * modifications contained in them as if the modifications were made
+ * in a single transaction.
+ * @return A new {@link DOMDataWriteTransaction}
+ * @throws {@link IllegalStateException} if a previous transaction was not closed.
+ * @throws {@link IllegalThreadStateException} if the calling thread context does not
+ * match the lifecycle rules enforced by the producer state (e.g. bound or unbound).
+ * This exception is thrown on a best effort basis and programs should not rely
+ * on it for correct operation.
+ */
+ @Nonnull DOMDataWriteTransaction createTransaction(boolean isolated);
+
+ /**
+ * {@inheritDoc}
+ *
+ * When invoked on a {@link DOMDataTreeProducer}, this method has additional restrictions.
+ * There may not be an open transaction from this producer. The method needs to be
+ * invoked in appropriate context, e.g. bound or unbound.
+ *
+ * Specified subtrees must be accessible by this producer. Accessible means they are a subset
+ * of the subtrees specified when the producer is instantiated. The set is further reduced as
+ * child producers are instantiated -- if you create a producer for /a and then a child for
+ * /a/b, /a/b is not accessible from the first producer.
+ *
+ * Once this method returns successfully, this (parent) producer loses the ability to
+ * access the specified paths until the resulting (child) producer is shut down.
+ *
+ * @throws {@link IllegalStateException} if there is an open transaction
+ * @throws {@link IllegalArgumentException} if subtrees contains a subtree which is not
+ * accessible by this producer
+ * @throws {@link IllegalThreadStateException} if the calling thread context does not
+ * match the lifecycle rules enforced by the producer state (e.g. bound or unbound).
+ * This exception is thrown on a best effort basis and programs should not rely
+ * on it for correct operation.
+ */
+ @Override
+ @Nonnull DOMDataTreeProducer createProducer(@Nonnull Collection<DOMDataTreeIdentifier> subtrees);
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws DOMDataTreeProducerBusyException when there is an open transaction.
+ */
+ @Override
+ void close() throws DOMDataTreeProducerException;
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+/**
+ * Exception indicating that the {@link DOMDataTreeProducer} has an open user
+ * transaction and cannot be closed.
+ */
+public class DOMDataTreeProducerBusyException extends DOMDataTreeProducerException {
+ private static final long serialVersionUID = 1L;
+
+ public DOMDataTreeProducerBusyException(final String message) {
+ super(message);
+ }
+
+ public DOMDataTreeProducerBusyException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+/**
+ * Base exception for all exceptions related to {@link DOMDataTreeProducer}s.
+ */
+public class DOMDataTreeProducerException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public DOMDataTreeProducerException(final String message) {
+ super(message);
+ }
+
+ public DOMDataTreeProducerException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import javax.annotation.Nonnull;
+
+/**
+ * Base source of {@link DOMDataTreeProducer}s. This interface is usually not used directly,
+ * but rather through one of its sub-interfaces.
+ */
+public interface DOMDataTreeProducerFactory {
+ /**
+ * Create a producer, which is able to access to a set of trees.
+ *
+ * @param subtrees The collection of subtrees the resulting producer should have access to.
+ * @return A {@link DOMDataTreeProducer} instance.
+ * @throws {@link IllegalArgumentException} if subtrees is empty.
+ */
+ @Nonnull DOMDataTreeProducer createProducer(@Nonnull Collection<DOMDataTreeIdentifier> subtrees);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A {@link DOMService} providing access to the conceptual data tree. Interactions
+ * with the data tree are split into data producers and consumers (listeners). Each
+ * of them operate on a set of subtrees, which need to be declared at instantiation time.
+ *
+ * Returned instances are not thread-safe and expected to be used by a single thread
+ * at a time. Furthermore, producers may not be accessed from consumer callbacks
+ * unless they were specified when the listener is registered.
+ *
+ * The service maintains a loop-free topology of producers and consumers. What this means
+ * is that a consumer is not allowed to access a producer, which affects any of the
+ * subtrees it is subscribed to. This restriction is in place to ensure the system does
+ * not go into a feedback loop, where it is impossible to block either a producer or
+ * a consumer without accumulating excess work in the backlog stemming from its previous
+ * activity.
+ */
+public interface DOMDataTreeService extends DOMDataTreeProducerFactory, DOMService {
+ /**
+ * Register a {@link DOMDataTreeListener} instance. Once registered, the listener
+ * will start receiving changes on the selected subtrees. If the listener cannot
+ * keep up with the rate of changes, and allowRxMerges is set to true, this service
+ * is free to merge the changes, so that a smaller number of them will be reported,
+ * possibly hiding some data transitions (like flaps).
+ *
+ * If the listener wants to write into any producer, that producer has to be mentioned
+ * in the call to this method. Those producers will be bound exclusively to the
+ * registration, so that accessing them outside of this listener's callback will trigger
+ * an error. Any producers mentioned must be idle, e.g. they may not have an open
+ * transaction at the time this method is invoked.
+ *
+ * Each listener instance can be registered at most once. Implementations of this
+ * interface have to guarantee that the listener's methods will not be invoked
+ * concurrently from multiple threads.
+ *
+ * @param listener {@link DOMDataTreeListener} that is being registered
+ * @param subtrees Conceptual subtree identifier of subtrees which should be monitored
+ * for changes. May not be null or empty.
+ * @param allowRxMerges True if the backend may perform ingress state compression.
+ * @param producers {@link DOMDataTreeProducer} instances to bind to the listener.
+ * @return A listener registration. Once closed, the listener will no longer be
+ * invoked and the producers will be unbound.
+ * @throws IllegalArgumentException if subtrees is empty or the listener is already bound
+ * @throws DOMDataTreeLoopException if the registration of the listener to the specified
+ * subtrees with specified producers would form a
+ * feedback loop
+ */
+ @Nonnull <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(@Nonnull T listener,
+ @Nonnull Collection<DOMDataTreeIdentifier> subtrees, boolean allowRxMerges, @Nonnull Collection<DOMDataTreeProducer> producers);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.EventListener;
+import javax.annotation.Nonnull;
+
+/**
+ * A single shard of the conceptual data tree. This interface defines the basic notifications
+ * a shard can receive. Each shard implementation is expected to also implement some of the
+ * datastore-level APIs. Which interfaces are required depends on the {@link DOMDataTreeShardingService}
+ * implementation.
+ */
+public interface DOMDataTreeShard extends EventListener {
+ /**
+ * Invoked whenever a child is getting attached as a more specific prefix under this shard.
+ *
+ * @param prefix Child's prefix
+ * @param child Child shard
+ */
+ void onChildAttached(@Nonnull DOMDataTreeIdentifier prefix, @Nonnull DOMDataTreeShard child);
+
+ /**
+ * Invoked whenever a child is getting detached as a more specific prefix under this shard.
+ *
+ * @param prefix Child's prefix
+ * @param child Child shard
+ */
+ void onChildDetached(@Nonnull DOMDataTreeIdentifier prefix, @Nonnull DOMDataTreeShard child);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import javax.annotation.Nonnull;
+
+/**
+ * Exception thrown when an attempt is made to attach a conflicting shard to the
+ * global table.
+ */
+public class DOMDataTreeShardingConflictException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public DOMDataTreeShardingConflictException(final @Nonnull String message) {
+ super(message);
+ }
+
+ public DOMDataTreeShardingConflictException(final @Nonnull String message, final @Nonnull Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A {@link DOMService} providing access to details on how the conceptual data tree
+ * is distributed among providers (also known as shards). Each shard is tied to a
+ * single {@link DOMDataTreeIdentifier}. Based on those data tree identifiers, the
+ * shards are organized in a tree, where there is a logical parent/child relationship.
+ *
+ * It is not allowed to attach two shards to the same data tree identifier, which means
+ * the mapping of each piece of information has an unambiguous home. When accessing
+ * the information, the shard with the longest matching data tree identifier is used,
+ * which is why this interface treats it as a prefix.
+ *
+ * Whenever a parent/child relationship is changed, the parent is notified, so it can
+ * understand that a logical child has been attached.
+ */
+public interface DOMDataTreeShardingService extends DOMService {
+ /**
+ * Register a shard as responsible for a particular subtree prefix.
+ *
+ * @param prefix Data tree identifier, may not be null.
+ * @param shard Responsible shard instance
+ * @return A registration. To remove the shard's binding, close the registration.
+ * @throws DOMDataTreeShardingConflictException if the prefix is already bound
+ */
+ @Nonnull <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(@Nonnull DOMDataTreeIdentifier prefix, @Nonnull T shard) throws DOMDataTreeShardingConflictException;
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import java.util.EventListener;
+import javax.annotation.Nonnull;
+
+/**
+ * An {@link EventListener} used to track RPC implementations becoming (un)available
+ * to a {@link DOMRpcService}.
+ */
+public interface DOMRpcAvailabilityListener extends EventListener {
+ /**
+ * Method invoked whenever an RPC type becomes available.
+ *
+ * @param rpcs RPC types newly available
+ */
+ void onRpcAvailable(@Nonnull Collection<DOMRpcIdentifier> rpcs);
+
+ /**
+ * Method invoked whenever an RPC type becomes unavailable.
+ *
+ * @param rpcs RPC types which became unavailable
+ */
+ void onRpcUnavailable(@Nonnull Collection<DOMRpcIdentifier> rpcs);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+/**
+ * Base class for failures that can occur during RPC invocation. This covers
+ * transport and protocol-level failures.
+ */
+public abstract class DOMRpcException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Construct a new instance with a message and an empty cause.
+ *
+ * @param message Exception message
+ */
+ protected DOMRpcException(final String message) {
+ super(message);
+ }
+
+ /**
+ * Construct a new instance with a message and a cause.
+ *
+ * @param message Exception message
+ * @param cause Chained cause
+ */
+ protected DOMRpcException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import java.util.Objects;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Identifier of a RPC context. This is an extension of the YANG RPC, which
+ * always has global context. It allows an RPC to have an instance identifier
+ * attached, so that there can be multiple implementations bound to different
+ * contexts concurrently.
+ */
+public abstract class DOMRpcIdentifier {
+ private static final class Global extends DOMRpcIdentifier {
+ private Global(final @Nonnull SchemaPath type) {
+ super(type);
+ }
+
+ @Override
+ public YangInstanceIdentifier getContextReference() {
+ return null;
+ }
+ }
+
+ private static final class Local extends DOMRpcIdentifier {
+ private final YangInstanceIdentifier contextReference;
+
+ private Local(final @Nonnull SchemaPath type, final @Nonnull YangInstanceIdentifier contextReference) {
+ super(type);
+ this.contextReference = Preconditions.checkNotNull(contextReference);
+ }
+
+ @Override
+ public YangInstanceIdentifier getContextReference() {
+ return contextReference;
+ }
+ }
+
+ private final SchemaPath type;
+
+ private DOMRpcIdentifier(final SchemaPath type) {
+ this.type = Preconditions.checkNotNull(type);
+ }
+
+ /**
+ * Create a global RPC identifier.
+ *
+ * @param type RPC type, SchemaPath of its definition, may not be null
+ * @return A global RPC identifier, guaranteed to be non-null.
+ */
+ public static @Nonnull DOMRpcIdentifier create(final @Nonnull SchemaPath type) {
+ return new Global(type);
+ }
+
+ /**
+ * Create an RPC identifier with a particular context reference.
+ *
+ * @param type RPC type, SchemaPath of its definition, may not be null
+ * @param contextReference Context reference, null means a global RPC identifier.
+ * @return An RPC identifier (global if contextReference is null), guaranteed to be non-null.
+ */
+ public static @Nonnull DOMRpcIdentifier create(final @Nonnull SchemaPath type, final @Nullable YangInstanceIdentifier contextReference) {
+ if (contextReference == null) {
+ return new Global(type);
+ } else {
+ return new Local(type, contextReference);
+ }
+ }
+
+ /**
+ * Return the RPC type.
+ *
+ * @return RPC type.
+ */
+ public final @Nonnull SchemaPath getType() {
+ return type;
+ }
+
+ /**
+ * Return the RPC context reference. Null value indicates global context.
+ *
+ * @return RPC context reference.
+ */
+ public abstract @Nullable YangInstanceIdentifier getContextReference();
+
+ @Override
+ public final int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + type.hashCode();
+ result = prime * result + (getContextReference() == null ? 0 : getContextReference().hashCode());
+ return result;
+ }
+
+ @Override
+ public final boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DOMRpcIdentifier)) {
+ return false;
+ }
+ DOMRpcIdentifier other = (DOMRpcIdentifier) obj;
+ if (!type.equals(other.type)) {
+ return false;
+ }
+ return Objects.equals(getContextReference(), other.getContextReference());
+ }
+
+ @Override
+ public final String toString() {
+ return MoreObjects.toStringHelper(this).omitNullValues().add("type", type).add("contextReference", getContextReference()).toString();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.util.concurrent.CheckedFuture;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Interface implemented by an individual RPC implementation. This API allows for dispatch
+ * implementations, e.g. an individual object handling a multitude of RPCs.
+ */
+public interface DOMRpcImplementation {
+ /**
+ * Initiate invocation of the RPC. Implementations of this method are
+ * expected to not block on external resources.
+ *
+ * @param rpc RPC identifier which was invoked
+ * @param input Input arguments, null if the RPC does not take any.
+ * @return A {@link CheckedFuture} which will return either a result structure,
+ * or report a subclass of {@link DOMRpcException} reporting a transport
+ * error.
+ */
+ @Nonnull CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull DOMRpcIdentifier rpc, @Nullable NormalizedNode<?, ?> input);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
+
+/**
+ * Exception indicating that no implementation of the requested RPC service is available.
+ */
+public class DOMRpcImplementationNotAvailableException extends DOMRpcException {
+ private static final long serialVersionUID = 1L;
+
+ public DOMRpcImplementationNotAvailableException(@Nonnull final String format, final Object... args) {
+ super(String.format(format, args));
+ }
+
+ public DOMRpcImplementationNotAvailableException(@Nonnull final Throwable cause, @Nonnull final String format, final Object... args) {
+ super(String.format(format, args), Preconditions.checkNotNull(cause));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+
+/**
+ * A registration of a {@link DOMRpcImplementation}. Used to track and revoke a registration
+ * with a {@link DOMRpcProviderService}.
+ *
+ * @param <T> RPC implementation type
+ */
+public interface DOMRpcImplementationRegistration<T extends DOMRpcImplementation> extends ObjectRegistration<T> {
+ @Override
+ void close();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Set;
+import javax.annotation.Nonnull;
+
+/**
+ * A {@link DOMService} which allows registration of RPC implementations with a conceptual
+ * router. The client counterpart of this service is {@link DOMRpcService}.
+ */
+public interface DOMRpcProviderService extends DOMService {
+ /**
+ * Register an {@link DOMRpcImplementation} object with this service.
+ *
+ * @param implementation RPC implementation, must not be null
+ * @param rpcs Array of supported RPC identifiers. Must not be null, empty, or contain a null element.
+ * Each identifier is added exactly once, no matter how many times it occurs.
+ * @return A {@link DOMRpcImplementationRegistration} object, guaranteed to be non-null.
+ * @throws NullPointerException if implementation or types is null
+ * @throws IllegalArgumentException if types is empty or contains a null element.
+ */
+ @Nonnull <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(@Nonnull T implementation, @Nonnull DOMRpcIdentifier... rpcs);
+
+ /**
+ * Register an {@link DOMRpcImplementation} object with this service.
+ *
+ * @param implementation RPC implementation, must not be null
+ * @param rpcs Set of supported RPC identifiers. Must not be null, empty, or contain a null element.
+ * @return A {@link DOMRpcImplementationRegistration} object, guaranteed to be non-null.
+ * @throws NullPointerException if implementation or types is null
+ * @throws IllegalArgumentException if types is empty or contains a null element.
+ */
+ @Nonnull <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(@Nonnull T implementation, @Nonnull Set<DOMRpcIdentifier> rpcs);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import java.util.Collection;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Interface defining a result of an RPC call.
+ */
+public interface DOMRpcResult {
+ /**
+ * Returns a set of errors and warnings which occurred during processing
+ * the call.
+ *
+ * @return a Collection of {@link RpcError}, guaranteed to be non-null. In case
+ * no errors are reported, an empty collection is returned.
+ */
+ @Nonnull Collection<RpcError> getErrors();
+
+ /**
+ * Returns the value result of the call or null if no result is available.
+ *
+ * @return Invocation result, null if the operation has not produced a result. This might
+ * be the case if the operation does not produce a result, or if it failed.
+ */
+ @Nullable NormalizedNode<?, ?> getResult();
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import com.google.common.util.concurrent.CheckedFuture;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * A {@link DOMService} which allows clients to invoke RPCs. The conceptual model of this
+ * service is that of a dynamic router, where the set of available RPC services can change
+ * dynamically. The service allows users to add a listener to track the process of
+ * RPCs becoming available.
+ */
+public interface DOMRpcService extends DOMService {
+ /**
+ * Initiate invocation of an RPC. This method is guaranteed to not block on any external
+ * resources.
+ *
+ * @param type SchemaPath of the RPC to be invoked
+ * @param input Input arguments, null if the RPC does not take any.
+ * @return A {@link CheckedFuture} which will return either a result structure,
+ * or report a subclass of {@link DOMRpcException} reporting a transport
+ * error.
+ */
+ @Nonnull CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input);
+
+ /**
+ * Register a {@link DOMRpcAvailabilityListener} with this service to receive notifications
+ * about RPC implementations becoming (un)available. The listener will be invoked with the
+ * current implementations reported and will be kept up to date as implementations come and go.
+ *
+ * Users should note that using a listener does not necessarily mean that {@link #invokeRpc(SchemaPath, NormalizedNode)}
+ * will not report a failure due to {@link DOMRpcImplementationNotAvailableException} and
+ * need to be ready to handle it. Implementations are encouraged to take reasonable precautions
+ * to prevent this scenario from occurring.
+ *
+ * @param listener {@link DOMRpcAvailabilityListener} instance to register
+ * @return A {@link DOMRpcAvailabilityListenerRegistration} representing this registration. Performing
+ * a {@link DOMRpcAvailabilityListenerRegistration#close()} will cancel it. Returned object
+ * is guaranteed to be non-null.
+ */
+ @Nonnull <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener);
+}
package org.opendaylight.controller.sal.core.api;
import java.util.concurrent.Future;
-
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+/**
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcService} instead.
+ */
+@Deprecated
public interface RpcConsumptionRegistry {
/**
* Sends an RPC to other components registered to the broker.
*/
package org.opendaylight.controller.sal.core.api;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.Set;
-
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import com.google.common.util.concurrent.ListenableFuture;
-
/**
* {@link Provider}'s implementation of an RPC.
*
* {@link RpcResult}
* <li> {@link Broker} returns the {@link RpcResult} to {@link Consumer}
* </ol>
+ *
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation} instead.
*/
+@Deprecated
public interface RpcImplementation extends Provider.ProviderFunctionality {
/**
/**
* Exception reported when no RPC implementation is found in the system.
+ *
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException} instead.
*/
+@Deprecated
public class RpcImplementationUnavailableException extends RuntimeException {
private static final long serialVersionUID = 1L;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+/**
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService} and {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcService} instead.
+ */
+@Deprecated
public interface RpcProvisionRegistry extends RpcImplementation, BrokerService, RouteChangePublisher<RpcRoutingContext, YangInstanceIdentifier>, DOMService {
/**
package org.opendaylight.controller.sal.core.api;
import java.util.EventListener;
-
import org.opendaylight.yangtools.yang.common.QName;
+/**
+ * @deprecated Use {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener} instead.
+ */
+@Deprecated
public interface RpcRegistrationListener extends EventListener {
public void onRpcImplementationAdded(QName name);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.api;
+
+import static org.junit.Assert.assertNotNull;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.net.URI;
+import java.util.Collections;
+import javax.annotation.Nonnull;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+
+/**
+ * Abstract test suite demonstrating various access patterns on how a {@link DOMDataTreeService}
+ * can be used.
+ */
+public abstract class AbstractDOMDataTreeServiceTestSuite {
+ protected static final QNameModule TEST_MODULE = QNameModule.create(URI.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:test:store"), null);
+
+ protected static final YangInstanceIdentifier UNORDERED_CONTAINER_IID = YangInstanceIdentifier.create(
+ new NodeIdentifier(QName.create(TEST_MODULE, "lists")),
+ new NodeIdentifier(QName.create(TEST_MODULE, "unordered-container")));
+ protected static final DOMDataTreeIdentifier UNORDERED_CONTAINER_TREE = new DOMDataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, UNORDERED_CONTAINER_IID);
+
+ /**
+ * Return a reference to the service used in this test. The instance
+ * needs to be reused within the same test and must be isolated between
+ * tests.
+ *
+ * @return {@link DOMDataTreeService} instance.
+ */
+ protected abstract @Nonnull DOMDataTreeService service();
+
+ /**
+ * A simple unbound producer. It writes some basic data into the data store based on the
+ * test model.
+ * @throws DOMDataTreeProducerException
+ * @throws TransactionCommitFailedException
+ */
+ @Test
+ public final void testBasicProducer() throws DOMDataTreeProducerException, TransactionCommitFailedException {
+ // Create a producer. It is an AutoCloseable resource, hence the try-with pattern
+ try (final DOMDataTreeProducer prod = service().createProducer(Collections.singleton(UNORDERED_CONTAINER_TREE))) {
+ assertNotNull(prod);
+
+ final DOMDataWriteTransaction tx = prod.createTransaction(true);
+ assertNotNull(tx);
+
+ tx.put(LogicalDatastoreType.OPERATIONAL, UNORDERED_CONTAINER_IID, ImmutableContainerNodeBuilder.create().build());
+
+ final CheckedFuture<Void, TransactionCommitFailedException> f = tx.submit();
+ assertNotNull(f);
+
+ f.checkedGet();
+ }
+ }
+
+ // TODO: simple listener
+}
<artifactId>guava</artifactId>
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
+ <groupId>com.lmax</groupId>
+ <artifactId>disruptor</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-yang-types</artifactId>
</dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Collection;
import java.util.EnumMap;
import java.util.Map;
import java.util.Map.Entry;
* the Future fails with a {@link TransactionCommitFailedException}.
*/
protected abstract CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts);
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts);
/**
* Creates a new composite read-only transaction
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+abstract class AbstractDOMRpcRoutingTableEntry {
+ private final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls;
+ private final SchemaPath schemaPath;
+
+ protected AbstractDOMRpcRoutingTableEntry(final SchemaPath schemaPath, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ this.schemaPath = Preconditions.checkNotNull(schemaPath);
+ this.impls = Preconditions.checkNotNull(impls);
+ }
+
+ protected final SchemaPath getSchemaPath() {
+ return schemaPath;
+ }
+
+ protected final List<DOMRpcImplementation> getImplementations(final YangInstanceIdentifier context) {
+ return impls.get(context);
+ }
+
+ final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> getImplementations() {
+ return impls;
+ }
+
+ public boolean containsContext(final YangInstanceIdentifier contextReference) {
+ return impls.containsKey(contextReference);
+ }
+
+ final Set<YangInstanceIdentifier> registeredIdentifiers() {
+ return impls.keySet();
+ }
+
+ final AbstractDOMRpcRoutingTableEntry add(final DOMRpcImplementation implementation, final List<YangInstanceIdentifier> newRpcs) {
+ final Builder<YangInstanceIdentifier, List<DOMRpcImplementation>> vb = ImmutableMap.builder();
+ for (Entry<YangInstanceIdentifier, List<DOMRpcImplementation>> ve : impls.entrySet()) {
+ if (newRpcs.remove(ve.getKey())) {
+ final ArrayList<DOMRpcImplementation> i = new ArrayList<>(ve.getValue().size() + 1);
+ i.addAll(ve.getValue());
+ i.add(implementation);
+ vb.put(ve.getKey(), i);
+ } else {
+ vb.put(ve);
+ }
+ }
+
+ return newInstance(vb.build());
+ }
+
+ final AbstractDOMRpcRoutingTableEntry remove(final DOMRpcImplementation implementation, final List<YangInstanceIdentifier> removed) {
+ final Builder<YangInstanceIdentifier, List<DOMRpcImplementation>> vb = ImmutableMap.builder();
+ for (Entry<YangInstanceIdentifier, List<DOMRpcImplementation>> ve : impls.entrySet()) {
+ if (removed.remove(ve.getKey())) {
+ final ArrayList<DOMRpcImplementation> i = new ArrayList<>(ve.getValue());
+ i.remove(implementation);
+ // We could trimToSize(), but that may perform another copy just to get rid
+ // of a single element. That is probably not worth the trouble.
+ if (!i.isEmpty()) {
+ vb.put(ve.getKey(), i);
+ }
+ } else {
+ vb.put(ve);
+ }
+ }
+
+ final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> v = vb.build();
+ return v.isEmpty() ? null : newInstance(v);
+ }
+
+ protected abstract CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input);
+ protected abstract AbstractDOMRpcRoutingTableEntry newInstance(final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls);
+}
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collection;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
};
private static final Logger LOG = LoggerFactory.getLogger(CommitCoordinationTask.class);
- private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
+ private final Collection<DOMStoreThreePhaseCommitCohort> cohorts;
private final DurationStatisticsTracker commitStatTracker;
private final DOMDataWriteTransaction tx;
- private final int cohortSize;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
final DurationStatisticsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
this.commitStatTracker = commitStatTracker;
- this.cohortSize = Iterables.size(cohorts);
}
@Override
*
*/
private ListenableFuture<?>[] canCommitAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.canCommit();
*
*/
private ListenableFuture<?>[] preCommitAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.preCommit();
* @return List of all cohorts futures from can commit phase.
*/
private ListenableFuture<?>[] commitAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.commit();
*/
private ListenableFuture<Void> abortAsyncAll() {
- final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
ops[i++] = cohort.abort();
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
+import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLong;
@Override
public CheckedFuture<Void, TransactionCommitFailedException> submit(
- final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final DOMDataWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
checkNotFailed();
checkNotClosed();
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableMultimap;
+import com.google.common.collect.ImmutableMultimap.Builder;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multimaps;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.lmax.disruptor.EventHandler;
+import com.lmax.disruptor.InsufficientCapacityException;
+import com.lmax.disruptor.SleepingWaitStrategy;
+import com.lmax.disruptor.WaitStrategy;
+import com.lmax.disruptor.dsl.Disruptor;
+import com.lmax.disruptor.dsl.ProducerType;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Joint implementation of {@link DOMNotificationPublishService} and {@link DOMNotificationService}. Provides
+ * routing of notifications from publishers to subscribers.
+ *
+ * Internal implementation works by allocating a two-handler Disruptor. The first handler delivers notifications
+ * to subscribed listeners and the second one notifies whoever may be listening on the returned future. Registration
+ * state tracking is performed by a simple immutable multimap -- when a registration or unregistration occurs we
+ * re-generate the entire map from scratch and set it atomically. While registrations/unregistrations synchronize
+ * on this instance, notifications do not take any locks here.
+ *
+ * The fully-blocking {@link #putNotification(DOMNotification)} and non-blocking {@link #offerNotification(DOMNotification)}
+ * are realized using the Disruptor's native operations. The bounded-blocking {@link #offerNotification(DOMNotification, long, TimeUnit)}
+ * is realized by arming a background wakeup interrupt.
+ */
+public final class DOMNotificationRouter implements AutoCloseable, DOMNotificationPublishService, DOMNotificationService {
+ // Shared immediate future returned whenever there is no subscriber for a notification type.
+ private static final ListenableFuture<Void> NO_LISTENERS = Futures.immediateFuture(null);
+ private static final WaitStrategy DEFAULT_STRATEGY = new SleepingWaitStrategy();
+ // First-stage handler: fans a published event out to its subscribers.
+ private static final EventHandler<DOMNotificationRouterEvent> DISPATCH_NOTIFICATIONS = new EventHandler<DOMNotificationRouterEvent>() {
+ @Override
+ public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) throws Exception {
+ event.deliverNotification();
+
+ }
+ };
+ // Second-stage handler: completes the future handed back to the publisher.
+ private static final EventHandler<DOMNotificationRouterEvent> NOTIFY_FUTURE = new EventHandler<DOMNotificationRouterEvent>() {
+ @Override
+ public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) {
+ event.setFuture();
+ }
+ };
+
+ private final Disruptor<DOMNotificationRouterEvent> disruptor;
+ private final ExecutorService executor;
+ // Copy-on-write subscriber map; replaced wholesale under 'this' lock, read lock-free.
+ private volatile Multimap<SchemaPath, ListenerRegistration<? extends DOMNotificationListener>> listeners = ImmutableMultimap.of();
+
+ private DOMNotificationRouter(final ExecutorService executor, final Disruptor<DOMNotificationRouterEvent> disruptor) {
+ this.executor = Preconditions.checkNotNull(executor);
+ this.disruptor = Preconditions.checkNotNull(disruptor);
+ }
+
+ @SuppressWarnings("unchecked")
+ public static DOMNotificationRouter create(final int queueDepth) {
+ final ExecutorService executor = Executors.newCachedThreadPool();
+ final Disruptor<DOMNotificationRouterEvent> disruptor = new Disruptor<>(DOMNotificationRouterEvent.FACTORY, queueDepth, executor, ProducerType.MULTI, DEFAULT_STRATEGY);
+
+ // NOTE(review): Disruptor.after() expects DISPATCH_NOTIFICATIONS to already be
+ // registered via handleEventsWith(); confirm both handlers are actually wired,
+ // otherwise this call throws at startup.
+ disruptor.after(DISPATCH_NOTIFICATIONS).handleEventsWith(NOTIFY_FUTURE);
+ disruptor.start();
+
+ return new DOMNotificationRouter(executor, disruptor);
+ }
+
+ @Override
+ public synchronized <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener, final Collection<SchemaPath> types) {
+ final ListenerRegistration<T> reg = new AbstractListenerRegistration<T>(listener) {
+ @Override
+ protected void removeRegistration() {
+ final ListenerRegistration<T> me = this;
+
+ // Rebuild the map without this registration; identity comparison is intentional.
+ synchronized (DOMNotificationRouter.this) {
+ listeners = ImmutableMultimap.copyOf(Multimaps.filterValues(listeners, new Predicate<ListenerRegistration<? extends DOMNotificationListener>>() {
+ @Override
+ public boolean apply(final ListenerRegistration<? extends DOMNotificationListener> input) {
+ return input != me;
+ }
+ }));
+ }
+ }
+ };
+
+ // An empty type set leaves the map untouched: the registration is returned
+ // but will never receive notifications.
+ if (!types.isEmpty()) {
+ final Builder<SchemaPath, ListenerRegistration<? extends DOMNotificationListener>> b = ImmutableMultimap.builder();
+ b.putAll(listeners);
+
+ for (SchemaPath t : types) {
+ b.put(t, reg);
+ }
+
+ listeners = b.build();
+ }
+
+ return reg;
+ }
+
+ @Override
+ public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener, final SchemaPath... types) {
+ return registerNotificationListener(listener, Arrays.asList(types));
+ }
+
+ // Fills the pre-claimed ring buffer slot 'seq' and publishes it; the returned
+ // future is completed by the NOTIFY_FUTURE handler once delivery finishes.
+ private ListenableFuture<Void> publish(final long seq, final DOMNotification notification, final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers) {
+ final DOMNotificationRouterEvent event = disruptor.get(seq);
+ final ListenableFuture<Void> future = event.initialize(notification, subscribers);
+ disruptor.getRingBuffer().publish(seq);
+ return future;
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> putNotification(final DOMNotification notification) throws InterruptedException {
+ final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers = listeners.get(notification.getType());
+ if (subscribers.isEmpty()) {
+ return NO_LISTENERS;
+ }
+
+ // next() blocks until ring buffer space is available.
+ final long seq = disruptor.getRingBuffer().next();
+ return publish(seq, notification, subscribers);
+ }
+
+ private ListenableFuture<? extends Object> tryPublish(final DOMNotification notification, final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers) {
+ final long seq;
+ try {
+ seq = disruptor.getRingBuffer().tryNext();
+ } catch (InsufficientCapacityException e) {
+ // Ring buffer full: report back-pressure via the well-known REJECTED future.
+ return DOMNotificationPublishService.REJECTED;
+ }
+
+ return publish(seq, notification, subscribers);
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> offerNotification(final DOMNotification notification) {
+ final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers = listeners.get(notification.getType());
+ if (subscribers.isEmpty()) {
+ return NO_LISTENERS;
+ }
+
+ return tryPublish(notification, subscribers);
+ }
+
+ @Override
+ public ListenableFuture<? extends Object> offerNotification(final DOMNotification notification, final long timeout,
+ final TimeUnit unit) throws InterruptedException {
+ final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers = listeners.get(notification.getType());
+ if (subscribers.isEmpty()) {
+ return NO_LISTENERS;
+ }
+
+ // Attempt to perform a non-blocking publish first
+ final ListenableFuture<? extends Object> noBlock = tryPublish(notification, subscribers);
+ if (!DOMNotificationPublishService.REJECTED.equals(noBlock)) {
+ return noBlock;
+ }
+
+ /*
+ * FIXME: we need a background thread, which will watch out for blocking too long. Here
+ * we will arm a tasklet for it and synchronize delivery of interrupt properly.
+ */
+ throw new UnsupportedOperationException("Not implemented yet");
+ }
+
+ @Override
+ public void close() {
+ // NOTE(review): presumably shutdown() drains in-flight events before the
+ // executor is stopped -- confirm against the Disruptor version in use.
+ disruptor.shutdown();
+ executor.shutdown();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.lmax.disruptor.EventFactory;
+import java.util.Collection;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * A single notification event in the disruptor ringbuffer. These objects are reused,
+ * so they do have mutable state.
+ */
+final class DOMNotificationRouterEvent {
+ // Used by the Disruptor to pre-allocate every ring buffer slot up front.
+ public static final EventFactory<DOMNotificationRouterEvent> FACTORY = new EventFactory<DOMNotificationRouterEvent>() {
+ @Override
+ public DOMNotificationRouterEvent newInstance() {
+ return new DOMNotificationRouterEvent();
+ }
+ };
+
+ // All three fields are overwritten by initialize() each time the slot is reused.
+ private Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers;
+ private DOMNotification notification;
+ private SettableFuture<Void> future;
+
+ private DOMNotificationRouterEvent() {
+ // Hidden on purpose, initialized in initialize()
+ }
+
+ // Arms this slot with a notification and its subscriber snapshot; returns the
+ // future which setFuture() will complete once delivery has happened.
+ ListenableFuture<Void> initialize(final DOMNotification notification, final Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers) {
+ this.notification = Preconditions.checkNotNull(notification);
+ this.subscribers = Preconditions.checkNotNull(subscribers);
+ this.future = SettableFuture.create();
+ return this.future;
+ }
+
+ void deliverNotification() {
+ for (ListenerRegistration<? extends DOMNotificationListener> r : subscribers) {
+ final DOMNotificationListener l = r.getInstance();
+ // A null instance presumably indicates a closed registration; skip it.
+ if (l != null) {
+ l.onNotification(notification);
+ }
+ }
+ }
+
+ // Completes the future returned from initialize(), signalling delivery is done.
+ void setFuture() {
+ future.set(null);
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMRpcImplementationRegistration;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * RPC routing facade. Keeps the set of known implementations in an immutable
+ * {@link DOMRpcRoutingTable} replaced copy-on-write under 'this', and delivers
+ * availability callbacks to listeners on a dedicated single-threaded executor.
+ */
+public final class DOMRpcRouter implements AutoCloseable, DOMRpcService, DOMRpcProviderService, SchemaContextListener {
+ private static final ThreadFactory THREAD_FACTORY = new ThreadFactoryBuilder().setNameFormat("DOMRpcRouter-listener-%s").setDaemon(true).build();
+ // Single thread so availability callbacks are delivered in registration order.
+ private final ExecutorService listenerNotifier = Executors.newSingleThreadExecutor(THREAD_FACTORY);
+ @GuardedBy("this")
+ private Collection<ListenerRegistration<? extends DOMRpcAvailabilityListener>> listeners = Collections.emptyList();
+ // Lock-free read path for invokeRpc(); writes replace the whole table.
+ private volatile DOMRpcRoutingTable routingTable = DOMRpcRoutingTable.EMPTY;
+
+ @Override
+ public <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(final T implementation, final DOMRpcIdentifier... rpcs) {
+ return registerRpcImplementation(implementation, ImmutableSet.copyOf(rpcs));
+ }
+
+ // Returns the subset of 'candidates' not present in 'table'; used to compute
+ // which RPCs actually appeared/disappeared across a table transition.
+ private static Collection<DOMRpcIdentifier> notPresentRpcs(final DOMRpcRoutingTable table, final Collection<DOMRpcIdentifier> candidates) {
+ return ImmutableSet.copyOf(Collections2.filter(candidates, new Predicate<DOMRpcIdentifier>() {
+ @Override
+ public boolean apply(final DOMRpcIdentifier input) {
+ return !table.contains(input);
+ }
+ }));
+ }
+
+ private synchronized void removeRpcImplementation(final DOMRpcImplementation implementation, final Set<DOMRpcIdentifier> rpcs) {
+ final DOMRpcRoutingTable oldTable = routingTable;
+ final DOMRpcRoutingTable newTable = oldTable.remove(implementation, rpcs);
+
+ // RPCs absent from the new table are the ones this removal made unavailable.
+ final Collection<DOMRpcIdentifier> removedRpcs = notPresentRpcs(newTable, rpcs);
+ final Collection<ListenerRegistration<? extends DOMRpcAvailabilityListener>> capturedListeners = listeners;
+ routingTable = newTable;
+
+ listenerNotifier.execute(new Runnable() {
+ @Override
+ public void run() {
+ for (ListenerRegistration<? extends DOMRpcAvailabilityListener> l : capturedListeners) {
+ // Need to ensure removed listeners do not get notified
+ synchronized (DOMRpcRouter.this) {
+ if (listeners.contains(l)) {
+ l.getInstance().onRpcUnavailable(removedRpcs);
+ }
+ }
+ }
+ }
+ });
+ }
+
+ @Override
+ public synchronized <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(final T implementation, final Set<DOMRpcIdentifier> rpcs) {
+ final DOMRpcRoutingTable oldTable = routingTable;
+ final DOMRpcRoutingTable newTable = oldTable.add(implementation, rpcs);
+
+ // RPCs absent from the old table are the ones this registration made available.
+ final Collection<DOMRpcIdentifier> addedRpcs = notPresentRpcs(oldTable, rpcs);
+ final Collection<ListenerRegistration<? extends DOMRpcAvailabilityListener>> capturedListeners = listeners;
+ routingTable = newTable;
+
+ listenerNotifier.execute(new Runnable() {
+ @Override
+ public void run() {
+ for (ListenerRegistration<? extends DOMRpcAvailabilityListener> l : capturedListeners) {
+ // Need to ensure removed listeners do not get notified
+ synchronized (DOMRpcRouter.this) {
+ if (listeners.contains(l)) {
+ l.getInstance().onRpcAvailable(addedRpcs);
+ }
+ }
+ }
+ }
+ });
+
+ return new AbstractDOMRpcImplementationRegistration<T>(implementation) {
+ @Override
+ protected void removeRegistration() {
+ removeRpcImplementation(getInstance(), rpcs);
+ }
+ };
+ }
+
+ @Override
+ public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type, final NormalizedNode<?, ?> input) {
+ return routingTable.invokeRpc(type, input);
+ }
+
+ // Copy-on-write removal; filters by equals() against the registration being closed.
+ private synchronized void removeListener(final ListenerRegistration<? extends DOMRpcAvailabilityListener> reg) {
+ listeners = ImmutableList.copyOf(Collections2.filter(listeners, new Predicate<Object>() {
+ @Override
+ public boolean apply(final Object input) {
+ return !reg.equals(input);
+ }
+ }));
+ }
+
+ @Override
+ public synchronized <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(final T listener) {
+ final ListenerRegistration<T> ret = new AbstractListenerRegistration<T>(listener) {
+ @Override
+ protected void removeRegistration() {
+ removeListener(this);
+ }
+ };
+
+ final Builder<ListenerRegistration<? extends DOMRpcAvailabilityListener>> b = ImmutableList.builder();
+ b.addAll(listeners);
+ b.add(ret);
+ listeners = b.build();
+ final Map<SchemaPath, Set<YangInstanceIdentifier>> capturedRpcs = routingTable.getRpcs();
+
+ // Replay the currently-known RPCs to the new listener asynchronously.
+ listenerNotifier.execute(new Runnable() {
+ @Override
+ public void run() {
+ for (final Entry<SchemaPath, Set<YangInstanceIdentifier>> e : capturedRpcs.entrySet()) {
+ listener.onRpcAvailable(Collections2.transform(e.getValue(), new Function<YangInstanceIdentifier, DOMRpcIdentifier>() {
+ @Override
+ public DOMRpcIdentifier apply(final YangInstanceIdentifier input) {
+ return DOMRpcIdentifier.create(e.getKey(), input);
+ }
+ }));
+ }
+ }
+ });
+
+ return ret;
+ }
+
+ @Override
+ public synchronized void onGlobalContextUpdated(final SchemaContext context) {
+ final DOMRpcRoutingTable oldTable = routingTable;
+ final DOMRpcRoutingTable newTable = oldTable.setSchemaContext(context);
+ routingTable = newTable;
+ }
+
+ @Override
+ public void close() {
+ // Stop delivering availability callbacks; the routing table needs no cleanup.
+ listenerNotifier.shutdown();
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
+import com.google.common.collect.LinkedListMultimap;
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
+
+/**
+ * Immutable snapshot of RPC routing state. Mutation methods ({@link #add},
+ * {@link #remove}, {@link #setSchemaContext}) return a new table instance,
+ * leaving this one untouched, so readers never need a lock.
+ */
+final class DOMRpcRoutingTable {
+ // yang-ext extension marking an RPC input leaf as a routing context reference.
+ private static final QName CONTEXT_REFERENCE = QName.cachedReference(QName.create("urn:opendaylight:yang:extension:yang-ext", "2013-07-09", "context-reference"));
+
+ static final DOMRpcRoutingTable EMPTY = new DOMRpcRoutingTable();
+ private static final Function<AbstractDOMRpcRoutingTableEntry, Set<YangInstanceIdentifier>> EXTRACT_IDENTIFIERS =
+ new Function<AbstractDOMRpcRoutingTableEntry, Set<YangInstanceIdentifier>>() {
+ @Override
+ public Set<YangInstanceIdentifier> apply(final AbstractDOMRpcRoutingTableEntry input) {
+ return input.registeredIdentifiers();
+ }
+ };
+ private final Map<SchemaPath, AbstractDOMRpcRoutingTableEntry> rpcs;
+ // May be null until setSchemaContext() has been called (e.g. in EMPTY).
+ private final SchemaContext schemaContext;
+
+ private DOMRpcRoutingTable() {
+ rpcs = Collections.emptyMap();
+ schemaContext = null;
+ }
+
+ private DOMRpcRoutingTable(final Map<SchemaPath, AbstractDOMRpcRoutingTableEntry> rpcs, final SchemaContext schemaContext) {
+ this.rpcs = Preconditions.checkNotNull(rpcs);
+ this.schemaContext = schemaContext;
+ }
+
+ // Groups the flat identifier set by RPC type -> context references.
+ private static ListMultimap<SchemaPath, YangInstanceIdentifier> decomposeIdentifiers(final Set<DOMRpcIdentifier> rpcs) {
+ final ListMultimap<SchemaPath, YangInstanceIdentifier> ret = LinkedListMultimap.create();
+ for (DOMRpcIdentifier i : rpcs) {
+ ret.put(i.getType(), i.getContextReference());
+ }
+ return ret;
+ }
+
+ DOMRpcRoutingTable add(final DOMRpcImplementation implementation, final Set<DOMRpcIdentifier> rpcs) {
+ if (rpcs.isEmpty()) {
+ return this;
+ }
+
+ // First decompose the identifiers to a multimap
+ final ListMultimap<SchemaPath, YangInstanceIdentifier> toAdd = decomposeIdentifiers(rpcs);
+
+ // Now iterate over existing entries, modifying them as appropriate...
+ final Builder<SchemaPath, AbstractDOMRpcRoutingTableEntry> mb = ImmutableMap.builder();
+ for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> re : this.rpcs.entrySet()) {
+ List<YangInstanceIdentifier> newRpcs = toAdd.removeAll(re.getKey());
+ if (!newRpcs.isEmpty()) {
+ final AbstractDOMRpcRoutingTableEntry ne = re.getValue().add(implementation, newRpcs);
+ mb.put(re.getKey(), ne);
+ } else {
+ mb.put(re);
+ }
+ }
+
+ // Finally add whatever is left in the decomposed multimap
+ for (Entry<SchemaPath, Collection<YangInstanceIdentifier>> e : toAdd.asMap().entrySet()) {
+ final Builder<YangInstanceIdentifier, List<DOMRpcImplementation>> vb = ImmutableMap.builder();
+ final List<DOMRpcImplementation> v = Collections.singletonList(implementation);
+ for (YangInstanceIdentifier i : e.getValue()) {
+ vb.put(i, v);
+ }
+
+ mb.put(e.getKey(), createRpcEntry(schemaContext, e.getKey(), vb.build()));
+ }
+
+ return new DOMRpcRoutingTable(mb.build(), schemaContext);
+ }
+
+ DOMRpcRoutingTable remove(final DOMRpcImplementation implementation, final Set<DOMRpcIdentifier> rpcs) {
+ if (rpcs.isEmpty()) {
+ return this;
+ }
+
+ // First decompose the identifiers to a multimap
+ final ListMultimap<SchemaPath, YangInstanceIdentifier> toRemove = decomposeIdentifiers(rpcs);
+
+ // Now iterate over existing entries, modifying them as appropriate...
+ final Builder<SchemaPath, AbstractDOMRpcRoutingTableEntry> b = ImmutableMap.builder();
+ for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> e : this.rpcs.entrySet()) {
+ final List<YangInstanceIdentifier> removed = toRemove.removeAll(e.getKey());
+ if (!removed.isEmpty()) {
+ // A null entry means the last implementation went away; drop the key.
+ final AbstractDOMRpcRoutingTableEntry ne = e.getValue().remove(implementation, removed);
+ if (ne != null) {
+ b.put(e.getKey(), ne);
+ }
+ } else {
+ b.put(e);
+ }
+ }
+
+ // All done, whatever is in toRemove, was not there in the first place
+ return new DOMRpcRoutingTable(b.build(), schemaContext);
+ }
+
+ boolean contains(final DOMRpcIdentifier input) {
+ final AbstractDOMRpcRoutingTableEntry contexts = rpcs.get(input.getType());
+ return contexts != null && contexts.containsContext(input.getContextReference());
+ }
+
+ // Returns a live transformed view over the current entries, keyed by RPC type.
+ Map<SchemaPath, Set<YangInstanceIdentifier>> getRpcs() {
+ return Maps.transformValues(rpcs, EXTRACT_IDENTIFIERS);
+ }
+
+ // Looks up the RPC definition matching the first QName of the path in the
+ // supplied schema context; returns null when the context or module lacks it.
+ private static RpcDefinition findRpcDefinition(final SchemaContext context, final SchemaPath schemaPath) {
+ if (context != null) {
+ final QName qname = schemaPath.getPathFromRoot().iterator().next();
+ final Module module = context.findModuleByNamespaceAndRevision(qname.getNamespace(), qname.getRevision());
+ if (module != null && module.getRpcs() != null) {
+ for (RpcDefinition rpc : module.getRpcs()) {
+ if (qname.equals(rpc.getQName())) {
+ return rpc;
+ }
+ }
+ }
+ }
+
+ return null;
+ }
+
+ // An input child annotated with context-reference makes the RPC routed;
+ // a known definition without one is global; an unknown path gets a placeholder.
+ private static AbstractDOMRpcRoutingTableEntry createRpcEntry(final SchemaContext context, final SchemaPath key, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> implementations) {
+ final RpcDefinition rpcDef = findRpcDefinition(context, key);
+ if (rpcDef != null) {
+ final ContainerSchemaNode input = rpcDef.getInput();
+ if (input != null) {
+ for (DataSchemaNode c : input.getChildNodes()) {
+ for (UnknownSchemaNode extension : c.getUnknownSchemaNodes()) {
+ if (CONTEXT_REFERENCE.equals(extension.getNodeType())) {
+ final YangInstanceIdentifier keyId = YangInstanceIdentifier.builder().node(input.getQName()).node(c.getQName()).build();
+ return new RoutedDOMRpcRoutingTableEntry(rpcDef, keyId, implementations);
+ }
+ }
+ }
+ }
+
+ return new GlobalDOMRpcRoutingTableEntry(rpcDef, implementations);
+ } else {
+ return new UnknownDOMRpcRoutingTableEntry(key, implementations);
+ }
+ }
+
+ CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type, final NormalizedNode<?, ?> input) {
+ final AbstractDOMRpcRoutingTableEntry entry = rpcs.get(type);
+ if (entry == null) {
+ return Futures.<DOMRpcResult, DOMRpcException>immediateFailedCheckedFuture(new DOMRpcImplementationNotAvailableException("No implementation of RPC %s available", type));
+ }
+
+ return entry.invokeRpc(input);
+ }
+
+ // Re-types every entry against the new schema context (routed vs. global vs. unknown).
+ DOMRpcRoutingTable setSchemaContext(final SchemaContext context) {
+ final Builder<SchemaPath, AbstractDOMRpcRoutingTableEntry> b = ImmutableMap.builder();
+
+ for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> e : rpcs.entrySet()) {
+ b.put(e.getKey(), createRpcEntry(context, e.getKey(), e.getValue().getImplementations()));
+ }
+
+ return new DOMRpcRoutingTable(b.build(), context);
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.List;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+
+/**
+ * Routing table entry for an RPC which has no context-reference input, i.e.
+ * a global RPC with a single identifier.
+ */
+final class GlobalDOMRpcRoutingTableEntry extends AbstractDOMRpcRoutingTableEntry {
+ private final DOMRpcIdentifier rpcId;
+
+ private GlobalDOMRpcRoutingTableEntry(final DOMRpcIdentifier rpcId, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ super(rpcId.getType(), impls);
+ this.rpcId = Preconditions.checkNotNull(rpcId);
+ }
+
+ // We do not need the RpcDefinition, but this makes sure we do not
+ // forward something we don't know to be an RPC.
+ GlobalDOMRpcRoutingTableEntry(final RpcDefinition def, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ super(def.getPath(), impls);
+ this.rpcId = DOMRpcIdentifier.create(def.getPath());
+ }
+
+ @Override
+ protected CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input) {
+ // Global implementations live under the null context reference; first one wins.
+ return getImplementations(null).get(0).invokeRpc(rpcId, input);
+ }
+
+ @Override
+ protected GlobalDOMRpcRoutingTableEntry newInstance(final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ return new GlobalDOMRpcRoutingTableEntry(rpcId, impls);
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Objects;
-import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
@Override
public String toString() {
- return addToStringAttributes(Objects.toStringHelper(this)).toString();
+ return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
}
protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
/*
* This forces allocateTransaction() on a slow path, which has to happen after
- * this method has completed executing.
+ * this method has completed executing. Also inflightTx may be updated outside
+ * the lock, hence we need to re-check.
*/
@GuardedBy("this")
private void processIfReady() {
- final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
- if (tx != null) {
- processTransaction(tx);
+ if (inflightTx == null) {
+ final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
+ if (tx != null) {
+ processTransaction(tx);
+ }
}
}
*/
final boolean success = READY_UPDATER.compareAndSet(this, null, tx);
Preconditions.checkState(success, "Transaction %s collided on ready state", tx, readyTx);
- LOG.debug("Transaction {} readied");
+ LOG.debug("Transaction {} readied", tx);
/*
* We do not see a transaction being in-flight, so we need to take care of dispatching
}
@Override
- public void close() {
+ public synchronized void close() {
final PingPongTransaction notLocked = lockedTx;
Preconditions.checkState(notLocked == null, "Attempted to close chain with outstanding transaction %s", notLocked);
- synchronized (this) {
- processIfReady();
- delegate.close();
+ // Force allocations on slow path. We will complete the rest
+ final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
+
+ // Make sure no transaction is outstanding. Otherwise sleep a bit and retry
+ while (inflightTx != null) {
+ LOG.debug("Busy-waiting for in-flight transaction {} to complete", inflightTx);
+ Thread.yield();
+ continue;
}
+
+ // If we have an outstanding transaction, send it down
+ if (tx != null) {
+ processTransaction(tx);
+ }
+
+ // All done, close the delegate. All new allocations should fail.
+ delegate.close();
}
@Override
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import java.util.List;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodes;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class RoutedDOMRpcRoutingTableEntry extends AbstractDOMRpcRoutingTableEntry {
+ // Routing-table entry for a routed RPC: extracts the routing context from the
+ // input and dispatches to the implementation registered for that context,
+ // falling back to the global (context-less) implementation when present.
+ private static final Logger LOG = LoggerFactory.getLogger(RoutedDOMRpcRoutingTableEntry.class);
+ private final DOMRpcIdentifier globalRpcId;
+ private final YangInstanceIdentifier keyId;
+
+ // keyId: path of the leaf inside the RPC input which carries the routing context
+ private RoutedDOMRpcRoutingTableEntry(final DOMRpcIdentifier globalRpcId, final YangInstanceIdentifier keyId, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ super(globalRpcId.getType(), impls);
+ this.keyId = Preconditions.checkNotNull(keyId);
+ this.globalRpcId = Preconditions.checkNotNull(globalRpcId);
+ }
+
+ RoutedDOMRpcRoutingTableEntry(final RpcDefinition def, final YangInstanceIdentifier keyId, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ super(def.getPath(), impls);
+ this.keyId = Preconditions.checkNotNull(keyId);
+ this.globalRpcId = DOMRpcIdentifier.create(def.getPath());
+ }
+
+ @Override
+ protected CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input) {
+ final Optional<NormalizedNode<?, ?>> maybeKey = NormalizedNodes.findNode(input, keyId);
+
+ // Routing key is present, attempt to deliver as a routed RPC
+ if (maybeKey.isPresent()) {
+ final NormalizedNode<?, ?> key = maybeKey.get();
+ final Object value = key.getValue();
+ if (value instanceof YangInstanceIdentifier) {
+ final YangInstanceIdentifier iid = (YangInstanceIdentifier) value;
+ // Only the first registered implementation for the context is invoked
+ final List<DOMRpcImplementation> impls = getImplementations(iid);
+ if (impls != null) {
+ return impls.get(0).invokeRpc(DOMRpcIdentifier.create(getSchemaPath(), iid), input);
+ }
+ LOG.debug("No implementation for context {} found", iid);
+ } else {
+ LOG.warn("Ignoring wrong context value {}", value);
+ }
+ }
+
+ // Fall back to the global implementation (registered under a null context)
+ final List<DOMRpcImplementation> impls = getImplementations(null);
+ if (impls != null) {
+ return impls.get(0).invokeRpc(globalRpcId, input);
+ } else {
+ return Futures.<DOMRpcResult, DOMRpcException>immediateFailedCheckedFuture(new DOMRpcImplementationNotAvailableException("No implementation of RPC %s available", getSchemaPath()));
+ }
+ }
+
+ @Override
+ protected RoutedDOMRpcRoutingTableEntry newInstance(final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ return new RoutedDOMRpcRoutingTableEntry(globalRpcId, keyId, impls);
+ }
+}
\ No newline at end of file
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.Collection;
import java.util.Map;
import java.util.concurrent.RejectedExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@Override
protected CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShard;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+
+final class ShardRegistration<T extends DOMDataTreeShard> extends AbstractListenerRegistration<T> {
+ // Registration of a single shard within a ShardedDOMDataTree; closing the
+ // registration detaches the shard from the owning tree.
+ private final DOMDataTreeIdentifier prefix;
+ private final ShardedDOMDataTree tree;
+
+ protected ShardRegistration(final ShardedDOMDataTree tree, final DOMDataTreeIdentifier prefix, final T shard) {
+ super(shard);
+ this.tree = Preconditions.checkNotNull(tree);
+ this.prefix = Preconditions.checkNotNull(prefix);
+ }
+
+ // Prefix (datastore type + root path) this shard is responsible for.
+ DOMDataTreeIdentifier getPrefix() {
+ return prefix;
+ }
+
+ @Override
+ protected void removeRegistration() {
+ // Delegate the actual detach to the owning tree
+ tree.removeShard(this);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShard;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShardingConflictException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShardingService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class ShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDOMDataTree.class);
+ private final Map<LogicalDatastoreType, ShardingTableEntry> shardingTables = new EnumMap<>(LogicalDatastoreType.class);
+ @GuardedBy("this")
+ private final Map<DOMDataTreeIdentifier, DOMDataTreeProducer> idToProducer = new TreeMap<>();
+
+ @GuardedBy("this")
+ private ShardingTableEntry lookupShard(final DOMDataTreeIdentifier prefix) {
+ final ShardingTableEntry t = shardingTables.get(prefix.getDatastoreType());
+ if (t == null) {
+ return null;
+ }
+
+ return t.lookup(prefix.getRootIdentifier());
+ }
+
+ @GuardedBy("this")
+ private void storeShard(final DOMDataTreeIdentifier prefix, final ShardRegistration<?> reg) {
+ ShardingTableEntry t = shardingTables.get(prefix.getDatastoreType());
+ if (t == null) {
+ t = new ShardingTableEntry();
+ shardingTables.put(prefix.getDatastoreType(), t);
+ }
+
+ t.store(prefix.getRootIdentifier(), reg);
+ }
+
+ void removeShard(final ShardRegistration<?> reg) {
+ final DOMDataTreeIdentifier prefix = reg.getPrefix();
+ final ShardRegistration<?> parentReg;
+
+ synchronized (this) {
+ final ShardingTableEntry t = shardingTables.get(prefix.getDatastoreType());
+ if (t == null) {
+ LOG.warn("Shard registration {} points to non-existent table", reg);
+ return;
+ }
+
+ t.remove(prefix.getRootIdentifier());
+ parentReg = lookupShard(prefix).getRegistration();
+
+ /*
+ * FIXME: adjust all producers. This is tricky, as we need different locking strategy,
+ * simply because we risk AB/BA deadlock with a producer being split off from
+ * a producer.
+ *
+ */
+ }
+
+ if (parentReg != null) {
+ parentReg.getInstance().onChildDetached(prefix, reg.getInstance());
+ }
+ }
+
+    @Override
+    public <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(final DOMDataTreeIdentifier prefix, final T shard) throws DOMDataTreeShardingConflictException {
+        final ShardRegistration<T> reg;
+        final ShardRegistration<?> parentReg;
+
+        synchronized (this) {
+            /*
+             * Lookup the parent shard (e.g. the one which currently matches the prefix),
+             * and if it exists, check if its registration prefix does not collide with
+             * this registration.
+             */
+            final ShardingTableEntry parent = lookupShard(prefix);
+            // lookupShard() returns null when no table exists yet for this datastore
+            // type; guard against an NPE on the very first registration.
+            parentReg = parent == null ? null : parent.getRegistration();
+            if (parentReg != null && prefix.equals(parentReg.getPrefix())) {
+                // String.format() uses %s placeholders (the original mixed in an SLF4J-style {})
+                throw new DOMDataTreeShardingConflictException(String.format("Prefix %s is already occupied by shard %s", prefix, parentReg.getInstance()));
+            }
+
+            // FIXME: wrap the shard in a proper adaptor based on implemented interface
+
+            reg = new ShardRegistration<T>(this, prefix, shard);
+
+            storeShard(prefix, reg);
+
+            // FIXME: update any producers/registrations
+        }
+
+        // Notify the parent shard outside the lock to avoid holding it across callbacks
+        if (parentReg != null) {
+            parentReg.getInstance().onChildAttached(prefix, shard);
+        }
+
+        return reg;
+    }
+
+ @GuardedBy("this")
+ private DOMDataTreeProducer findProducer(final DOMDataTreeIdentifier subtree) {
+ for (Entry<DOMDataTreeIdentifier, DOMDataTreeProducer> e : idToProducer.entrySet()) {
+ if (e.getKey().contains(subtree)) {
+ return e.getValue();
+ }
+ }
+
+ return null;
+ }
+
+ synchronized void destroyProducer(final ShardedDOMDataTreeProducer producer) {
+ for (DOMDataTreeIdentifier s : producer.getSubtrees()) {
+ DOMDataTreeProducer r = idToProducer.remove(s);
+ if (!producer.equals(r)) {
+ LOG.error("Removed producer %s on subtree %s while removing %s", r, s, producer);
+ }
+ }
+ }
+
+ @GuardedBy("this")
+ private DOMDataTreeProducer createProducer(final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap) {
+ // Record the producer's attachment points
+ final DOMDataTreeProducer ret = ShardedDOMDataTreeProducer.create(this, shardMap);
+ for (DOMDataTreeIdentifier s : shardMap.keySet()) {
+ idToProducer.put(s, ret);
+ }
+
+ return ret;
+ }
+
+ @Override
+ public synchronized DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
+ Preconditions.checkArgument(!subtrees.isEmpty(), "Subtrees may not be empty");
+
+ final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap = new HashMap<>();
+ for (DOMDataTreeIdentifier s : subtrees) {
+ // Attempting to create a disconnected producer -- all subtrees have to be unclaimed
+ final DOMDataTreeProducer producer = findProducer(s);
+ Preconditions.checkArgument(producer == null, "Subtree %s is attached to producer %s", s, producer);
+
+ // NOTE(review): lookupShard(s) returns null when no shard table exists for the
+ // datastore type, and getRegistration() can be null for a bare table entry --
+ // either would NPE here. Presumably a root shard is always registered before
+ // producers are created; confirm that invariant.
+ shardMap.put(s, lookupShard(s).getRegistration().getInstance());
+ }
+
+ return createProducer(shardMap);
+ }
+
+ synchronized DOMDataTreeProducer createProducer(final ShardedDOMDataTreeProducer parent, final Collection<DOMDataTreeIdentifier> subtrees) {
+ Preconditions.checkNotNull(parent);
+
+ final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap = new HashMap<>();
+ for (DOMDataTreeIdentifier s : subtrees) {
+ shardMap.put(s, lookupShard(s).getRegistration().getInstance());
+ }
+
+ return createProducer(shardMap);
+ }
+
+    @Override
+    public synchronized <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(final T listener, final Collection<DOMDataTreeIdentifier> subtrees, final boolean allowRxMerges, final Collection<DOMDataTreeProducer> producers) {
+        // Listener support is not implemented yet. Fail loudly here instead of
+        // returning null (the original TODO stub), which would only surface as an
+        // NPE at some arbitrary later point in the caller.
+        throw new UnsupportedOperationException("Listener registration is not implemented yet");
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.BiMap;
+import com.google.common.collect.ImmutableBiMap;
+import com.google.common.collect.ImmutableBiMap.Builder;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.Set;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducerBusyException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeProducerException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeShard;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class ShardedDOMDataTreeProducer implements DOMDataTreeProducer {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDOMDataTreeProducer.class);
+ private final BiMap<DOMDataTreeShard, DOMStoreTransactionChain> shardToChain;
+ private final Map<DOMDataTreeIdentifier, DOMDataTreeShard> idToShard;
+ private final ShardedDOMDataTree dataTree;
+
+ @GuardedBy("this")
+ private Map<DOMDataTreeIdentifier, DOMDataTreeProducer> children = Collections.emptyMap();
+ @GuardedBy("this")
+ private DOMDataWriteTransaction openTx;
+ @GuardedBy("this")
+ private boolean closed;
+
+    /**
+     * Instantiate a producer over the supplied shard map. A transaction chain is
+     * opened on every backing DOMStore shard; if any chain fails to instantiate,
+     * all successfully-opened chains are closed and an IllegalStateException
+     * carrying the collected failures is thrown.
+     */
+    ShardedDOMDataTreeProducer(final ShardedDOMDataTree dataTree, final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap, final Set<DOMDataTreeShard> shards) {
+        this.dataTree = Preconditions.checkNotNull(dataTree);
+
+        // Create shard -> chain map, collecting any instantiation failures
+        final Builder<DOMDataTreeShard, DOMStoreTransactionChain> cb = ImmutableBiMap.builder();
+        final Queue<Exception> es = new LinkedList<>();
+
+        for (DOMDataTreeShard s : shards) {
+            if (s instanceof DOMStore) {
+                try {
+                    final DOMStoreTransactionChain c = ((DOMStore)s).createTransactionChain();
+                    LOG.trace("Using DOMStore chain {} to access shard {}", c, s);
+                    cb.put(s, c);
+                } catch (Exception e) {
+                    LOG.error("Failed to instantiate chain for shard {}", s, e);
+                    es.add(e);
+                }
+            } else {
+                LOG.error("Unhandled shard instance type {}", s.getClass());
+            }
+        }
+        this.shardToChain = cb.build();
+
+        // An error was encountered: close the chains we did open and report the failure
+        if (shardToChain.size() != shards.size()) {
+            for (DOMStoreTransactionChain c : shardToChain.values()) {
+                try {
+                    c.close();
+                } catch (Exception e) {
+                    // SLF4J placeholders are {}; the original %s was never rendered
+                    LOG.warn("Exception raised while closing chain {}", c, e);
+                }
+            }
+
+            // Chain all collected failures onto a single exception
+            final IllegalStateException e = new IllegalStateException("Failed to completely allocate contexts", es.poll());
+            while (!es.isEmpty()) {
+                e.addSuppressed(es.poll());
+            }
+
+            throw e;
+        }
+
+        idToShard = ImmutableMap.copyOf(shardMap);
+    }
+
+ @Override
+ public synchronized DOMDataWriteTransaction createTransaction(final boolean isolated) {
+ Preconditions.checkState(!closed, "Producer is already closed");
+ Preconditions.checkState(openTx == null, "Transaction %s is still open", openTx);
+
+ // Allocate backing transactions
+ final Map<DOMDataTreeShard, DOMStoreWriteTransaction> shardToTx = new HashMap<>();
+ for (Entry<DOMDataTreeShard, DOMStoreTransactionChain> e : shardToChain.entrySet()) {
+ shardToTx.put(e.getKey(), e.getValue().newWriteOnlyTransaction());
+ }
+
+ // Create the ID->transaction map
+ final ImmutableMap.Builder<DOMDataTreeIdentifier, DOMStoreWriteTransaction> b = ImmutableMap.builder();
+ for (Entry<DOMDataTreeIdentifier, DOMDataTreeShard> e : idToShard.entrySet()) {
+ b.put(e.getKey(), shardToTx.get(e.getValue()));
+ }
+
+ final ShardedDOMDataWriteTransaction ret = new ShardedDOMDataWriteTransaction(this, b.build());
+ openTx = ret;
+ return ret;
+ }
+
+ @GuardedBy("this")
+ private boolean haveSubtree(final DOMDataTreeIdentifier subtree) {
+ for (DOMDataTreeIdentifier i : idToShard.keySet()) {
+ if (i.contains(subtree)) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ @GuardedBy("this")
+ private DOMDataTreeProducer lookupChild(final DOMDataTreeIdentifier s) {
+ for (Entry<DOMDataTreeIdentifier, DOMDataTreeProducer> e : children.entrySet()) {
+ if (e.getKey().contains(s)) {
+ return e.getValue();
+ }
+ }
+
+ return null;
+ }
+
+ @Override
+ public synchronized DOMDataTreeProducer createProducer(final Collection<DOMDataTreeIdentifier> subtrees) {
+ Preconditions.checkState(!closed, "Producer is already closed");
+ Preconditions.checkState(openTx == null, "Transaction %s is still open", openTx);
+
+ for (DOMDataTreeIdentifier s : subtrees) {
+ // Check if the subtree was visible at any time
+ if (!haveSubtree(s)) {
+ throw new IllegalArgumentException(String.format("Subtree %s was never available in producer %s", s, this));
+ }
+
+ // Check if the subtree has not been delegated to a child
+ final DOMDataTreeProducer child = lookupChild(s);
+ Preconditions.checkArgument(child == null, "Subtree %s is delegated to child producer %s", s, child);
+
+ // Check if part of the requested subtree is not delegated to a child.
+ for (DOMDataTreeIdentifier c : children.keySet()) {
+ if (s.contains(c)) {
+ throw new IllegalArgumentException(String.format("Subtree %s cannot be delegated as it is superset of already-delegated %s", s, c));
+ }
+ }
+ }
+
+ final DOMDataTreeProducer ret = dataTree.createProducer(this, subtrees);
+ final ImmutableMap.Builder<DOMDataTreeIdentifier, DOMDataTreeProducer> cb = ImmutableMap.builder();
+ cb.putAll(children);
+ for (DOMDataTreeIdentifier s : subtrees) {
+ cb.put(s, ret);
+ }
+
+ children = cb.build();
+ return ret;
+ }
+
+ @Override
+ public synchronized void close() throws DOMDataTreeProducerException {
+ if (!closed) {
+ if (openTx != null) {
+ throw new DOMDataTreeProducerBusyException(String.format("Transaction %s is still open", openTx));
+ }
+
+ closed = true;
+ dataTree.destroyProducer(this);
+ }
+ }
+
+ // Factory method: validates the shard set before invoking the constructor.
+ static DOMDataTreeProducer create(final ShardedDOMDataTree dataTree, final Map<DOMDataTreeIdentifier, DOMDataTreeShard> shardMap) {
+ /*
+ * FIXME: we do not allow multiple shards in a producer because we do not implement the
+ * synchronization primitives yet
+ */
+ final Set<DOMDataTreeShard> shards = ImmutableSet.copyOf(shardMap.values());
+ if (shards.size() > 1) {
+ throw new UnsupportedOperationException("Cross-shard producers are not supported yet");
+ }
+
+ return new ShardedDOMDataTreeProducer(dataTree, shardMap, shards);
+ }
+
+ Set<DOMDataTreeIdentifier> getSubtrees() {
+ return idToShard.keySet();
+ }
+
+ synchronized void cancelTransaction(final ShardedDOMDataWriteTransaction transaction) {
+ if (!openTx.equals(transaction)) {
+ LOG.warn("Transaction {} is not open in producer {}", transaction, this);
+ return;
+ }
+
+ LOG.debug("Transaction {} cancelled", transaction);
+ openTx = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.concurrent.GuardedBy;
+import javax.annotation.concurrent.NotThreadSafe;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@NotThreadSafe
+final class ShardedDOMDataWriteTransaction implements DOMDataWriteTransaction {
+ private static final Logger LOG = LoggerFactory.getLogger(ShardedDOMDataWriteTransaction.class);
+ private static final AtomicLong COUNTER = new AtomicLong();
+ private final Map<DOMDataTreeIdentifier, DOMStoreWriteTransaction> idToTransaction;
+ private final ShardedDOMDataTreeProducer producer;
+ private final String identifier;
+ @GuardedBy("this")
+ private boolean closed = false;
+
+ ShardedDOMDataWriteTransaction(final ShardedDOMDataTreeProducer producer, final Map<DOMDataTreeIdentifier, DOMStoreWriteTransaction> idToTransaction) {
+ this.producer = Preconditions.checkNotNull(producer);
+ this.idToTransaction = Preconditions.checkNotNull(idToTransaction);
+ this.identifier = "SHARDED-DOM-" + COUNTER.getAndIncrement();
+ }
+
+    // FIXME: use atomic operations
+    /**
+     * Find the backing shard transaction whose subtree contains {@code path} in
+     * {@code store}. Throws IllegalArgumentException when the path is outside
+     * every subtree this transaction was created for.
+     */
+    @GuardedBy("this")
+    private DOMStoreWriteTransaction lookup(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final DOMDataTreeIdentifier id = new DOMDataTreeIdentifier(store, path);
+
+        for (Entry<DOMDataTreeIdentifier, DOMStoreWriteTransaction> e : idToTransaction.entrySet()) {
+            if (e.getKey().contains(id)) {
+                return e.getValue();
+            }
+        }
+
+        // Typo fix in user-visible message: "acessible" -> "accessible"
+        throw new IllegalArgumentException(String.format("Path %s is not accessible from transaction %s", id, this));
+    }
+
+ @Override
+ public String getIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ public synchronized boolean cancel() {
+ if (closed) {
+ return false;
+ }
+
+ LOG.debug("Cancelling transaction {}", identifier);
+ for (DOMStoreWriteTransaction tx : ImmutableSet.copyOf(idToTransaction.values())) {
+ tx.close();
+ }
+
+ closed = true;
+ producer.cancelTransaction(this);
+ return true;
+ }
+
+ @Override
+ public synchronized CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ Preconditions.checkState(!closed, "Transaction %s is already closed", identifier);
+
+ final Set<DOMStoreWriteTransaction> txns = ImmutableSet.copyOf(idToTransaction.values());
+ final List<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
+ for (DOMStoreWriteTransaction tx : txns) {
+ cohorts.add(tx.ready());
+ }
+
+ try {
+ return Futures.immediateCheckedFuture(new CommitCoordinationTask(this, cohorts, null).call());
+ } catch (TransactionCommitFailedException e) {
+ return Futures.immediateFailedCheckedFuture(e);
+ }
+ }
+
+ @Override
+ @Deprecated
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
+ }
+
+ @Override
+ public synchronized void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ lookup(store, path).delete(path);
+ }
+
+ @Override
+ public synchronized void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ lookup(store, path).write(path, data);
+ }
+
+ @Override
+ public synchronized void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ lookup(store, path).merge(path, data);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * One node of the per-datastore shard lookup tree. Each entry corresponds to a
+ * single path argument; a shard registration may be attached to any node.
+ */
+final class ShardingTableEntry implements Identifiable<PathArgument> {
+    private static final Logger LOG = LoggerFactory.getLogger(ShardingTableEntry.class);
+    /*
+     * Must be a mutable map: store() inserts children into it. The original
+     * Collections.emptyMap() is immutable and would have thrown
+     * UnsupportedOperationException on the very first insertion.
+     */
+    private final Map<PathArgument, ShardingTableEntry> children = new HashMap<>();
+    // Path step this entry represents; null only for the root entry
+    private final PathArgument identifier;
+    // Shard registered exactly at this node, if any
+    private ShardRegistration<?> registration;
+
+    ShardingTableEntry() {
+        identifier = null;
+    }
+
+    ShardingTableEntry(final PathArgument identifier) {
+        this.identifier = Preconditions.checkNotNull(identifier);
+    }
+
+    @Override
+    public PathArgument getIdentifier() {
+        return identifier;
+    }
+
+    public ShardRegistration<?> getRegistration() {
+        return registration;
+    }
+
+    // Returns the deepest existing entry along 'id'; stops early (and returns the
+    // last matched entry) when no child matches the next path argument.
+    ShardingTableEntry lookup(final YangInstanceIdentifier id) {
+        final Iterator<PathArgument> it = id.getPathArguments().iterator();
+        ShardingTableEntry entry = this;
+
+        while (it.hasNext()) {
+            final PathArgument a = it.next();
+            final ShardingTableEntry child = entry.children.get(a);
+            if (child == null) {
+                LOG.debug("Lookup of {} stopped at {}", id, a);
+                break;
+            }
+
+            entry = child;
+        }
+
+        return entry;
+    }
+
+    // Stores 'reg' at the node addressed by 'id', creating intermediate entries
+    // as needed. At most one registration may exist per node.
+    void store(final YangInstanceIdentifier id, final ShardRegistration<?> reg) {
+        final Iterator<PathArgument> it = id.getPathArguments().iterator();
+        ShardingTableEntry entry = this;
+
+        while (it.hasNext()) {
+            final PathArgument a = it.next();
+            ShardingTableEntry child = entry.children.get(a);
+            if (child == null) {
+                child = new ShardingTableEntry(a);
+                entry.children.put(a, child);
+            }
+
+            /*
+             * Descend into the child so the registration lands on the terminal
+             * node of the path. The original loop never advanced 'entry', so
+             * every registration ended up on the root entry.
+             */
+            entry = child;
+        }
+
+        Preconditions.checkState(entry.registration == null);
+        entry.registration = reg;
+    }
+
+    /*
+     * Recursive removal along the iterator. Returns true when this entry carries
+     * no registration and has no children, i.e. the parent may prune it.
+     */
+    private boolean remove(final Iterator<PathArgument> it) {
+        if (it.hasNext()) {
+            final PathArgument arg = it.next();
+            final ShardingTableEntry child = children.get(arg);
+            if (child != null) {
+                if (child.remove(it)) {
+                    children.remove(arg);
+                }
+            } else {
+                LOG.warn("Cannot remove non-existent child {}", arg);
+            }
+        } else {
+            /*
+             * Terminal node of the removed path: clear its registration so this
+             * entry (and any now-empty ancestors) can be pruned. The original
+             * never cleared it, making remove() a no-op for registered entries.
+             */
+            registration = null;
+        }
+
+        return registration == null && children.isEmpty();
+    }
+
+    void remove(final YangInstanceIdentifier id) {
+        this.remove(id.getPathArguments().iterator());
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import java.util.List;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+final class UnknownDOMRpcRoutingTableEntry extends AbstractDOMRpcRoutingTableEntry {
+ // Placeholder entry for an RPC whose SchemaPath is not resolved in the current
+ // schema context: every invocation fails with the same pre-built future.
+ private final CheckedFuture<DOMRpcResult, DOMRpcException> unknownRpc;
+
+ UnknownDOMRpcRoutingTableEntry(final SchemaPath schemaPath, final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ super(schemaPath, impls);
+ // Built once; the identical failed future is handed out on each invocation
+ unknownRpc = Futures.<DOMRpcResult, DOMRpcException>immediateFailedCheckedFuture(
+ new DOMRpcImplementationNotAvailableException("SchemaPath %s is not resolved to an RPC", schemaPath));
+ }
+
+ @Override
+ protected CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input) {
+ return unknownRpc;
+ }
+
+ @Override
+ protected UnknownDOMRpcRoutingTableEntry newInstance(final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
+ return new UnknownDOMRpcRoutingTableEntry(getSchemaPath(), impls);
+ }
+}
\ No newline at end of file
@Before
public void setupStore() {
- InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
schemaContext = TestModel.createTestContext();
operStore.onGlobalContextUpdated(schemaContext);
public void setupStore() {
InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
- MoreExecutors.sameThreadExecutor());
+ MoreExecutors.newDirectExecutorService());
InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
- MoreExecutors.sameThreadExecutor());
+ MoreExecutors.newDirectExecutorService());
schemaContext = TestModel.createTestContext();
operStore.onGlobalContextUpdated(schemaContext);
@Before
public void setupStore() {
- InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
schemaContext = TestModel.createTestContext();
operStore.onGlobalContextUpdated(schemaContext);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+
+/**
+ * Abstract base class for {@link DOMRpcImplementationRegistration} implementations.
+ */
+public abstract class AbstractDOMRpcImplementationRegistration<T extends DOMRpcImplementation> extends AbstractObjectRegistration<T> implements DOMRpcImplementationRegistration<T> {
+ protected AbstractDOMRpcImplementationRegistration(final T instance) {
+ super(instance);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ImmutableSet;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
+
+/**
+ * Convenience abstract base class for {@link DOMRpcProviderService} implementations.
+ */
+public abstract class AbstractDOMRpcProviderService implements DOMRpcProviderService {
+ @Override
+ public final <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(final T implementation, final DOMRpcIdentifier... types) {
+ return registerRpcImplementation(implementation, ImmutableSet.copyOf(types));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.annotations.Beta;
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Objects;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Utility class implementing {@link DefaultDOMRpcResult}.
+ */
+@Beta
+public final class DefaultDOMRpcResult implements DOMRpcResult, Immutable, Serializable {
+ private static final long serialVersionUID = 1L;
+ private final Collection<RpcError> errors;
+ private final NormalizedNode<?, ?> result;
+
+ private static Collection<RpcError> asCollection(final RpcError... errors) {
+ if (errors.length == 0) {
+ return Collections.emptyList();
+ } else {
+ return Arrays.asList(errors);
+ }
+ }
+
+ public DefaultDOMRpcResult(final NormalizedNode<?, ?> result, final RpcError... errors) {
+ this(result, asCollection(errors));
+ }
+
+ public DefaultDOMRpcResult(final RpcError... errors) {
+ this(null, asCollection(errors));
+ }
+
+ public DefaultDOMRpcResult(final NormalizedNode<?, ?> result) {
+ this(result, Collections.<RpcError>emptyList());
+ }
+
+ public DefaultDOMRpcResult(final NormalizedNode<?, ?> result, final @Nonnull Collection<RpcError> errors) {
+ this.result = result;
+ this.errors = Preconditions.checkNotNull(errors);
+ }
+
+ public DefaultDOMRpcResult(final @Nonnull Collection<RpcError> errors) {
+ this(null, errors);
+ }
+
+ @Override
+ public @Nonnull Collection<RpcError> getErrors() {
+ return errors;
+ }
+
+ @Override
+ public NormalizedNode<?, ?> getResult() {
+ return result;
+ }
+
+ @Override
+ public int hashCode() {
+ int ret = errors.hashCode();
+ if (result != null) {
+ ret = 31 * ret + result.hashCode();
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof DefaultDOMRpcResult)) {
+ return false;
+ }
+
+ final DefaultDOMRpcResult other = (DefaultDOMRpcResult) obj;
+ if (!errors.equals(other.errors)) {
+ return false;
+ }
+ return Objects.equals(result, other.result);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.CheckedFuture;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Utility implementation which implements {@link DOMRpcImplementation} by forwarding it to
+ * a backing delegate.
+ */
+public abstract class ForwardingDOMRpcImplementation extends ForwardingObject implements DOMRpcImplementation {
+ @Override
+ protected abstract @Nonnull DOMRpcImplementation delegate();
+
+ @Override
+ public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final DOMRpcIdentifier type, final NormalizedNode<?, ?> input) {
+ return delegate().invokeRpc(type, input);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import java.util.Set;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
+
+/**
+ * Utility class which implements {@link DOMRpcProviderService} by forwarding
+ * requests to a backing instance.
+ */
+public abstract class ForwardingDOMRpcProviderService extends ForwardingObject implements DOMRpcProviderService {
+ @Override
+ protected abstract @Nonnull DOMRpcProviderService delegate();
+
+ @Override
+ public <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(final T implementation, final DOMRpcIdentifier... types) {
+ return delegate().registerRpcImplementation(implementation, types);
+ }
+
+ @Override
+ public <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(final T implementation, final Set<DOMRpcIdentifier> types) {
+ return delegate().registerRpcImplementation(implementation, types);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import java.util.Collection;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Utility class which implements {@link DOMRpcResult} by forwarding all methods
+ * to a backing instance.
+ */
+public abstract class ForwardingDOMRpcResult extends ForwardingObject implements DOMRpcResult {
+ @Override
+ protected abstract @Nonnull DOMRpcResult delegate();
+
+ @Override
+ public Collection<RpcError> getErrors() {
+ return delegate().getErrors();
+ }
+
+ @Override
+ public NormalizedNode<?, ?> getResult() {
+ return delegate().getResult();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.CheckedFuture;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Utility {@link DOMRpcService} which forwards all requests to a backing delegate instance.
+ */
+public abstract class ForwardingDOMRpcService extends ForwardingObject implements DOMRpcService {
+ @Override
+ protected abstract @Nonnull DOMRpcService delegate();
+
+ @Override
+ public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type, final NormalizedNode<?, ?> input) {
+ return delegate().invokeRpc(type, input);
+ }
+
+ @Override
+ public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(final T listener) {
+ return delegate().registerRpcListener(listener);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Interface implemented by DOMStore implementations which allow registration
+ * of {@link DOMDataTreeChangeListener} instances.
+ */
+public interface DOMStoreTreeChangePublisher {
+ /**
+ * Registers a {@link DOMDataTreeChangeListener} to receive
+ * notifications when data changes under a given path in the conceptual data
+ * tree.
+ * <p>
+ * You are able to register for notifications for any node or subtree
+ * which can be represented using {@link YangInstanceIdentifier}.
+ * <p>
+ *
+ * You are able to register for data change notifications for a subtree or leaf
+ * even if it does not exist. You will receive notification once that node is
+ * created.
+ * <p>
+ * If there is any pre-existing data in data tree on path for which you are
+ * registering, you will receive initial data change event, which will
+ * contain all pre-existing data, marked as created.
+ *
+ * <p>
+ * This method returns a {@link ListenerRegistration} object. To
+ * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
+ * method on this returned object.
+ * <p>
+ * You MUST explicitly unregister your listener when you no longer want to receive
+ * notifications. This is especially true in OSGi environments, where failure to
+ * do so during bundle shutdown can lead to stale listeners being still registered.
+ *
+ * @param treeId
+ * Data tree identifier of the subtree which should be watched for
+ * changes.
+ * @param listener
+ * Listener instance which is being registered
+ * @return Listener registration object, which may be used to unregister
+ * your listener using {@link ListenerRegistration#close()} to stop
+ * delivery of change events.
+ */
+ @Nonnull <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(@Nonnull YangInstanceIdentifier treeId, @Nonnull L listener);
+}
<groupId>org.osgi</groupId>
<artifactId>org.osgi.core</artifactId>
</dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
</dependencies>
<build>
<plugins>
<version>1.2.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-distributed-datastore</artifactId>
+ </dependency>
<!-- Test Dependencies -->
<dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>1.7.7</version>
</dependency>
</dependencies>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
- <version>1.5</version>
<executions>
<execution>
<phase>package</phase>
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.japi.Creator;
+import com.google.common.base.Stopwatch;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
private final Configuration configuration;
private final String followerId;
private final Logger LOG = LoggerFactory.getLogger(DummyShard.class);
+ private long lastMessageIndex = -1;
+ private long lastMessageSize = 0;
+ private Stopwatch appendEntriesWatch;
public DummyShard(Configuration configuration, String followerId) {
this.configuration = configuration;
}
protected void handleAppendEntries(AppendEntries req) throws InterruptedException {
- LOG.info("{} - Received AppendEntries message : leader term, index, size = {}, {}, {}", followerId, req.getTerm(),req.getLeaderCommit(), req.getEntries().size());
+ LOG.info("{} - Received AppendEntries message : leader term = {}, index = {}, prevLogIndex = {}, size = {}",
+ followerId, req.getTerm(),req.getLeaderCommit(), req.getPrevLogIndex(), req.getEntries().size());
+
+ if(appendEntriesWatch != null){
+ long elapsed = appendEntriesWatch.elapsed(TimeUnit.SECONDS);
+ if(elapsed >= 5){
+ LOG.error("More than 5 seconds since last append entry, elapsed Time = {} seconds" +
+ ", leaderCommit = {}, prevLogIndex = {}, size = {}",
+ elapsed, req.getLeaderCommit(), req.getPrevLogIndex(), req.getEntries().size());
+ }
+ appendEntriesWatch.reset().start();
+ } else {
+ appendEntriesWatch = Stopwatch.createStarted();
+ }
+
+ if(lastMessageIndex == req.getLeaderCommit() && req.getEntries().size() > 0 && lastMessageSize > 0){
+ LOG.error("{} - Duplicate message with leaderCommit = {} prevLogIndex = {} received", followerId, req.getLeaderCommit(), req.getPrevLogIndex());
+ }
+
+ lastMessageIndex = req.getLeaderCommit();
+ lastMessageSize = req.getEntries().size();
+
long lastIndex = req.getLeaderCommit();
- if (req.getEntries().size() > 0)
- lastIndex = req.getEntries().get(0).getIndex();
+ if (req.getEntries().size() > 0) {
+ for(ReplicatedLogEntry entry : req.getEntries()) {
+ lastIndex = entry.getIndex();
+ }
+ }
- if (configuration.shouldCauseTrouble()) {
+ if (configuration.shouldCauseTrouble() && req.getEntries().size() > 0) {
boolean ignore = false;
if (configuration.shouldDropReplies()) {
--- /dev/null
+org.slf4j.simpleLogger.showDateTime=true
+org.slf4j.simpleLogger.dateTimeFormat=hh:mm:ss,S a
+org.slf4j.simpleLogger.logFile=System.out
+org.slf4j.simpleLogger.showShortLogName=true
+org.slf4j.simpleLogger.levelInBrackets=true
+org.slf4j.simpleLogger.defaultLogLevel=error
\ No newline at end of file
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import com.google.common.base.Objects;
-import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Preconditions;
-
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.slf4j.Logger;
@Override
public final String toString() {
- return addToStringAttributes(Objects.toStringHelper(this)).toString();
+ return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
}
/**
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
-import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerWalker;
import org.opendaylight.yangtools.util.concurrent.NotificationManager;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
* Resolves and submits notification tasks to the specified manager.
*/
public synchronized void resolve(final NotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> manager) {
- try (final Walker w = listenerRoot.getWalker()) {
+ try (final ListenerWalker w = listenerRoot.getWalker()) {
// Defensive: reset internal state
collectedEvents = ArrayListMultimap.create();
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Multimap;
-
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
-import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Node;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
+
/**
* Recursion state used in {@link ResolveDataChangeEventsTask}. Instances of this
* method track which listeners are affected by a particular change node. It takes
*/
private final Collection<Builder> inheritedOne;
private final YangInstanceIdentifier nodeId;
- private final Collection<Node> nodes;
+ private final Collection<ListenerNode> nodes;
private final Map<DataChangeListenerRegistration<?>, Builder> subBuilders;
private final Map<DataChangeListenerRegistration<?>, Builder> oneBuilders;
private ResolveDataChangeState(final YangInstanceIdentifier nodeId,
final Iterable<Builder> inheritedSub, final Collection<Builder> inheritedOne,
- final Collection<Node> nodes) {
+ final Collection<ListenerNode> nodes) {
this.nodeId = Preconditions.checkNotNull(nodeId);
this.nodes = Preconditions.checkNotNull(nodes);
this.inheritedSub = Preconditions.checkNotNull(inheritedSub);
final Map<DataChangeListenerRegistration<?>, Builder> sub = new HashMap<>();
final Map<DataChangeListenerRegistration<?>, Builder> one = new HashMap<>();
final Map<DataChangeListenerRegistration<?>, Builder> base = new HashMap<>();
- for (Node n : nodes) {
+ for (ListenerNode n : nodes) {
for (DataChangeListenerRegistration<?> l : n.getListeners()) {
final Builder b = DOMImmutableDataChangeEvent.builder(DataChangeScope.BASE);
switch (l.getScope()) {
* @param root root node
* @return
*/
- public static ResolveDataChangeState initial(final YangInstanceIdentifier rootId, final Node root) {
+ public static ResolveDataChangeState initial(final YangInstanceIdentifier rootId, final ListenerNode root) {
return new ResolveDataChangeState(rootId, Collections.<Builder>emptyList(),
Collections.<Builder>emptyList(), Collections.singletonList(root));
}
LOG.trace("Collected events {}", map);
}
- private static Collection<Node> getListenerChildrenWildcarded(final Collection<Node> parentNodes,
+ private static Collection<ListenerNode> getListenerChildrenWildcarded(final Collection<ListenerNode> parentNodes,
final PathArgument child) {
if (parentNodes.isEmpty()) {
return Collections.emptyList();
}
- final List<Node> result = new ArrayList<>();
+ final List<ListenerNode> result = new ArrayList<>();
if (child instanceof NodeWithValue || child instanceof NodeIdentifierWithPredicates) {
NodeIdentifier wildcardedIdentifier = new NodeIdentifier(child.getNodeType());
addChildNodes(result, parentNodes, wildcardedIdentifier);
return result;
}
- private static void addChildNodes(final List<Node> result, final Collection<Node> parentNodes, final PathArgument childIdentifier) {
- for (Node node : parentNodes) {
- Optional<Node> child = node.getChild(childIdentifier);
+ private static void addChildNodes(final List<ListenerNode> result, final Collection<ListenerNode> parentNodes, final PathArgument childIdentifier) {
+ for (ListenerNode node : parentNodes) {
+ Optional<ListenerNode> child = node.getChild(childIdentifier);
if (child.isPresent()) {
result.add(child.get());
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.MoreObjects.ToStringHelper;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.impl.tree;
+
+import com.google.common.base.Optional;
+import java.lang.ref.Reference;
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
+import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.DataChangeListenerRegistrationImpl;
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.StoreTreeNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is a single node within the listener tree. Note that the data returned from
+ * and instance of this class is guaranteed to have any relevance or consistency
+ * only as long as the {@link ListenerWalker} instance through which it is reached remains
+ * unclosed.
+ *
+ * @author Robert Varga
+ */
+public class ListenerNode implements StoreTreeNode<ListenerNode>, Identifiable<PathArgument> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ListenerNode.class);
+
+ private final Collection<DataChangeListenerRegistration<?>> listeners = new ArrayList<>();
+ private final Map<PathArgument, ListenerNode> children = new HashMap<>();
+ private final PathArgument identifier;
+ private final Reference<ListenerNode> parent;
+
+ ListenerNode(final ListenerNode parent, final PathArgument identifier) {
+ this.parent = new WeakReference<>(parent);
+ this.identifier = identifier;
+ }
+
+ @Override
+ public PathArgument getIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ public Optional<ListenerNode> getChild(final PathArgument child) {
+ return Optional.fromNullable(children.get(child));
+ }
+
+ /**
+ * Return the list of current listeners. This collection is guaranteed
+ * to be immutable only while the walker, through which this node is
+ * reachable remains unclosed.
+ *
+ * @return the list of current listeners
+ */
+ public Collection<DataChangeListenerRegistration<?>> getListeners() {
+ return listeners;
+ }
+
+ ListenerNode ensureChild(final PathArgument child) {
+ ListenerNode potential = children.get(child);
+ if (potential == null) {
+ potential = new ListenerNode(this, child);
+ children.put(child, potential);
+ }
+ return potential;
+ }
+
+ void addListener(final DataChangeListenerRegistration<?> listener) {
+ listeners.add(listener);
+ LOG.debug("Listener {} registered", listener);
+ }
+
+ void removeListener(final DataChangeListenerRegistrationImpl<?> listener) {
+ listeners.remove(listener);
+ LOG.debug("Listener {} unregistered", listener);
+
+ // We have been called with the write-lock held, so we can perform some cleanup.
+ removeThisIfUnused();
+ }
+
+ private void removeThisIfUnused() {
+ final ListenerNode p = parent.get();
+ if (p != null && listeners.isEmpty() && children.isEmpty()) {
+ p.removeChild(identifier);
+ }
+ }
+
+ private void removeChild(final PathArgument arg) {
+ children.remove(arg);
+ removeThisIfUnused();
+ }
+
+ @Override
+ public String toString() {
+ return "Node [identifier=" + identifier + ", listeners=" + listeners.size() + ", children=" + children.size() + "]";
+ }
+}
*/
package org.opendaylight.controller.md.sal.dom.store.impl.tree;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
-import java.lang.ref.Reference;
-import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.DataChangeListenerRegistration;
import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.StoreTreeNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A set of listeners organized as a tree by node to which they listen. This class
* allows for efficient lookup of listeners when we walk the DataTreeCandidate.
+ *
+ * @author Robert Varga
*/
public final class ListenerTree {
private static final Logger LOG = LoggerFactory.getLogger(ListenerTree.class);
private final ReadWriteLock rwLock = new ReentrantReadWriteLock(true);
- private final Node rootNode = new Node(null, null);
+ private final ListenerNode rootNode = new ListenerNode(null, null);
private ListenerTree() {
// Private to disallow direct instantiation
rwLock.writeLock().lock();
try {
- Node walkNode = rootNode;
+ ListenerNode walkNode = rootNode;
for (final PathArgument arg : path.getPathArguments()) {
walkNode = walkNode.ensureChild(arg);
}
- final Node node = walkNode;
+ final ListenerNode node = walkNode;
DataChangeListenerRegistration<L> reg = new DataChangeListenerRegistrationImpl<L>(listener) {
@Override
public DataChangeScope getScope() {
*
* @return A walker instance.
*/
- public Walker getWalker() {
+ public ListenerWalker getWalker() {
/*
* TODO: The only current user of this method is local to the datastore.
* Since this class represents a read-lock, losing a reference to
* external user exist, make the Walker a phantom reference, which
* will cleanup the lock if not told to do so.
*/
- final Walker ret = new Walker(rwLock.readLock(), rootNode);
+ final ListenerWalker ret = new ListenerWalker(rwLock.readLock(), rootNode);
rwLock.readLock().lock();
return ret;
}
- /**
- * A walking context, pretty much equivalent to an iterator, but it
- * exposes the underlying tree structure.
- */
- /*
- * FIXME: BUG-1511: split this class out as ListenerWalker.
- */
- public static final class Walker implements AutoCloseable {
- private final Lock lock;
- private final Node node;
-
- @GuardedBy("this")
- private boolean valid = true;
-
- private Walker(final Lock lock, final Node node) {
- this.lock = Preconditions.checkNotNull(lock);
- this.node = Preconditions.checkNotNull(node);
- }
-
- public Node getRootNode() {
- return node;
- }
-
- @Override
- public synchronized void close() {
- if (valid) {
- lock.unlock();
- valid = false;
- }
- }
- }
-
- /**
- * This is a single node within the listener tree. Note that the data returned from
- * and instance of this class is guaranteed to have any relevance or consistency
- * only as long as the {@link org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker} instance through which it is reached remains
- * unclosed.
- */
- /*
- * FIXME: BUG-1511: split this class out as ListenerNode.
- */
- public static final class Node implements StoreTreeNode<Node>, Identifiable<PathArgument> {
- private final Collection<DataChangeListenerRegistration<?>> listeners = new ArrayList<>();
- private final Map<PathArgument, Node> children = new HashMap<>();
- private final PathArgument identifier;
- private final Reference<Node> parent;
-
- private Node(final Node parent, final PathArgument identifier) {
- this.parent = new WeakReference<>(parent);
- this.identifier = identifier;
- }
-
- @Override
- public PathArgument getIdentifier() {
- return identifier;
- }
-
- @Override
- public Optional<Node> getChild(final PathArgument child) {
- return Optional.fromNullable(children.get(child));
- }
-
- /**
- * Return the list of current listeners. This collection is guaranteed
- * to be immutable only while the walker, through which this node is
- * reachable remains unclosed.
- *
- * @return the list of current listeners
- */
- public Collection<DataChangeListenerRegistration<?>> getListeners() {
- return listeners;
- }
-
- private Node ensureChild(final PathArgument child) {
- Node potential = children.get(child);
- if (potential == null) {
- potential = new Node(this, child);
- children.put(child, potential);
- }
- return potential;
- }
-
- private void addListener(final DataChangeListenerRegistration<?> listener) {
- listeners.add(listener);
- LOG.debug("Listener {} registered", listener);
- }
-
- private void removeListener(final DataChangeListenerRegistrationImpl<?> listener) {
- listeners.remove(listener);
- LOG.debug("Listener {} unregistered", listener);
-
- // We have been called with the write-lock held, so we can perform some cleanup.
- removeThisIfUnused();
- }
-
- private void removeThisIfUnused() {
- final Node p = parent.get();
- if (p != null && listeners.isEmpty() && children.isEmpty()) {
- p.removeChild(identifier);
- }
- }
-
- private void removeChild(final PathArgument arg) {
- children.remove(arg);
- removeThisIfUnused();
- }
-
- @Override
- public String toString() {
- return "Node [identifier=" + identifier + ", listeners=" + listeners.size() + ", children=" + children.size() + "]";
- }
-
-
- }
-
- private abstract static class DataChangeListenerRegistrationImpl<T extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> extends AbstractListenerRegistration<T> //
+ abstract static class DataChangeListenerRegistrationImpl<T extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> extends AbstractListenerRegistration<T> //
implements DataChangeListenerRegistration<T> {
public DataChangeListenerRegistrationImpl(final T listener) {
super(listener);
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.impl.tree;
+
+import com.google.common.base.Preconditions;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.locks.Lock;
+
+/**
+ * A walking context, pretty much equivalent to an iterator, but it
+ * exposes the underlying tree structure.
+ *
+ * @author Robert Varga
+ */
+public class ListenerWalker implements AutoCloseable {
+ private static final AtomicIntegerFieldUpdater<ListenerWalker> CLOSED_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ListenerWalker.class, "closed");
+ private final Lock lock;
+ private final ListenerNode node;
+
+ // Used via CLOSED_UPDATER
+ @SuppressWarnings("unused")
+ private volatile int closed = 0;
+
+ ListenerWalker(final Lock lock, final ListenerNode node) {
+ this.lock = Preconditions.checkNotNull(lock);
+ this.node = Preconditions.checkNotNull(node);
+ }
+
+ public ListenerNode getRootNode() {
+ return node;
+ }
+
+ @Override
+ public void close() {
+ if (CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
+ lock.unlock();
+ }
+ }
+}
\ No newline at end of file
@Before
public void setupStore() {
- domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor());
+ domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.newDirectExecutorService());
schemaContext = TestModel.createTestContext();
domStore.onGlobalContextUpdated(schemaContext);
}
@Before
public void setupStore() {
- domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor());
+ domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.newDirectExecutorService());
loadSchemas(RockTheHouseInput.class);
}
package org.opendaylight.controller.md.sal.dom.store.impl;
-import java.util.concurrent.ExecutorService;
-
import com.google.common.util.concurrent.ForwardingExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
/**
* A forwarding Executor used by unit tests for DataChangeListener notifications
public class TestDCLExecutorService extends ForwardingExecutorService {
// Start with a same thread executor to avoid timing issues during test setup.
- private volatile ExecutorService currentExecutor = MoreExecutors.sameThreadExecutor();
+ private volatile ExecutorService currentExecutor = MoreExecutors.newDirectExecutorService();
// The real executor to use when test setup is complete.
private final ExecutorService postSetupExecutor;
- public TestDCLExecutorService( ExecutorService postSetupExecutor ) {
+ public TestDCLExecutorService( final ExecutorService postSetupExecutor ) {
this.postSetupExecutor = postSetupExecutor;
}
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-client</artifactId>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-inventory</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-topology</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-broker-impl</artifactId>
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkCondition;
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkNotNull;
+
import com.google.common.base.Optional;
import io.netty.util.concurrent.EventExecutor;
import java.math.BigDecimal;
import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceSalFacade;
import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final Logger logger = LoggerFactory.getLogger(NetconfConnectorModule.class);
private BundleContext bundleContext;
- private Optional<NetconfSessionCapabilities> userCapabilities;
+ private Optional<NetconfSessionPreferences> userCapabilities;
private SchemaSourceRegistry schemaRegistry;
private SchemaContextFactory schemaContextFactory;
}
userCapabilities = getUserCapabilities();
-
}
private boolean isHostAddressPresent(final Host address) {
@Override
public java.lang.AutoCloseable createInstance() {
- final RemoteDeviceId id = new RemoteDeviceId(getIdentifier());
+ final RemoteDeviceId id = new RemoteDeviceId(getIdentifier(), getSocketAddress());
final ExecutorService globalProcessingExecutor = getProcessingExecutorDependency().getExecutor();
final Broker domBroker = getDomRegistryDependency();
final BindingAwareBroker bindingBroker = getBindingRegistryDependency();
- final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade
+ final RemoteDeviceHandler<NetconfSessionPreferences> salFacade
= new NetconfDeviceSalFacade(id, domBroker, bindingBroker, bundleContext, globalProcessingExecutor);
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO =
new NetconfDevice.SchemaResourcesDTO(schemaRegistry, schemaContextFactory, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
final NetconfDevice device =
- new NetconfDevice(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, new NetconfMessageTransformer());
+ new NetconfDevice(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, new NetconfMessageTransformer(), getReconnectOnChangedSchema());
final NetconfDeviceCommunicator listener = userCapabilities.isPresent() ?
new NetconfDeviceCommunicator(id, device, userCapabilities.get()) : new NetconfDeviceCommunicator(id, device);
final NetconfReconnectingClientConfiguration clientConfig = getClientConfig(listener);
-
final NetconfClientDispatcher dispatcher = getClientDispatcherDependency();
+
listener.initializeRemoteConnection(dispatcher, clientConfig);
- return new MyAutoCloseable(listener, salFacade);
+ return new SalConnectorCloseable(listener, salFacade);
}
- private Optional<NetconfSessionCapabilities> getUserCapabilities() {
+ private Optional<NetconfSessionPreferences> getUserCapabilities() {
if(getYangModuleCapabilities() == null) {
return Optional.absent();
}
return Optional.absent();
}
- final NetconfSessionCapabilities parsedOverrideCapabilities = NetconfSessionCapabilities.fromStrings(capabilities);
+ final NetconfSessionPreferences parsedOverrideCapabilities = NetconfSessionPreferences.fromStrings(capabilities);
JmxAttributeValidationException.checkCondition(
parsedOverrideCapabilities.getNonModuleCaps().isEmpty(),
"Capabilities to override can only contain module based capabilities, non-module capabilities will be retrieved from the device," +
final InetSocketAddress socketAddress = getSocketAddress();
final long clientConnectionTimeoutMillis = getConnectionTimeoutMillis();
- final ReconnectStrategyFactory sf = new MyReconnectStrategyFactory(
+ final ReconnectStrategyFactory sf = new TimedReconnectStrategyFactory(
getEventExecutorDependency(), getMaxConnectionAttempts(), getBetweenAttemptsTimeoutMillis(), getSleepFactor());
final ReconnectStrategy strategy = sf.createReconnectStrategy();
.withAddress(socketAddress)
.withConnectionTimeoutMillis(clientConnectionTimeoutMillis)
.withReconnectStrategy(strategy)
- .withSessionListener(listener)
.withAuthHandler(new LoginPassword(getUsername(),getPassword()))
.withProtocol(getTcpOnly() ?
NetconfClientConfiguration.NetconfClientProtocol.TCP :
NetconfClientConfiguration.NetconfClientProtocol.SSH)
.withConnectStrategyFactory(sf)
+ .withSessionListener(listener)
.build();
}
- private static final class MyAutoCloseable implements AutoCloseable {
- private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
+ private static final class SalConnectorCloseable implements AutoCloseable {
+ private final RemoteDeviceHandler<NetconfSessionPreferences> salFacade;
private final NetconfDeviceCommunicator listener;
- public MyAutoCloseable(final NetconfDeviceCommunicator listener,
- final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade) {
+ public SalConnectorCloseable(final NetconfDeviceCommunicator listener,
+ final RemoteDeviceHandler<NetconfSessionPreferences> salFacade) {
this.listener = listener;
this.salFacade = salFacade;
}
}
}
- private static final class MyReconnectStrategyFactory implements ReconnectStrategyFactory {
+ private static final class TimedReconnectStrategyFactory implements ReconnectStrategyFactory {
private final Long connectionAttempts;
private final EventExecutor executor;
private final double sleepFactor;
private final int minSleep;
- MyReconnectStrategyFactory(final EventExecutor executor, final Long maxConnectionAttempts, final int minSleep, final BigDecimal sleepFactor) {
+ TimedReconnectStrategyFactory(final EventExecutor executor, final Long maxConnectionAttempts, final int minSleep, final BigDecimal sleepFactor) {
if (maxConnectionAttempts != null && maxConnectionAttempts > 0) {
connectionAttempts = maxConnectionAttempts;
} else {
/**
*
*/
-public interface RemoteDevice<PREF, M> {
+public interface RemoteDevice<PREF, M, LISTENER extends RemoteDeviceCommunicator<M>> {
- void onRemoteSessionUp(PREF remoteSessionCapabilities, RemoteDeviceCommunicator<M> listener);
+ void onRemoteSessionUp(PREF remoteSessionCapabilities, LISTENER listener);
void onRemoteSessionDown();
+ void onRemoteSessionFailed(Throwable throwable);
+
void onNotification(M notification);
}
void onDeviceDisconnected();
+ void onDeviceFailed(Throwable throwable);
+
void onNotification(CompositeNode domNotification);
void close();
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collection;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.schema.NetconfRemoteSchemaYangSourceProvider;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.repo.api.MissingSchemaSourceException;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
/**
* This is a mediator between NetconfDeviceCommunicator and NetconfDeviceSalFacade
*/
-public final class NetconfDevice implements RemoteDevice<NetconfSessionCapabilities, NetconfMessage> {
+public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> {
private static final Logger logger = LoggerFactory.getLogger(NetconfDevice.class);
};
private final RemoteDeviceId id;
+ private final boolean reconnectOnSchemasChange;
private final SchemaContextFactory schemaContextFactory;
- private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
+ private final RemoteDeviceHandler<NetconfSessionPreferences> salFacade;
private final ListeningExecutorService processingExecutor;
private final SchemaSourceRegistry schemaRegistry;
private final MessageTransformer<NetconfMessage> messageTransformer;
private final NotificationHandler notificationHandler;
private final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations = Lists.newArrayList();
- public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
+ public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionPreferences> salFacade,
final ExecutorService globalProcessingExecutor, final MessageTransformer<NetconfMessage> messageTransformer) {
+ this(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, messageTransformer, false);
+ }
+
+ // FIXME reduce parameters
+ public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionPreferences> salFacade,
+ final ExecutorService globalProcessingExecutor, final MessageTransformer<NetconfMessage> messageTransformer, final boolean reconnectOnSchemasChange) {
this.id = id;
+ this.reconnectOnSchemasChange = reconnectOnSchemasChange;
this.schemaRegistry = schemaResourcesDTO.getSchemaRegistry();
this.messageTransformer = messageTransformer;
this.schemaContextFactory = schemaResourcesDTO.getSchemaContextFactory();
}
@Override
- public void onRemoteSessionUp(final NetconfSessionCapabilities remoteSessionCapabilities,
- final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ public void onRemoteSessionUp(final NetconfSessionPreferences remoteSessionCapabilities,
+ final NetconfDeviceCommunicator listener) {
// SchemaContext setup has to be performed in a dedicated thread since
// we are in a netty thread in this method
// Yang models are being downloaded in this method and it would cause a
final DeviceSourcesResolver task = new DeviceSourcesResolver(deviceRpc, remoteSessionCapabilities, id, stateSchemasResolver);
final ListenableFuture<DeviceSources> sourceResolverFuture = processingExecutor.submit(task);
+ if(shouldListenOnSchemaChange(remoteSessionCapabilities)) {
+ registerToBaseNetconfStream(deviceRpc, listener);
+ }
+
final FutureCallback<DeviceSources> resolvedSourceCallback = new FutureCallback<DeviceSources>() {
@Override
public void onSuccess(final DeviceSources result) {
};
Futures.addCallback(sourceResolverFuture, resolvedSourceCallback);
+
+ }
+
+ private void registerToBaseNetconfStream(final NetconfDeviceRpc deviceRpc, final NetconfDeviceCommunicator listener) {
+ final ListenableFuture<RpcResult<CompositeNode>> rpcResultListenableFuture =
+ deviceRpc.invokeRpc(NetconfMessageTransformUtil.CREATE_SUBSCRIPTION_RPC_QNAME, NetconfMessageTransformUtil.CREATE_SUBSCRIPTION_RPC_CONTENT);
+
+ final NotificationHandler.NotificationFilter filter = new NotificationHandler.NotificationFilter() {
+ @Override
+ public Optional<CompositeNode> filterNotification(final CompositeNode notification) {
+ if (isCapabilityChanged(notification)) {
+ logger.info("{}: Schemas change detected, reconnecting", id);
+ // Only disconnect is enough, the reconnecting nature of the connector will take care of reconnecting
+ listener.disconnect();
+ return Optional.absent();
+ }
+ return Optional.of(notification);
+ }
+
+ private boolean isCapabilityChanged(final CompositeNode notification) {
+ return notification.getNodeType().equals(NetconfCapabilityChange.QNAME);
+ }
+ };
+
+ Futures.addCallback(rpcResultListenableFuture, new FutureCallback<RpcResult<CompositeNode>>() {
+ @Override
+ public void onSuccess(final RpcResult<CompositeNode> result) {
+ notificationHandler.addNotificationFilter(filter);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ logger.warn("Unable to subscribe to base notification stream. Schemas will not be reloaded on the fly", t);
+ }
+ });
+ }
+
+ private boolean shouldListenOnSchemaChange(final NetconfSessionPreferences remoteSessionCapabilities) {
+ return remoteSessionCapabilities.isNotificationsSupported() && reconnectOnSchemasChange;
}
- private void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc) {
+ private void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionPreferences remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc) {
updateMessageTransformer(result);
salFacade.onDeviceConnected(result, remoteSessionCapabilities, deviceRpc);
notificationHandler.onRemoteSchemaUp();
- logger.debug("{}: Initialization in sal successful", id);
logger.info("{}: Netconf connector initialized successfully", id);
}
/**
* Update initial message transformer to use retrieved schema
- * @param currentSchemaContext
*/
private void updateMessageTransformer(final SchemaContext currentSchemaContext) {
messageTransformer.onGlobalContextUpdated(currentSchemaContext);
@Override
public void onRemoteSessionDown() {
+ notificationHandler.onRemoteSchemaDown();
+
salFacade.onDeviceDisconnected();
for (final SchemaSourceRegistration<? extends SchemaSourceRepresentation> sourceRegistration : sourceRegistrations) {
sourceRegistration.close();
resetMessageTransformer();
}
+ @Override
+ public void onRemoteSessionFailed(final Throwable throwable) {
+ salFacade.onDeviceFailed(throwable);
+ }
+
@Override
public void onNotification(final NetconfMessage notification) {
notificationHandler.handleNotification(notification);
*/
private static class DeviceSourcesResolver implements Callable<DeviceSources> {
private final NetconfDeviceRpc deviceRpc;
- private final NetconfSessionCapabilities remoteSessionCapabilities;
+ private final NetconfSessionPreferences remoteSessionCapabilities;
private final RemoteDeviceId id;
private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
- public DeviceSourcesResolver(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+ public DeviceSourcesResolver(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
this.deviceRpc = deviceRpc;
this.remoteSessionCapabilities = remoteSessionCapabilities;
this.id = id;
logger.warn("{}: Netconf device provides additional yang models not reported in hello message capabilities: {}",
id, providedSourcesNotRequired);
logger.warn("{}: Adding provided but not required sources as required to prevent failures", id);
+ logger.debug("{}: Netconf device reported in hello: {}", id, requiredSources);
requiredSources.addAll(providedSourcesNotRequired);
}
*/
private final class RecursiveSchemaSetup implements Runnable {
private final DeviceSources deviceSources;
- private final NetconfSessionCapabilities remoteSessionCapabilities;
+ private final NetconfSessionPreferences remoteSessionCapabilities;
private final NetconfDeviceRpc deviceRpc;
private final RemoteDeviceCommunicator<NetconfMessage> listener;
+ private NetconfDeviceCapabilities capabilities;
- public RecursiveSchemaSetup(final DeviceSources deviceSources, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ public RecursiveSchemaSetup(final DeviceSources deviceSources, final NetconfSessionPreferences remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc, final RemoteDeviceCommunicator<NetconfMessage> listener) {
this.deviceSources = deviceSources;
this.remoteSessionCapabilities = remoteSessionCapabilities;
this.deviceRpc = deviceRpc;
this.listener = listener;
+ this.capabilities = remoteSessionCapabilities.getNetconfDeviceCapabilities();
}
@Override
/**
* Recursively build schema context, in case of success or final failure notify device
*/
+ // FIXME reimplement without recursion
private void setUpSchema(final Collection<SourceIdentifier> requiredSources) {
logger.trace("{}: Trying to build schema context from {}", id, requiredSources);
@Override
public void onSuccess(final SchemaContext result) {
logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
+ Collection<QName> filteredQNames = Sets.difference(remoteSessionCapabilities.getModuleBasedCaps(), capabilities.getUnresolvedCapabilites().keySet());
+ capabilities.addCapabilities(filteredQNames);
+ capabilities.addNonModuleBasedCapabilities(remoteSessionCapabilities.getNonModuleCaps());
handleSalInitializationSuccess(result, remoteSessionCapabilities, deviceRpc);
}
if (t instanceof MissingSchemaSourceException) {
final SourceIdentifier missingSource = ((MissingSchemaSourceException) t).getSourceId();
logger.warn("{}: Unable to build schema context, missing source {}, will reattempt without it", id, missingSource);
+ capabilities.addUnresolvedCapabilities(getQNameFromSourceIdentifiers(Sets.newHashSet(missingSource)), UnavailableCapability.FailureReason.MissingSource);
setUpSchema(stripMissingSource(requiredSources, missingSource));
// In case resolution error, try only with resolved sources
} else if (t instanceof SchemaResolutionException) {
// TODO check for infinite loop
final SchemaResolutionException resolutionException = (SchemaResolutionException) t;
+ final Set<SourceIdentifier> unresolvedSources = resolutionException.getUnsatisfiedImports().keySet();
+ capabilities.addUnresolvedCapabilities(getQNameFromSourceIdentifiers(unresolvedSources), UnavailableCapability.FailureReason.UnableToResolve);
logger.warn("{}: Unable to build schema context, unsatisfied imports {}, will reattempt with resolved only", id, resolutionException.getUnsatisfiedImports());
setUpSchema(resolutionException.getResolvedSources());
// unknown error, fail
Preconditions.checkState(removed, "{}: Trying to remove {} from {} failed", id, sIdToRemove, requiredSources);
return sourceIdentifiers;
}
+
+ private Collection<QName> getQNameFromSourceIdentifiers(Collection<SourceIdentifier> identifiers) {
+ Collection<QName> qNames = new HashSet<>();
+ for (SourceIdentifier source : identifiers) {
+ Optional<QName> qname = getQNameFromSourceIdentifier(source);
+ if (qname.isPresent()) {
+ qNames.add(qname.get());
+ }
+ }
+ if (qNames.isEmpty()) {
+ logger.debug("Unable to map any source identifiers to a capability reported by device : {}", identifiers);
+ }
+ return qNames;
+ }
+
+ private Optional<QName> getQNameFromSourceIdentifier(SourceIdentifier identifier) {
+ for (QName qname : remoteSessionCapabilities.getModuleBasedCaps()) {
+ if (qname.getLocalName().equals(identifier.getName())
+ && qname.getFormattedRevision().equals(identifier.getRevision())) {
+ return Optional.of(qname);
+ }
+ }
+ throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier);
+ }
}
}
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
* Factory for NetconfStateSchemas
*/
public interface NetconfStateSchemasResolver {
- NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id);
+ NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id);
}
/**
public static final class NetconfStateSchemasResolverImpl implements NetconfStateSchemasResolver {
@Override
- public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
+ public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
return NetconfStateSchemas.create(deviceRpc, remoteSessionCapabilities, id);
}
}
/**
* Issue get request to remote device and parse response to find all schemas under netconf-state/schemas
*/
- private static NetconfStateSchemas create(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
+ private static NetconfStateSchemas create(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
if(remoteSessionCapabilities.isMonitoringSupported() == false) {
logger.warn("{}: Netconf monitoring not supported on device, cannot detect provided schemas");
return EMPTY;
*/
package org.opendaylight.controller.sal.connect.netconf;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.LinkedList;
import java.util.List;
private final MessageTransformer<NetconfMessage> messageTransformer;
private final RemoteDeviceId id;
private boolean passNotifications = false;
+ private NotificationFilter filter;
NotificationHandler(final RemoteDeviceHandler<?> salFacade, final MessageTransformer<NetconfMessage> messageTransformer, final RemoteDeviceId id) {
this.salFacade = Preconditions.checkNotNull(salFacade);
synchronized void handleNotification(final NetconfMessage notification) {
if(passNotifications) {
- passNotification(messageTransformer.toNotification(notification));
+ passNotification(transformNotification(notification));
} else {
queueNotification(notification);
}
passNotifications = true;
for (final NetconfMessage cachedNotification : queue) {
- passNotification(messageTransformer.toNotification(cachedNotification));
+ passNotification(transformNotification(cachedNotification));
}
queue.clear();
}
+ private CompositeNode transformNotification(final NetconfMessage cachedNotification) {
+ final CompositeNode parsedNotification = messageTransformer.toNotification(cachedNotification);
+ Preconditions.checkNotNull(parsedNotification, "%s: Unable to parse received notification: %s", id, cachedNotification);
+ return parsedNotification;
+ }
+
private void queueNotification(final NetconfMessage notification) {
Preconditions.checkState(passNotifications == false);
queue.add(notification);
}
- private void passNotification(final CompositeNode parsedNotification) {
+ private synchronized void passNotification(final CompositeNode parsedNotification) {
logger.debug("{}: Forwarding notification {}", id, parsedNotification);
- Preconditions.checkNotNull(parsedNotification);
- salFacade.onNotification(parsedNotification);
+
+ if(filter == null || filter.filterNotification(parsedNotification).isPresent()) {
+ salFacade.onNotification(parsedNotification);
+ }
+ }
+
+ synchronized void addNotificationFilter(final NotificationFilter filter) {
+ this.filter = filter;
+ }
+
+ synchronized void onRemoteSchemaDown() {
+ queue.clear();
+ passNotifications = false;
+ }
+
+ static interface NotificationFilter {
+
+ Optional<CompositeNode> filterNotification(CompositeNode notification);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.listener;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability.FailureReason;
+import org.opendaylight.yangtools.yang.common.QName;
+
+public final class NetconfDeviceCapabilities {
+ private final Map<QName, FailureReason> unresolvedCapabilites;
+ private final Set<QName> resolvedCapabilities;
+
+ private final Set<String> nonModuleBasedCapabilities;
+
+ public NetconfDeviceCapabilities() {
+ this.unresolvedCapabilites = new HashMap<>();
+ this.resolvedCapabilities = new HashSet<>();
+ this.nonModuleBasedCapabilities = new HashSet<>();
+ }
+
+ public void addUnresolvedCapability(QName source, FailureReason reason) {
+ unresolvedCapabilites.put(source, reason);
+ }
+
+ public void addUnresolvedCapabilities(Collection<QName> capabilities, FailureReason reason) {
+ for (QName s : capabilities) {
+ unresolvedCapabilites.put(s, reason);
+ }
+ }
+
+ public void addCapabilities(Collection<QName> availableSchemas) {
+ resolvedCapabilities.addAll(availableSchemas);
+ }
+
+ public void addNonModuleBasedCapabilities(Collection<String> nonModuleCapabilities) {
+ this.nonModuleBasedCapabilities.addAll(nonModuleCapabilities);
+ }
+
+ public Set<String> getNonModuleBasedCapabilities() {
+ return nonModuleBasedCapabilities;
+ }
+
+ public Map<QName, FailureReason> getUnresolvedCapabilites() {
+ return unresolvedCapabilites;
+ }
+
+ public Set<QName> getResolvedCapabilities() {
+ return resolvedCapabilities;
+ }
+
+}
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
+import io.netty.util.concurrent.GenericFutureListener;
import java.util.ArrayDeque;
import java.util.Iterator;
import java.util.List;
private static final Logger logger = LoggerFactory.getLogger(NetconfDeviceCommunicator.class);
- private final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice;
- private final Optional<NetconfSessionCapabilities> overrideNetconfCapabilities;
+ private final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice;
+ private final Optional<NetconfSessionPreferences> overrideNetconfCapabilities;
private final RemoteDeviceId id;
private final Lock sessionLock = new ReentrantLock();
private NetconfClientSession session;
private Future<?> initFuture;
- public NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice,
- final NetconfSessionCapabilities netconfSessionCapabilities) {
- this(id, remoteDevice, Optional.of(netconfSessionCapabilities));
+ public NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice,
+ final NetconfSessionPreferences netconfSessionPreferences) {
+ this(id, remoteDevice, Optional.of(netconfSessionPreferences));
}
public NetconfDeviceCommunicator(final RemoteDeviceId id,
- final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice) {
- this(id, remoteDevice, Optional.<NetconfSessionCapabilities>absent());
+ final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice) {
+ this(id, remoteDevice, Optional.<NetconfSessionPreferences>absent());
}
- private NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice,
- final Optional<NetconfSessionCapabilities> overrideNetconfCapabilities) {
+ private NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice,
+ final Optional<NetconfSessionPreferences> overrideNetconfCapabilities) {
this.id = id;
this.remoteDevice = remoteDevice;
this.overrideNetconfCapabilities = overrideNetconfCapabilities;
logger.debug("{}: Session established", id);
this.session = session;
- NetconfSessionCapabilities netconfSessionCapabilities =
- NetconfSessionCapabilities.fromNetconfSession(session);
- logger.trace("{}: Session advertised capabilities: {}", id, netconfSessionCapabilities);
+ NetconfSessionPreferences netconfSessionPreferences =
+ NetconfSessionPreferences.fromNetconfSession(session);
+ logger.trace("{}: Session advertised capabilities: {}", id, netconfSessionPreferences);
if(overrideNetconfCapabilities.isPresent()) {
- netconfSessionCapabilities = netconfSessionCapabilities.replaceModuleCaps(overrideNetconfCapabilities.get());
- logger.debug("{}: Session capabilities overridden, capabilities that will be used: {}", id, netconfSessionCapabilities);
+ netconfSessionPreferences = netconfSessionPreferences.replaceModuleCaps(overrideNetconfCapabilities.get());
+ logger.debug("{}: Session capabilities overridden, capabilities that will be used: {}", id, netconfSessionPreferences);
}
- remoteDevice.onRemoteSessionUp(netconfSessionCapabilities, this);
+ remoteDevice.onRemoteSessionUp(netconfSessionPreferences, this);
}
finally {
sessionLock.unlock();
}
}
- public void initializeRemoteConnection(final NetconfClientDispatcher dispatch,
- final NetconfClientConfiguration config) {
+ /**
+  * Starts the (re)connect attempt for this device via the given dispatcher and
+  * registers a listener that reports connection failure to the remote device facade.
+  */
+ public void initializeRemoteConnection(final NetconfClientDispatcher dispatcher, final NetconfClientConfiguration config) {
+ // TODO 2313 extract listener from configuration
if(config instanceof NetconfReconnectingClientConfiguration) {
- initFuture = dispatch.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
+ initFuture = dispatcher.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
} else {
- initFuture = dispatch.createClient(config);
+ initFuture = dispatcher.createClient(config);
+ }
+
+
+ // Propagate connection failures to the device facade; successful sessions are
+ // reported separately through the session-up callback.
+ initFuture.addListener(new GenericFutureListener<Future<Object>>(){
+
+ @Override
+ public void operationComplete(Future<Object> future) throws Exception {
+ if (!future.isSuccess()) {
+ logger.debug("{}: Connection failed", id, future.cause())
+ NetconfDeviceCommunicator.this.remoteDevice.onRemoteSessionFailed(future.cause());
+ }
+ }
+ });
+
+ }
+
+ /**
+  * Closes the underlying NETCONF session if one is currently established.
+  */
+ public void disconnect() {
+ if(session != null) {
+ session.close();
}
}
}
}
- private RpcResult<NetconfMessage> createSessionDownRpcResult()
- {
+ // Builds a failed RpcResult (TRANSPORT error) indicating the session to this device is down.
+ private RpcResult<NetconfMessage> createSessionDownRpcResult() {
return createErrorRpcResult( RpcError.ErrorType.TRANSPORT,
String.format( "The netconf session to %1$s is disconnected", id.getName() ) );
}
- private RpcResult<NetconfMessage> createErrorRpcResult( RpcError.ErrorType errorType, String message )
- {
+ // Builds a failed RpcResult carrying the given error type and message,
+ // tagged with the NETCONF "operation-failed" error tag.
+ private RpcResult<NetconfMessage> createErrorRpcResult( RpcError.ErrorType errorType, String message ) {
return RpcResultBuilder.<NetconfMessage>failed()
- .withError( errorType, NetconfDocumentedException.ErrorTag.operation_failed.getTagValue(),
- message )
- .build();
+ .withError(errorType, NetconfDocumentedException.ErrorTag.operation_failed.getTagValue(), message).build();
}
@Override
if(session != null) {
session.close();
}
+
tearDown(id + ": Netconf session closed");
}
logger.debug("{}: Message received {}", id, message);
if(logger.isTraceEnabled()) {
- logger.trace( "{}: Matched request: {} to response: {}", id,
- msgToS( request.request ), msgToS( message ) );
+ logger.trace( "{}: Matched request: {} to response: {}", id, msgToS( request.request ), msgToS( message ) );
}
try {
NetconfMessageTransformUtil.checkValidReply( request.request, message );
- }
- catch (final NetconfDocumentedException e) {
+ } catch (final NetconfDocumentedException e) {
logger.warn( "{}: Invalid request-reply match, reply message contains different message-id, request: {}, response: {}",
id, msgToS( request.request ), msgToS( message ), e );
request.future.set( RpcResultBuilder.<NetconfMessage>failed()
.withRpcError( NetconfMessageTransformUtil.toRpcError( e ) ).build() );
+
+ //recursively processing message to eventually find matching request
+ processMessage(message);
+
return;
}
try {
NetconfMessageTransformUtil.checkSuccessReply(message);
- }
- catch(final NetconfDocumentedException e) {
+ } catch(final NetconfDocumentedException e) {
logger.warn( "{}: Error reply from remote device, request: {}, response: {}", id,
msgToS( request.request ), msgToS( message ), e );
}
@Override
- public ListenableFuture<RpcResult<NetconfMessage>> sendRequest(
- final NetconfMessage message, final QName rpc) {
+ // Sends the request under the session lock so request queuing and session state
+ // cannot race with session up/down transitions.
+ public ListenableFuture<RpcResult<NetconfMessage>> sendRequest(final NetconfMessage message, final QName rpc) {
sessionLock.lock();
try {
return sendRequestWithLock( message, rpc );
- }
- finally {
+ } finally {
sessionLock.unlock();
}
}
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public final class NetconfSessionCapabilities {
+public final class NetconfSessionPreferences {
private static final class ParameterMatcher {
private final Predicate<String> predicate;
}
}
- private static final Logger LOG = LoggerFactory.getLogger(NetconfSessionCapabilities.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfSessionPreferences.class);
private static final ParameterMatcher MODULE_PARAM = new ParameterMatcher("module=");
private static final ParameterMatcher REVISION_PARAM = new ParameterMatcher("revision=");
private static final ParameterMatcher BROKEN_REVISON_PARAM = new ParameterMatcher("amp;revision=");
private final Set<QName> moduleBasedCaps;
private final Set<String> nonModuleCaps;
- private NetconfSessionCapabilities(final Set<String> nonModuleCaps, final Set<QName> moduleBasedCaps) {
+ private NetconfSessionPreferences(final Set<String> nonModuleCaps, final Set<QName> moduleBasedCaps) {
this.nonModuleCaps = Preconditions.checkNotNull(nonModuleCaps);
this.moduleBasedCaps = Preconditions.checkNotNull(moduleBasedCaps);
}
return containsNonModuleCapability(NetconfMessageTransformUtil.NETCONF_RUNNING_WRITABLE_URI.toString());
}
+ /**
+  * Checks whether the device advertises NETCONF notification support, either via the
+  * notifications capability URI or the ietf-netconf-notifications module capability.
+  */
+ public boolean isNotificationsSupported() {
+ return containsNonModuleCapability(NetconfMessageTransformUtil.NETCONF_NOTIFICATONS_URI.toString())
+ || containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_NOTIFICATIONS);
+ }
+
public boolean isMonitoringSupported() {
return containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING)
|| containsNonModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING.getNamespace().toString());
}
- public NetconfSessionCapabilities replaceModuleCaps(final NetconfSessionCapabilities netconfSessionModuleCapabilities) {
+ public NetconfSessionPreferences replaceModuleCaps(final NetconfSessionPreferences netconfSessionModuleCapabilities) {
final Set<QName> moduleBasedCaps = Sets.newHashSet(netconfSessionModuleCapabilities.getModuleBasedCaps());
// Preserve monitoring module, since it indicates support for ietf-netconf-monitoring
if(containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING)) {
moduleBasedCaps.add(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING);
}
- return new NetconfSessionCapabilities(getNonModuleCaps(), moduleBasedCaps);
+ return new NetconfSessionPreferences(getNonModuleCaps(), moduleBasedCaps);
}
- public static NetconfSessionCapabilities fromNetconfSession(final NetconfClientSession session) {
+ public static NetconfSessionPreferences fromNetconfSession(final NetconfClientSession session) {
return fromStrings(session.getServerCapabilities());
}
return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
}
- public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
+ public static NetconfSessionPreferences fromStrings(final Collection<String> capabilities) {
final Set<QName> moduleBasedCaps = new HashSet<>();
final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
}
- return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
+ return new NetconfSessionPreferences(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
}
moduleBasedCaps.add(qName);
nonModuleCaps.remove(capability);
}
+
+ // Per-device capability resolution results (resolved/unresolved/non-module capabilities).
+ // NOTE(review): never reassigned in this hunk — could likely be final; confirm against the full class.
+ private NetconfDeviceCapabilities capabilities = new NetconfDeviceCapabilities();
+
+ /**
+  * Returns the capability resolution results associated with these session preferences.
+  */
+ public NetconfDeviceCapabilities getNetconfDeviceCapabilities() {
+ return capabilities;
+ }
+
+
}
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.tx.ReadOnlyTx;
import org.opendaylight.controller.sal.connect.netconf.sal.tx.ReadWriteTx;
import org.opendaylight.controller.sal.connect.netconf.sal.tx.WriteCandidateTx;
final class NetconfDeviceDataBroker implements DOMDataBroker {
private final RemoteDeviceId id;
private final NetconfBaseOps netconfOps;
- private final NetconfSessionCapabilities netconfSessionPreferences;
+ private final NetconfSessionPreferences netconfSessionPreferences;
private final DataNormalizer normalizer;
- public NetconfDeviceDataBroker(final RemoteDeviceId id, final RpcImplementation rpc, final SchemaContext schemaContext, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public NetconfDeviceDataBroker(final RemoteDeviceId id, final RpcImplementation rpc, final SchemaContext schemaContext, final NetconfSessionPreferences netconfSessionPreferences) {
this.id = id;
this.netconfOps = new NetconfBaseOps(rpc);
this.netconfSessionPreferences = netconfSessionPreferences;
*
* All data changes are submitted to an ExecutorService to avoid Thread blocking while sal is waiting for schema.
*/
+@Deprecated
final class NetconfDeviceDatastoreAdapter implements AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(NetconfDeviceDatastoreAdapter.class);
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDeviceHandler<NetconfSessionCapabilities> {
+public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDeviceHandler<NetconfSessionPreferences> {
private static final Logger logger= LoggerFactory.getLogger(NetconfDeviceSalFacade.class);
@Override
public synchronized void onDeviceConnected(final SchemaContext schemaContext,
- final NetconfSessionCapabilities netconfSessionPreferences, final RpcImplementation deviceRpc) {
+ final NetconfSessionPreferences netconfSessionPreferences, final RpcImplementation deviceRpc) {
// TODO move SchemaAwareRpcBroker from sal-broker-impl, now we have depend on the whole sal-broker-impl
final RpcProvisionRegistry rpcRegistry = new SchemaAwareRpcBroker(id.getPath().toString(), new SchemaContextProvider() {
salProvider.getMountInstance().onDeviceConnected(schemaContext, domBroker, rpcRegistry, notificationService);
salProvider.getDatastoreAdapter().updateDeviceState(true, netconfSessionPreferences.getModuleBasedCaps());
+ salProvider.getMountInstance().onTopologyDeviceConnected(schemaContext, domBroker, rpcRegistry, notificationService);
+ salProvider.getTopologyDatastoreAdapter().updateDeviceData(true, netconfSessionPreferences.getNetconfDeviceCapabilities());
}
@Override
public synchronized void onDeviceDisconnected() {
salProvider.getDatastoreAdapter().updateDeviceState(false, Collections.<QName>emptySet());
+ salProvider.getTopologyDatastoreAdapter().updateDeviceData(false, new NetconfDeviceCapabilities());
salProvider.getMountInstance().onDeviceDisconnected();
+ salProvider.getMountInstance().onTopologyDeviceDisconnected();
+ }
+
+ @Override
+ public void onDeviceFailed(Throwable throwable) {
+ // Record the failure reason in the topology datastore, then tear down both
+ // the legacy and the topology mount points.
+ salProvider.getTopologyDatastoreAdapter().setDeviceAsFailed(throwable);
+ salProvider.getMountInstance().onDeviceDisconnected();
+ salProvider.getMountInstance().onTopologyDeviceDisconnected();
}
private void registerRpcsToSal(final SchemaContext schemaContext, final RpcProvisionRegistry rpcRegistry, final RpcImplementation deviceRpc) {
private volatile NetconfDeviceDatastoreAdapter datastoreAdapter;
private MountInstance mountInstance;
+ private volatile NetconfDeviceTopologyAdapter topologyDatastoreAdapter;
+
public NetconfDeviceSalProvider(final RemoteDeviceId deviceId, final ExecutorService executor) {
this.id = deviceId;
this.executor = executor;
return datastoreAdapter;
}
+ /**
+  * Returns the topology datastore adapter for this device.
+  *
+  * @throws IllegalStateException if the provider has not been initialized via onSessionInitiated
+  */
+ public NetconfDeviceTopologyAdapter getTopologyDatastoreAdapter() {
+ // The message template must have exactly one placeholder for the single argument (id);
+ // the original had a stray second %s that was never filled in.
+ Preconditions.checkState(topologyDatastoreAdapter != null,
+ "%s: Sal provider was not initialized by sal. Cannot get topology datastore adapter", id);
+ return topologyDatastoreAdapter;
+ }
+
@Override
public void onSessionInitiated(final Broker.ProviderSession session) {
logger.debug("{}: (BI)Session with sal established {}", id, session);
final DataBroker dataBroker = session.getSALService(DataBroker.class);
datastoreAdapter = new NetconfDeviceDatastoreAdapter(id, dataBroker);
+
+ topologyDatastoreAdapter = new NetconfDeviceTopologyAdapter(id, dataBroker);
}
public void close() throws Exception {
private ObjectRegistration<DOMMountPoint> registration;
private NotificationPublishService notificationSerivce;
+ private ObjectRegistration<DOMMountPoint> topologyRegistration;
+
MountInstance(final DOMMountPointService mountService, final RemoteDeviceId id) {
this.mountService = Preconditions.checkNotNull(mountService);
this.id = Preconditions.checkNotNull(id);
}
+ @Deprecated
synchronized void onDeviceConnected(final SchemaContext initialCtx,
final DOMDataBroker broker, final RpcProvisionRegistry rpc,
final NotificationPublishService notificationSerivce) {
registration = mountBuilder.register();
}
+ @Deprecated
synchronized void onDeviceDisconnected() {
if(registration == null) {
return;
}
}
+ /**
+  * Registers a mount point for this device under its topology path, exposing the
+  * data broker, RPC registry and notification publish service. May be called only
+  * once until the matching onTopologyDeviceDisconnected().
+  */
+ synchronized void onTopologyDeviceConnected(final SchemaContext initialCtx,
+ final DOMDataBroker broker, final RpcProvisionRegistry rpc,
+ final NotificationPublishService notificationSerivce) {
+
+ Preconditions.checkNotNull(mountService, "Closed");
+ Preconditions.checkState(topologyRegistration == null, "Already initialized");
+
+ final DOMMountPointService.DOMMountPointBuilder mountBuilder = mountService.createMountPoint(id.getTopologyPath());
+ mountBuilder.addInitialSchemaContext(initialCtx);
+
+ mountBuilder.addService(DOMDataBroker.class, broker);
+ mountBuilder.addService(RpcProvisionRegistry.class, rpc);
+ this.notificationSerivce = notificationSerivce;
+ mountBuilder.addService(NotificationPublishService.class, notificationSerivce);
+
+ topologyRegistration = mountBuilder.register();
+ }
+
+ /**
+  * Unregisters the topology mount point if one is registered. Unregistration
+  * failures are logged and swallowed; the registration reference is always cleared
+  * so a subsequent onTopologyDeviceConnected() can succeed.
+  */
+ synchronized void onTopologyDeviceDisconnected() {
+ if(topologyRegistration == null) {
+ return;
+ }
+
+ try {
+ topologyRegistration.close();
+ } catch (final Exception e) {
+ // Only log and ignore
+ logger.warn("Unable to unregister mount instance for {}. Ignoring exception", id.getTopologyPath(), e);
+ } finally {
+ topologyRegistration = null;
+ }
+ }
+
@Override
synchronized public void close() throws Exception {
if(registration != null) {
onDeviceDisconnected();
+ onTopologyDeviceDisconnected();
}
mountService = null;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal;
+
+import com.google.common.base.Function;
+import com.google.common.collect.FluentIterable;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeFields.ConnectionStatus;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.AvailableCapabilitiesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.UnavailableCapabilities;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.UnavailableCapabilitiesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapability.FailureReason;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.fields.unavailable.capabilities.UnavailableCapabilityBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.network.topology.topology.topology.types.TopologyNetconf;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Maintains this NETCONF device's node entry in the netconf network-topology
+ * datastores: creates the node on construction, updates its connection status
+ * and capabilities as the device connects/fails, and removes it on close.
+ */
+final class NetconfDeviceTopologyAdapter implements AutoCloseable {
+
+ public static final Logger logger = LoggerFactory.getLogger(NetconfDeviceTopologyAdapter.class);
+
+ // Maps an unresolved-capability entry to its binding representation, preserving the failure reason.
+ public static final Function<Entry<QName, FailureReason>, UnavailableCapability> UNAVAILABLE_CAPABILITY_TRANSFORMER = new Function<Entry<QName, FailureReason>, UnavailableCapability>() {
+ @Override
+ public UnavailableCapability apply(final Entry<QName, FailureReason> input) {
+ return new UnavailableCapabilityBuilder()
+ .setCapability(input.getKey().toString())
+ .setFailureReason(input.getValue()).build();
+ }
+ };
+ // Renders a resolved capability QName in its canonical string form.
+ public static final Function<QName, String> AVAILABLE_CAPABILITY_TRANSFORMER = new Function<QName, String>() {
+ @Override
+ public String apply(QName qName) {
+ return qName.toString();
+ }
+ };
+
+ private final RemoteDeviceId id;
+ private final DataBroker dataService;
+
+ // Path to the root network-topology container and to the "topology-netconf" list entry under it.
+ private final InstanceIdentifier<NetworkTopology> networkTopologyPath;
+ private final KeyedInstanceIdentifier<Topology, TopologyKey> topologyListPath;
+ private static final String UNKNOWN_REASON = "Unknown reason";
+
+ NetconfDeviceTopologyAdapter(final RemoteDeviceId id, final DataBroker dataService) {
+ this.id = id;
+ this.dataService = dataService;
+
+ this.networkTopologyPath = InstanceIdentifier.builder(NetworkTopology.class).build();
+ this.topologyListPath = networkTopologyPath.child(Topology.class, new TopologyKey(new TopologyId(TopologyNetconf.QNAME.getLocalName())));
+
+ initDeviceData();
+ }
+
+ /**
+  * Seeds the datastores for this device: ensures the topology containers exist,
+  * then writes an operational node in Connecting state and a bare config node.
+  */
+ private void initDeviceData() {
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+
+ createNetworkTopologyIfNotPresent(writeTx);
+
+ final InstanceIdentifier<Node> path = id.getTopologyBindingPath();
+ NodeBuilder nodeBuilder = getNodeIdBuilder(id);
+ NetconfNodeBuilder netconfNodeBuilder = new NetconfNodeBuilder();
+ netconfNodeBuilder.setConnectionStatus(ConnectionStatus.Connecting);
+ netconfNodeBuilder.setHost(id.getHost());
+ netconfNodeBuilder.setPort(new PortNumber(id.getAddress().getPort()));
+ nodeBuilder.addAugmentation(NetconfNode.class, netconfNodeBuilder.build());
+ Node node = nodeBuilder.build();
+
+ logger.trace("{}: Init device state transaction {} putting if absent operational data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, path, node);
+ logger.trace("{}: Init device state transaction {} putting operational data ended.", id, writeTx.getIdentifier());
+
+ logger.trace("{}: Init device state transaction {} putting if absent config data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, path, getNodeWithId(id));
+ logger.trace("{}: Init device state transaction {} putting config data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "init");
+ }
+
+ /**
+  * Replaces this device's operational node with fresh connection status and capabilities.
+  *
+  * @param up true once the device is fully connected, false while (re)connecting
+  * @param capabilities resolved/unresolved capability results to publish
+  */
+ public void updateDeviceData(boolean up, NetconfDeviceCapabilities capabilities) {
+ final Node data = buildDataForNetconfNode(up, capabilities);
+
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+ logger.trace("{}: Update device state transaction {} putting operational data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, id.getTopologyBindingPath(), data);
+ logger.trace("{}: Update device state transaction {} putting operational data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "update");
+ }
+
+ /**
+  * Marks this device's operational node as UnableToConnect, recording the failure
+  * message (or a generic placeholder when no message is available).
+  */
+ public void setDeviceAsFailed(Throwable throwable) {
+ String reason = (throwable != null && throwable.getMessage() != null) ? throwable.getMessage() : UNKNOWN_REASON;
+
+ final NetconfNode netconfNode = new NetconfNodeBuilder().setConnectionStatus(ConnectionStatus.UnableToConnect).setConnectedMessage(reason).build();
+ final Node data = getNodeIdBuilder(id).addAugmentation(NetconfNode.class, netconfNode).build();
+
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+ logger.trace("{}: Setting device state as failed {} putting operational data started.", id, writeTx.getIdentifier());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, id.getTopologyBindingPath(), data);
+ logger.trace("{}: Setting device state as failed {} putting operational data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "update-failed-device");
+ }
+
+ // Builds the Node augmented with a NetconfNode carrying host/port, connection
+ // status and the available/unavailable capability lists.
+ private Node buildDataForNetconfNode(boolean up, NetconfDeviceCapabilities capabilities) {
+ List<String> capabilityList = new ArrayList<>();
+ capabilityList.addAll(capabilities.getNonModuleBasedCapabilities());
+ capabilityList.addAll(FluentIterable.from(capabilities.getResolvedCapabilities()).transform(AVAILABLE_CAPABILITY_TRANSFORMER).toList());
+ final AvailableCapabilitiesBuilder avCapabilitiesBuilder = new AvailableCapabilitiesBuilder();
+ avCapabilitiesBuilder.setAvailableCapability(capabilityList);
+
+ final UnavailableCapabilities unavailableCapabilities =
+ new UnavailableCapabilitiesBuilder().setUnavailableCapability(FluentIterable.from(capabilities.getUnresolvedCapabilites().entrySet())
+ .transform(UNAVAILABLE_CAPABILITY_TRANSFORMER).toList()).build();
+
+ final NetconfNodeBuilder netconfNodeBuilder = new NetconfNodeBuilder()
+ .setHost(id.getHost())
+ .setPort(new PortNumber(id.getAddress().getPort()))
+ .setConnectionStatus(up ? ConnectionStatus.Connected : ConnectionStatus.Connecting)
+ .setAvailableCapabilities(avCapabilitiesBuilder.build())
+ .setUnavailableCapabilities(unavailableCapabilities);
+
+ final NodeBuilder nodeBuilder = getNodeIdBuilder(id);
+ final Node node = nodeBuilder.addAugmentation(NetconfNode.class, netconfNodeBuilder.build()).build();
+
+ return node;
+ }
+
+ /**
+  * Deletes this device's node from both config and operational datastores.
+  */
+ public void removeDeviceConfiguration() {
+ final WriteTransaction writeTx = dataService.newWriteOnlyTransaction();
+
+ logger.trace("{}: Close device state transaction {} removing all data started.", id, writeTx.getIdentifier());
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, id.getTopologyBindingPath());
+ writeTx.delete(LogicalDatastoreType.OPERATIONAL, id.getTopologyBindingPath());
+ logger.trace("{}: Close device state transaction {} removing all data ended.", id, writeTx.getIdentifier());
+
+ commitTransaction(writeTx, "close");
+ }
+
+ // Merges empty network-topology and topology-netconf containers into both
+ // datastores so that later puts under them cannot fail on a missing parent.
+ private void createNetworkTopologyIfNotPresent(final WriteTransaction writeTx) {
+
+ final NetworkTopology networkTopology = new NetworkTopologyBuilder().build();
+ // Static QNAME accessed via the class (not an instance); the format string now
+ // has a placeholder for every argument, so the transaction id is no longer dropped.
+ logger.trace("{}: Merging {} container to ensure its presence in transaction {}", id, NetworkTopology.QNAME, writeTx.getIdentifier());
+ writeTx.merge(LogicalDatastoreType.CONFIGURATION, networkTopologyPath, networkTopology);
+ writeTx.merge(LogicalDatastoreType.OPERATIONAL, networkTopologyPath, networkTopology);
+
+ final Topology topology = new TopologyBuilder().setTopologyId(new TopologyId(TopologyNetconf.QNAME.getLocalName())).build();
+ logger.trace("{}: Merging {} container to ensure its presence in transaction {}", id, Topology.QNAME, writeTx.getIdentifier());
+ writeTx.merge(LogicalDatastoreType.CONFIGURATION, topologyListPath, topology);
+ writeTx.merge(LogicalDatastoreType.OPERATIONAL, topologyListPath, topology);
+ }
+
+ // Submits the transaction, logging the outcome asynchronously.
+ private void commitTransaction(final WriteTransaction transaction, final String txType) {
+ logger.trace("{}: Committing Transaction {}:{}", id, txType, transaction.getIdentifier());
+ final CheckedFuture<Void, TransactionCommitFailedException> result = transaction.submit();
+
+ Futures.addCallback(result, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ logger.trace("{}: Transaction({}) {} SUCCESSFUL", id, txType, transaction.getIdentifier());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ logger.error("{}: Transaction({}) {} FAILED!", id, txType, transaction.getIdentifier(), t);
+ // NOTE(review): this exception is thrown on the callback executor's thread,
+ // not propagated to the caller — confirm this is the intended failure handling.
+ throw new IllegalStateException(id + " Transaction(" + txType + ") not committed correctly", t);
+ }
+ });
+
+ }
+
+ // Config-datastore node: just the key, no NetconfNode augmentation.
+ private static Node getNodeWithId(final RemoteDeviceId id) {
+ final NodeBuilder builder = getNodeIdBuilder(id);
+ return builder.build();
+ }
+
+ private static NodeBuilder getNodeIdBuilder(final RemoteDeviceId id) {
+ final NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(new NodeKey(new NodeId(id.getName())));
+ return nodeBuilder;
+ }
+
+ @Override
+ public void close() throws Exception {
+ removeDeviceConfiguration();
+ }
+}
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.yangtools.yang.common.RpcResult;
protected final RemoteDeviceId id;
protected final NetconfBaseOps netOps;
protected final DataNormalizer normalizer;
- protected final NetconfSessionCapabilities netconfSessionPreferences;
+ protected final NetconfSessionPreferences netconfSessionPreferences;
// Allow commit to be called only once
protected boolean finished = false;
- public AbstractWriteTx(final NetconfBaseOps netOps, final RemoteDeviceId id, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public AbstractWriteTx(final NetconfBaseOps netOps, final RemoteDeviceId id, final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
this.netOps = netOps;
this.id = id;
this.normalizer = normalizer;
private CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readOperationalData(
final YangInstanceIdentifier path) {
- final ListenableFuture<RpcResult<CompositeNode>> configCandidate = netconfOps.getConfigRunning(loggingCallback, Optional.fromNullable(path));
+ final ListenableFuture<RpcResult<CompositeNode>> configCandidate = netconfOps.get(loggingCallback, Optional.fromNullable(path));
// Find data node and normalize its content
final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(configCandidate, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final Logger LOG = LoggerFactory.getLogger(WriteCandidateRunningTx.class);
- public WriteCandidateRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public WriteCandidateRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps, final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
super(id, netOps, normalizer, netconfSessionPreferences);
}
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
}
};
- public WriteCandidateTx(final RemoteDeviceId id, final NetconfBaseOps rpc, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ public WriteCandidateTx(final RemoteDeviceId id, final NetconfBaseOps rpc, final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
super(rpc, id, normalizer, netconfSessionPreferences);
}
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final Logger LOG = LoggerFactory.getLogger(WriteRunningTx.class);
public WriteRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps,
- final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ final DataNormalizer normalizer, final NetconfSessionPreferences netconfSessionPreferences) {
super(netOps, id, normalizer, netconfSessionPreferences);
}
package org.opendaylight.controller.sal.connect.netconf.schema;
import com.google.common.base.Function;
-import com.google.common.base.Objects;
+import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
}
@Override
- protected Objects.ToStringHelper addToStringAttributes(final Objects.ToStringHelper toStringHelper) {
+ protected MoreObjects.ToStringHelper addToStringAttributes(final MoreObjects.ToStringHelper toStringHelper) {
return toStringHelper.add("device", id);
}
return getConfig(callback, NETCONF_CANDIDATE_QNAME, filterPath);
}
- public ListenableFuture<RpcResult<CompositeNode>> get(final FutureCallback<RpcResult<CompositeNode>> callback, final QName datastore, final Optional<YangInstanceIdentifier> filterPath) {
+ public ListenableFuture<RpcResult<CompositeNode>> get(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
Preconditions.checkNotNull(callback);
- Preconditions.checkNotNull(datastore);
final ListenableFuture<RpcResult<CompositeNode>> future;
- if (filterPath.isPresent()) {
- final Node<?> node = toFilterStructure(filterPath.get());
- future = rpc.invokeRpc(NETCONF_GET_QNAME,
- NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, getSourceNode(datastore), node));
- } else {
- future = rpc.invokeRpc(NETCONF_GET_QNAME,
- NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, getSourceNode(datastore)));
- }
+ final Node<?> node = filterPath.isPresent() ? toFilterStructure(filterPath.get()) : NetconfMessageTransformUtil.GET_RPC_CONTENT;
+ future = rpc.invokeRpc(NETCONF_GET_QNAME, NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, node));
Futures.addCallback(future, callback);
return future;
}
- public ListenableFuture<RpcResult<CompositeNode>> getRunning(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
- return get(callback, NETCONF_RUNNING_QNAME, filterPath);
- }
-
- public ListenableFuture<RpcResult<CompositeNode>> getCandidate(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
- return get(callback, NETCONF_CANDIDATE_QNAME, filterPath);
- }
-
-
public ListenableFuture<RpcResult<CompositeNode>> editConfigCandidate(final FutureCallback<? super RpcResult<CompositeNode>> callback, final CompositeNode editStructure, final ModifyAction modifyAction, final boolean rollback) {
return editConfig(callback, NETCONF_CANDIDATE_QNAME, editStructure, Optional.of(modifyAction), rollback);
}
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfState;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
public class NetconfMessageTransformUtil {
public static final String MESSAGE_ID_ATTR = "message-id";
+ public static final QName CREATE_SUBSCRIPTION_RPC_QNAME = QName.cachedReference(QName.create(CreateSubscriptionInput.QNAME, "create-subscription"));
private NetconfMessageTransformUtil() {}
public static final QName IETF_NETCONF_MONITORING_SCHEMA_VERSION = QName.create(IETF_NETCONF_MONITORING, "version");
public static final QName IETF_NETCONF_MONITORING_SCHEMA_NAMESPACE = QName.create(IETF_NETCONF_MONITORING, "namespace");
+ public static final QName IETF_NETCONF_NOTIFICATIONS = QName.create(NetconfCapabilityChange.QNAME, "ietf-netconf-notifications");
+
public static URI NETCONF_URI = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0");
public static QName NETCONF_QNAME = QName.create(NETCONF_URI, null, "netconf");
public static QName NETCONF_DATA_QNAME = QName.create(NETCONF_QNAME, "data");
public static URI NETCONF_CANDIDATE_URI = URI
.create("urn:ietf:params:netconf:capability:candidate:1.0");
+ public static URI NETCONF_NOTIFICATONS_URI = URI
+ .create("urn:ietf:params:netconf:capability:notification:1.0");
+
public static URI NETCONF_RUNNING_WRITABLE_URI = URI
.create("urn:ietf:params:netconf:capability:writable-running:1.0");
public static final CompositeNode COMMIT_RPC_CONTENT =
NodeFactory.createImmutableCompositeNode(NETCONF_COMMIT_QNAME, null, Collections.<Node<?>>emptyList());
+ // Get message
+ public static final CompositeNode GET_RPC_CONTENT =
+ NodeFactory.createImmutableCompositeNode(NETCONF_GET_QNAME, null, Collections.<Node<?>>emptyList());
+
+ // Create-subscription changes message
+ public static final CompositeNode CREATE_SUBSCRIPTION_RPC_CONTENT =
+ NodeFactory.createImmutableCompositeNode(CREATE_SUBSCRIPTION_RPC_QNAME, null, Collections.<Node<?>>emptyList());
+
public static Node<?> toFilterStructure(final YangInstanceIdentifier identifier) {
Node<?> previous = null;
if (Iterables.isEmpty(identifier.getPathArguments())) {
*/
package org.opendaylight.controller.sal.connect.util;
+import com.google.common.base.Preconditions;
+import java.net.InetSocketAddress;
import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Host;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.HostBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.network.topology.topology.topology.types.TopologyNetconf;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-public class RemoteDeviceId {
+public final class RemoteDeviceId {
private final String name;
private final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier path;
private final InstanceIdentifier<Node> bindingPath;
private final NodeKey key;
+ private final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier topologyPath;
+ private final InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node> topologyBindingPath;
+ private InetSocketAddress address;
+ private Host host;
+ @Deprecated
public RemoteDeviceId(final ModuleIdentifier identifier) {
this(Preconditions.checkNotNull(identifier).getInstanceName());
}
+ public RemoteDeviceId(final ModuleIdentifier identifier, Host host) {
+ this(identifier);
+ this.host = host;
+ }
+
+ public RemoteDeviceId(final ModuleIdentifier identifier, InetSocketAddress address) {
+ this(identifier);
+ this.address = address;
+ this.host = buildHost();
+ }
+
+ @Deprecated
public RemoteDeviceId(final String name) {
Preconditions.checkNotNull(name);
this.name = name;
this.key = new NodeKey(new NodeId(name));
this.path = createBIPath(name);
this.bindingPath = createBindingPath(key);
+ this.topologyPath = createBIPathForTopology(name);
+ this.topologyBindingPath = createBindingPathForTopology(key);
+ }
+
+ public RemoteDeviceId(final String name, InetSocketAddress address) {
+ this(name);
+ this.address = address;
+ this.host = buildHost();
}
private static InstanceIdentifier<Node> createBindingPath(final NodeKey key) {
return builder.build();
}
+ private static InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node> createBindingPathForTopology(final NodeKey key) {
+ final InstanceIdentifier<NetworkTopology> networkTopology = InstanceIdentifier.builder(NetworkTopology.class).build();
+ final KeyedInstanceIdentifier<Topology, TopologyKey> topology = networkTopology.child(Topology.class, new TopologyKey(new TopologyId(TopologyNetconf.QNAME.getLocalName())));
+ return topology
+ .child(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.class,
+ new org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey
+ (new org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId(key.getId().getValue())));
+ }
+
+ private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBIPathForTopology(final String name) {
+ final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.InstanceIdentifierBuilder builder =
+ org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder();
+ builder
+ .node(NetworkTopology.QNAME)
+ .nodeWithKey(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), TopologyNetconf.QNAME.getLocalName())
+ .nodeWithKey(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.QNAME,
+ QName.create(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node.QNAME, "node-id"), name);
+ return builder.build();
+ }
+
+ private Host buildHost() {
+ return address.getAddress().getHostAddress() != null
+ ? HostBuilder.getDefaultInstance(address.getAddress().getHostAddress())
+ : HostBuilder.getDefaultInstance(address.getAddress().getHostName());
+ }
+
public String getName() {
return name;
}
return key;
}
+ public InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node> getTopologyBindingPath() {
+ return topologyBindingPath;
+ }
+
+ public YangInstanceIdentifier getTopologyPath() {
+ return topologyPath;
+ }
+
+ public InetSocketAddress getAddress() {
+ return address;
+ }
+
+ public Host getHost() {
+ return host;
+ }
+
@Override
public String toString() {
return "RemoteDevice{" + name +'}';
--- /dev/null
+module netconf-node-topology {
+ namespace "urn:opendaylight:netconf-node-topology";
+ prefix "nettop";
+
+ import network-topology { prefix nt; revision-date 2013-10-21; }
+ import yang-ext { prefix ext; revision-date "2013-07-09";}
+ import ietf-inet-types { prefix inet; revision-date "2010-09-24"; }
+
+ description "Augments the base network-topology model with NETCONF-specific
+ node state: connection status, endpoint address and the capabilities
+ advertised by the remote device.";
+
+ revision "2015-01-14" {
+ description "Initial revision of Topology model";
+ }
+
+ augment "/nt:network-topology/nt:topology/nt:topology-types" {
+ container topology-netconf {
+ }
+ }
+
+ grouping netconf-node-fields {
+ description "State reported for a node managed over NETCONF.";
+
+ leaf connection-status {
+ type enumeration {
+ enum connecting;
+ enum connected;
+ enum unable-to-connect;
+ }
+ }
+
+ leaf host {
+ type inet:host;
+ }
+
+ leaf port {
+ type inet:port-number;
+ }
+
+ leaf connected-message {
+ type string;
+ }
+
+ container available-capabilities {
+ leaf-list available-capability {
+ type string;
+ }
+ }
+
+ container unavailable-capabilities {
+ list unavailable-capability {
+ // key is required: without one this defaults to a keyless
+ // config-true list, which is invalid YANG 1.0
+ key "capability";
+
+ leaf capability {
+ type string;
+ }
+
+ leaf failure-reason {
+ type enumeration {
+ enum missing-source;
+ enum unable-to-resolve;
+ }
+ }
+ }
+ }
+
+ container pass-through {
+ // the enum value must be an XPath string literal; unquoted
+ // 'connected' would select (nonexistent) sibling elements and
+ // the condition would never be true
+ when "../connection-status = 'connected'";
+ description
+ "When the underlying node is connected, its NETCONF context
+ is available verbatim under this container through the
+ mount extension.";
+ }
+ }
+
+ augment "/nt:network-topology/nt:topology/nt:node" {
+ when "../../nt:topology-types/topology-netconf";
+ ext:augment-identifier "netconf-node";
+
+ uses netconf-node-fields;
+ }
+}
}
}
+ leaf reconnect-on-changed-schema {
+ type boolean;
+ default false;
+ description "If true, the connector will automatically disconnect and reconnect when the schemas of the remote device change.
+ Right after connecting, the connector subscribes to base NETCONF notifications and listens for the netconf-capability-change notification";
+ }
+
container dom-registry {
uses config:service-ref {
refine type {
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
+
import com.google.common.base.Optional;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.sal.connect.api.MessageTransformer;
-import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
import org.opendaylight.controller.sal.connect.api.SchemaSourceProviderFactory;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
private static final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver = new NetconfStateSchemas.NetconfStateSchemasResolver() {
@Override
- public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
+ public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
return NetconfStateSchemas.EMPTY;
}
};
public void testNetconfDeviceFailFirstSchemaFailSecondEmpty() throws Exception {
final ArrayList<String> capList = Lists.newArrayList(TEST_CAPABILITY);
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final SchemaContextFactory schemaFactory = getSchemaFactory();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer(), true);
// Monitoring not supported
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(false, capList);
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(false, capList);
device.onRemoteSessionUp(sessionCaps, listener);
Mockito.verify(facade, Mockito.timeout(5000)).onDeviceDisconnected();
@Test
public void testNetconfDeviceMissingSource() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final SchemaContextFactory schemaFactory = getSchemaFactory();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer(), true);
// Monitoring supported
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
device.onRemoteSessionUp(sessionCaps, listener);
- Mockito.verify(facade, Mockito.timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ Mockito.verify(facade, Mockito.timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
Mockito.verify(schemaFactory, times(2)).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
}
@Test
public void testNotificationBeforeSchema() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), getSchemaFactory(), stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer, true);
device.onNotification(netconfMessage);
device.onNotification(netconfMessage);
verify(facade, times(0)).onNotification(any(CompositeNode.class));
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(true,
Lists.newArrayList(TEST_CAPABILITY));
device.onRemoteSessionUp(sessionCaps, listener);
@Test
public void testNetconfDeviceReconnect() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
- final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+ final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
+ final NetconfDeviceCommunicator listener = getListener();
final SchemaContextFactory schemaContextProviderFactory = getSchemaFactory();
final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
= new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaContextProviderFactory, stateSchemasResolver);
- final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
- final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer, true);
+ final NetconfSessionPreferences sessionCaps = getSessionCaps(true,
Lists.newArrayList(TEST_NAMESPACE + "?module=" + TEST_MODULE + "&revision=" + TEST_REVISION));
device.onRemoteSessionUp(sessionCaps, listener);
verify(schemaContextProviderFactory, timeout(5000)).createSchemaContext(any(Collection.class));
verify(messageTransformer, timeout(5000)).onGlobalContextUpdated(any(SchemaContext.class));
- verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
device.onRemoteSessionDown();
verify(facade, timeout(5000)).onDeviceDisconnected();
verify(schemaContextProviderFactory, timeout(5000).times(2)).createSchemaContext(any(Collection.class));
verify(messageTransformer, timeout(5000).times(3)).onGlobalContextUpdated(any(SchemaContext.class));
- verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
}
private SchemaContextFactory getSchemaFactory() {
return parser.resolveSchemaContext(models);
}
- private RemoteDeviceHandler<NetconfSessionCapabilities> getFacade() throws Exception {
- final RemoteDeviceHandler<NetconfSessionCapabilities> remoteDeviceHandler = mockCloseableClass(RemoteDeviceHandler.class);
- doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ private RemoteDeviceHandler<NetconfSessionPreferences> getFacade() throws Exception {
+ final RemoteDeviceHandler<NetconfSessionPreferences> remoteDeviceHandler = mockCloseableClass(RemoteDeviceHandler.class);
+ doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionPreferences.class), any(RpcImplementation.class));
doNothing().when(remoteDeviceHandler).onDeviceDisconnected();
doNothing().when(remoteDeviceHandler).onNotification(any(CompositeNode.class));
return remoteDeviceHandler;
return messageTransformer;
}
- public NetconfSessionCapabilities getSessionCaps(final boolean addMonitor, final Collection<String> additionalCapabilities) {
+ public NetconfSessionPreferences getSessionCaps(final boolean addMonitor, final Collection<String> additionalCapabilities) {
final ArrayList<String> capabilities = Lists.newArrayList(
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0,
XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
capabilities.addAll(additionalCapabilities);
- return NetconfSessionCapabilities.fromStrings(
+ return NetconfSessionPreferences.fromStrings(
capabilities);
}
- public RemoteDeviceCommunicator<NetconfMessage> getListener() throws Exception {
- final RemoteDeviceCommunicator<NetconfMessage> remoteDeviceCommunicator = mockCloseableClass(RemoteDeviceCommunicator.class);
+ public NetconfDeviceCommunicator getListener() throws Exception {
+ final NetconfDeviceCommunicator remoteDeviceCommunicator = mockCloseableClass(NetconfDeviceCommunicator.class);
doReturn(Futures.immediateFuture(rpcResult)).when(remoteDeviceCommunicator).sendRequest(any(NetconfMessage.class), any(QName.class));
return remoteDeviceCommunicator;
}
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.NetconfClientSession;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
-import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.ReconnectStrategy;
NetconfClientSession mockSession;
@Mock
- RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockDevice;
+ RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> mockDevice;
NetconfDeviceCommunicator communicator;
public void setUp() throws Exception {
MockitoAnnotations.initMocks( this );
- communicator = new NetconfDeviceCommunicator( new RemoteDeviceId( "test" ), mockDevice );
+ communicator = new NetconfDeviceCommunicator( new RemoteDeviceId( "test" ), mockDevice);
}
@SuppressWarnings("unchecked")
- void setupSession()
- {
- doReturn( Collections.<String>emptySet() ).when( mockSession ).getServerCapabilities();
- doNothing().when( mockDevice ).onRemoteSessionUp( any( NetconfSessionCapabilities.class ),
- any( RemoteDeviceCommunicator.class ) );
- communicator.onSessionUp( mockSession );
+ void setupSession() {
+ doReturn(Collections.<String>emptySet()).when(mockSession).getServerCapabilities();
+ doNothing().when(mockDevice).onRemoteSessionUp(any(NetconfSessionPreferences.class),
+ any(NetconfDeviceCommunicator.class));
+ communicator.onSessionUp(mockSession);
}
private ListenableFuture<RpcResult<NetconfMessage>> sendRequest() throws Exception {
testCapability );
doReturn( serverCapabilities ).when( mockSession ).getServerCapabilities();
- ArgumentCaptor<NetconfSessionCapabilities> netconfSessionCapabilities =
- ArgumentCaptor.forClass( NetconfSessionCapabilities.class );
- doNothing().when( mockDevice ).onRemoteSessionUp( netconfSessionCapabilities.capture(), eq( communicator ) );
+ ArgumentCaptor<NetconfSessionPreferences> NetconfSessionPreferences =
+ ArgumentCaptor.forClass( NetconfSessionPreferences.class );
+ doNothing().when( mockDevice ).onRemoteSessionUp( NetconfSessionPreferences.capture(), eq( communicator ) );
communicator.onSessionUp( mockSession );
verify( mockSession ).getServerCapabilities();
- verify( mockDevice ).onRemoteSessionUp( netconfSessionCapabilities.capture(), eq( communicator ) );
+ verify( mockDevice ).onRemoteSessionUp( NetconfSessionPreferences.capture(), eq( communicator ) );
- NetconfSessionCapabilities actualCapabilites = netconfSessionCapabilities.getValue();
+ NetconfSessionPreferences actualCapabilites = NetconfSessionPreferences.getValue();
assertEquals( "containsModuleCapability", true, actualCapabilites.containsNonModuleCapability(
NetconfMessageTransformUtil.NETCONF_ROLLBACK_ON_ERROR_URI.toString()) );
assertEquals( "containsModuleCapability", false, actualCapabilites.containsNonModuleCapability(testCapability) );
return new NetconfMessage( doc );
}
+ // Verify that a response to the latest request is still processed when the
+ // responses to earlier requests never arrive.
+ @Test
+ public void testOnMissingResponseMessage() throws Exception {
+
+ setupSession();
+
+ // Issue three requests; only the last future is needed for verification.
+ sendRequest( UUID.randomUUID().toString() );
+ sendRequest( UUID.randomUUID().toString() );
+
+ final String lastMessageId = UUID.randomUUID().toString();
+ final ListenableFuture<RpcResult<NetconfMessage>> lastResultFuture = sendRequest( lastMessageId );
+
+ // Deliver only the response to the third request; responses 1 and 2 are omitted.
+ communicator.onMessage( mockSession, createSuccessResponseMessage( lastMessageId ) );
+
+ verifyResponseMessage( lastResultFuture.get(), lastMessageId );
+ }
+
@Test
public void testOnSuccessfulResponseMessage() throws Exception {
setupSession();
*/
@Test
public void testNetconfDeviceReconnectInCommunicator() throws Exception {
- final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> device = mock(RemoteDevice.class);
+ final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> device = mock(RemoteDevice.class);
final TimedReconnectStrategy timedReconnectStrategy = new TimedReconnectStrategy(GlobalEventExecutor.INSTANCE, 10000, 0, 1.0, null, 100L, null);
final ReconnectStrategy reconnectStrategy = spy(new ReconnectStrategy() {
}
});
- final NetconfDeviceCommunicator listener = new NetconfDeviceCommunicator(new RemoteDeviceId("test"), device);
final EventLoopGroup group = new NioEventLoopGroup();
final Timer time = new HashedWheelTimer();
try {
- final NetconfClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create()
+ final NetconfDeviceCommunicator listener = new NetconfDeviceCommunicator(new RemoteDeviceId("test"), device);
+ final NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create()
.withAddress(new InetSocketAddress("localhost", 65000))
.withReconnectStrategy(reconnectStrategy)
.withConnectStrategyFactory(new ReconnectStrategyFactory() {
.withSessionListener(listener)
.build();
-
listener.initializeRemoteConnection(new NetconfClientDispatcherImpl(group, group, time), cfg);
verify(reconnectStrategy, timeout((int) TimeUnit.MINUTES.toMillis(3)).times(101)).scheduleReconnect(any(Throwable.class));
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
-public class NetconfSessionCapabilitiesTest {
+public class NetconfSessionPreferencesTest {
@Test
public void testMerge() throws Exception {
"urn:ietf:params:netconf:base:1.0",
"urn:ietf:params:netconf:capability:rollback-on-error:1.0"
);
- final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ final NetconfSessionPreferences sessionCaps1 = NetconfSessionPreferences.fromStrings(caps1);
assertCaps(sessionCaps1, 2, 3);
final List<String> caps2 = Lists.newArrayList(
"namespace:4?module=module4&revision=2012-12-12",
"randomNonModuleCap"
);
- final NetconfSessionCapabilities sessionCaps2 = NetconfSessionCapabilities.fromStrings(caps2);
+ final NetconfSessionPreferences sessionCaps2 = NetconfSessionPreferences.fromStrings(caps2);
assertCaps(sessionCaps2, 1, 2);
- final NetconfSessionCapabilities merged = sessionCaps1.replaceModuleCaps(sessionCaps2);
+ final NetconfSessionPreferences merged = sessionCaps1.replaceModuleCaps(sessionCaps2);
assertCaps(merged, 2, 2 + 1 /*Preserved monitoring*/);
for (final QName qName : sessionCaps2.getModuleBasedCaps()) {
assertThat(merged.getModuleBasedCaps(), hasItem(qName));
"namespace:2?module=module2&RANDOMSTRING;revision=2013-12-12" // This one should be ignored(same as first), since revision is in wrong format
);
- final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ final NetconfSessionPreferences sessionCaps1 = NetconfSessionPreferences.fromStrings(caps1);
assertCaps(sessionCaps1, 0, 3);
}
- private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
+ private void assertCaps(final NetconfSessionPreferences sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import com.google.common.util.concurrent.Futures;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Unit test for NetconfDeviceTopologyAdapter: verifies that device state
+ * transitions (failed / updated) result in datastore writes through a
+ * mocked DataBroker, without opening any real connection.
+ */
+public class NetconfDeviceTopologyAdapterTest {
+
+ // Identifier of the device under test; the address is never dialed.
+ private RemoteDeviceId id = new RemoteDeviceId("test", new InetSocketAddress("localhost", 22));
+
+ @Mock
+ private DataBroker broker;
+ @Mock
+ private WriteTransaction writeTx;
+ @Mock
+ private Node data;
+
+ // Identifier reported by the mocked transaction (used by the adapter for logging).
+ private String txIdent = "test transaction";
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ // Every write-only transaction the adapter requests is the same mock,
+ // so put/merge invocations can be counted on a single object.
+ doReturn(writeTx).when(broker).newWriteOnlyTransaction();
+ doNothing().when(writeTx).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+ doNothing().when(writeTx).merge(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+
+ doReturn(txIdent).when(writeTx).getIdentifier();
+ }
+
+ @Test
+ public void testFailedDevice() throws Exception {
+ doReturn(Futures.immediateCheckedFuture(null)).when(writeTx).submit();
+
+ NetconfDeviceTopologyAdapter adapter = new NetconfDeviceTopologyAdapter(id, broker);
+ adapter.setDeviceAsFailed(null);
+
+ // times(2)/times(3) imply the adapter also writes on construction
+ // (initial topology node) - NOTE(review): confirm against the adapter impl.
+ verify(broker, times(2)).newWriteOnlyTransaction();
+ verify(writeTx, times(3)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+ }
+
+ @Test
+ public void testDeviceUpdate() throws Exception {
+ doReturn(Futures.immediateCheckedFuture(null)).when(writeTx).submit();
+
+ NetconfDeviceTopologyAdapter adapter = new NetconfDeviceTopologyAdapter(id, broker);
+ adapter.updateDeviceData(true, new NetconfDeviceCapabilities());
+
+ verify(broker, times(2)).newWriteOnlyTransaction();
+ verify(writeTx, times(3)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class));
+ }
+
+}
\ No newline at end of file
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
@Test
public void testDiscardChanges() {
final WriteCandidateTx tx = new WriteCandidateTx(id, new NetconfBaseOps(rpc), normalizer,
- NetconfSessionCapabilities.fromStrings(Collections.<String>emptySet()));
+ NetconfSessionPreferences.fromStrings(Collections.<String>emptySet()));
final CheckedFuture<Void, TransactionCommitFailedException> submitFuture = tx.submit();
try {
submitFuture.checkedGet();
.when(rpc).invokeRpc(any(QName.class), any(CompositeNode.class));
final WriteRunningTx tx = new WriteRunningTx(id, new NetconfBaseOps(rpc), normalizer,
- NetconfSessionCapabilities.fromStrings(Collections.<String>emptySet()));
+ NetconfSessionPreferences.fromStrings(Collections.<String>emptySet()));
try {
tx.delete(LogicalDatastoreType.CONFIGURATION, yangIId);
} catch (final Exception e) {
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal.tx;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Unit test for ReadOnlyTx: verifies that datastore reads are translated
+ * into the corresponding NETCONF RPCs (get-config for CONFIGURATION,
+ * get for OPERATIONAL) on a mocked RPC implementation.
+ */
+public class ReadOnlyTxTest {
+
+ // Root (empty) instance identifier used both as read path and as the
+ // value returned by the mocked normalizer.
+ private static final YangInstanceIdentifier path = YangInstanceIdentifier.create();
+
+ @Mock
+ private RpcImplementation rpc;
+ @Mock
+ private DataNormalizer normalizer;
+ @Mock
+ private CompositeNode mockedNode;
+
+ @Before
+ public void setUp() throws DataNormalizationException {
+ MockitoAnnotations.initMocks(this);
+ doReturn(path).when(normalizer).toLegacy(any(YangInstanceIdentifier.class));
+ // Any RPC invocation immediately succeeds with the mocked node as result.
+ doReturn(com.google.common.util.concurrent.Futures.immediateFuture(RpcResultBuilder.success(mockedNode).build())).when(rpc).invokeRpc(any(org.opendaylight.yangtools.yang.common.QName.class), any(CompositeNode.class));
+ doReturn("node").when(mockedNode).toString();
+ }
+
+ @Test
+ public void testRead() throws Exception {
+ final NetconfBaseOps netconfOps = new NetconfBaseOps(rpc);
+
+ final ReadOnlyTx readOnlyTx = new ReadOnlyTx(netconfOps, normalizer, new RemoteDeviceId("a", new InetSocketAddress("localhost", 196)));
+
+ // A CONFIGURATION read must issue a <get-config> RPC ...
+ readOnlyTx.read(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.create());
+ verify(rpc).invokeRpc(Mockito.same(NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME), any(CompositeNode.class));
+ // ... and an OPERATIONAL read must issue a <get> RPC.
+ readOnlyTx.read(LogicalDatastoreType.OPERATIONAL, path);
+ verify(rpc).invokeRpc(Mockito.same(NetconfMessageTransformUtil.NETCONF_GET_QNAME), any(CompositeNode.class));
+ }
+}
\ No newline at end of file
import akka.actor.Terminated;
import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.common.actor.Monitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TerminationMonitor extends UntypedActor{
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
+ private static final Logger LOG = LoggerFactory.getLogger(TerminationMonitor.class);
public TerminationMonitor(){
- LOG.info("Created TerminationMonitor");
+ LOG.debug("Created TerminationMonitor");
}
@Override public void onReceive(Object message) throws Exception {
import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.yang.common.QName;
-
import java.io.Serializable;
+import org.opendaylight.yangtools.yang.common.QName;
public class ExecuteRpc implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 1128904894827335676L;
- private final String inputCompositeNode;
- private final QName rpc;
+ private final String inputCompositeNode;
+ private final QName rpc;
- public ExecuteRpc(final String inputCompositeNode, final QName rpc) {
- Preconditions.checkNotNull(inputCompositeNode, "Composite Node input string should be present");
- Preconditions.checkNotNull(rpc, "rpc Qname should not be null");
+ public ExecuteRpc(final String inputCompositeNode, final QName rpc) {
+ Preconditions.checkNotNull(inputCompositeNode, "Composite Node input string should be present");
+ Preconditions.checkNotNull(rpc, "rpc Qname should not be null");
- this.inputCompositeNode = inputCompositeNode;
- this.rpc = rpc;
- }
+ this.inputCompositeNode = inputCompositeNode;
+ this.rpc = rpc;
+ }
- public String getInputCompositeNode() {
- return inputCompositeNode;
- }
+ public String getInputCompositeNode() {
+ return inputCompositeNode;
+ }
- public QName getRpc() {
- return rpc;
- }
+ public QName getRpc() {
+ return rpc;
+ }
}
package org.opendaylight.controller.remote.rpc.messages;
import com.google.common.base.Preconditions;
+import java.io.Serializable;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import java.io.Serializable;
-
public class InvokeRpc implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -2813459607858108953L;
- private final QName rpc;
- private final YangInstanceIdentifier identifier;
- private final CompositeNode input;
+ private final QName rpc;
+ private final YangInstanceIdentifier identifier;
+ private final CompositeNode input;
- public InvokeRpc(final QName rpc, final YangInstanceIdentifier identifier, final CompositeNode input) {
- Preconditions.checkNotNull(rpc, "rpc qname should not be null");
- Preconditions.checkNotNull(input, "rpc input should not be null");
+ public InvokeRpc(final QName rpc, final YangInstanceIdentifier identifier, final CompositeNode input) {
+ Preconditions.checkNotNull(rpc, "rpc qname should not be null");
+ Preconditions.checkNotNull(input, "rpc input should not be null");
- this.rpc = rpc;
- this.identifier = identifier;
- this.input = input;
- }
+ this.rpc = rpc;
+ this.identifier = identifier;
+ this.input = input;
+ }
- public QName getRpc() {
- return rpc;
- }
+ public QName getRpc() {
+ return rpc;
+ }
- public YangInstanceIdentifier getIdentifier() {
- return identifier;
- }
+ public YangInstanceIdentifier getIdentifier() {
+ return identifier;
+ }
- public CompositeNode getInput() {
- return input;
- }
+ public CompositeNode getInput() {
+ return input;
+ }
}
import java.io.Serializable;
public class RpcResponse implements Serializable {
- private static final long serialVersionUID = 1L;
- private final String resultCompositeNode;
+ private static final long serialVersionUID = -4211279498688989245L;
- public RpcResponse(final String resultCompositeNode) {
- this.resultCompositeNode = resultCompositeNode;
- }
+ private final String resultCompositeNode;
- public String getResultCompositeNode() {
- return resultCompositeNode;
- }
+ public RpcResponse(final String resultCompositeNode) {
+ this.resultCompositeNode = resultCompositeNode;
+ }
+
+ public String getResultCompositeNode() {
+ return resultCompositeNode;
+ }
}
import org.opendaylight.controller.sal.connector.api.RpcRouter;
public class RoutingTable implements Copier<RoutingTable>, Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 5592610415175278760L;
private final Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
private ActorRef router;
package org.opendaylight.controller.remote.rpc.registry;
import akka.actor.ActorRef;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.japi.Option;
import akka.japi.Pair;
import com.google.common.base.Preconditions;
*/
public class RpcRegistry extends BucketStore<RoutingTable> {
- final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
-
public RpcRegistry() {
getLocalBucket().setData(new RoutingTable());
}
import java.io.Serializable;
public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 294779770032719196L;
private Long version = System.currentTimeMillis();
import akka.actor.Address;
import akka.actor.Props;
import akka.cluster.ClusterActorRefProvider;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import com.google.common.annotations.VisibleForTesting;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
import org.opendaylight.controller.utils.ConditionalProbe;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A store that syncs its data across nodes in the cluster.
private static final Long NO_VERSION = -1L;
- final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+ protected final Logger log = LoggerFactory.getLogger(getClass());
/**
* Bucket owned by the node
import akka.cluster.ClusterEvent;
import akka.cluster.Member;
import akka.dispatch.Mapper;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
import akka.pattern.Patterns;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
-
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
/**
* Gossiper that syncs bucket store across nodes in the cluster.
public class Gossiper extends AbstractUntypedActorWithMetering {
- final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+ private final Logger log = LoggerFactory.getLogger(getClass());
private Cluster cluster;
@Override
public void postStop(){
- if (cluster != null)
+ if (cluster != null) {
cluster.unsubscribe(getSelf());
- if (gossipTask != null)
+ }
+ if (gossipTask != null) {
gossipTask.cancel();
+ }
}
@Override
protected void handleReceive(Object message) throws Exception {
//Usually sent by self via gossip task defined above. But its not enforced.
//These ticks can be sent by another actor as well which is esp. useful while testing
- if (message instanceof GossipTick)
+ if (message instanceof GossipTick) {
receiveGossipTick();
-
- //Message from remote gossiper with its bucket versions
- else if (message instanceof GossipStatus)
+ } else if (message instanceof GossipStatus) {
+ // Message from remote gossiper with its bucket versions
receiveGossipStatus((GossipStatus) message);
-
- //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
- //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
- //message with its local versions
- else if (message instanceof GossipEnvelope)
+ } else if (message instanceof GossipEnvelope) {
+ // Message from remote gossiper with buckets. This is usually in response to GossipStatus
+ // message. The contained buckets are newer as determined by the remote gossiper by
+ // comparing the GossipStatus message with its local versions.
receiveGossip((GossipEnvelope) message);
-
- else if (message instanceof ClusterEvent.MemberUp) {
+ } else if (message instanceof ClusterEvent.MemberUp) {
receiveMemberUp(((ClusterEvent.MemberUp) message).member());
} else if (message instanceof ClusterEvent.MemberRemoved) {
} else if ( message instanceof ClusterEvent.UnreachableMember){
receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
- } else
+ } else {
unhandled(message);
+ }
}
/**
*/
void receiveMemberUp(Member member) {
- if (selfAddress.equals(member.address()))
+ if (selfAddress.equals(member.address())) {
return; //ignore up notification for self
+ }
- if (!clusterMembers.contains(member.address()))
+ if (!clusterMembers.contains(member.address())) {
clusterMembers.add(member.address());
+ }
if(log.isDebugEnabled()) {
log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
}
* 3. If there are more than one member, randomly pick one and send gossip status (bucket versions) to it.
*/
void receiveGossipTick(){
- if (clusterMembers.size() == 0) return; //no members to send gossip status to
+ if (clusterMembers.size() == 0) {
+ return; //no members to send gossip status to
+ }
Address remoteMemberToGossipTo;
- if (clusterMembers.size() == 1)
+ if (clusterMembers.size() == 1) {
remoteMemberToGossipTo = clusterMembers.get(0);
- else {
+ } else {
Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
remoteMemberToGossipTo = clusterMembers.get(randomIndex);
}
*/
void receiveGossipStatus(GossipStatus status){
//Don't accept messages from non-members
- if (!clusterMembers.contains(status.from()))
+ if (!clusterMembers.contains(status.from())) {
return;
+ }
final ActorRef sender = getSender();
Future<Object> futureReply =
for (Address address : remoteVersions.keySet()){
- if (localVersions.get(address) == null || remoteVersions.get(address) == null)
+ if (localVersions.get(address) == null || remoteVersions.get(address) == null) {
continue; //this condition is taken care of by above diffs
- if (localVersions.get(address) < remoteVersions.get(address))
+ }
+ if (localVersions.get(address) < remoteVersions.get(address)) {
localIsOlder.add(address);
- else if (localVersions.get(address) > remoteVersions.get(address))
+ } else if (localVersions.get(address) > remoteVersions.get(address)) {
localIsNewer.add(address);
+ }
}
- if (!localIsOlder.isEmpty())
+ if (!localIsOlder.isEmpty()) {
sendGossipStatusTo(sender, localVersions );
+ }
- if (!localIsNewer.isEmpty())
+ if (!localIsNewer.isEmpty()) {
sendGossipTo(sender, localIsNewer);//send newer buckets to remote
+ }
}
return null;
}
public static class ContainsBuckets implements Serializable{
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -4940160367495308286L;
+
private final Map<Address, Bucket> buckets;
public ContainsBuckets(Map<Address, Bucket> buckets){
}
public static class ContainsBucketVersions implements Serializable{
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -8172148925383801613L;
+
Map<Address, Long> versions;
public ContainsBucketVersions(Map<Address, Long> versions) {
public static class GossiperMessages{
public static class Tick implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -4770935099506366773L;
}
public static final class GossipTick extends Tick {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 5803354404380026143L;
}
public static final class GossipStatus extends ContainsBucketVersions implements Serializable{
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = -593037395143883265L;
+
private final Address from;
public GossipStatus(Address from, Map<Address, Long> versions) {
}
public static final class GossipEnvelope extends ContainsBuckets implements Serializable {
- private static final long serialVersionUID = 1L;
+ private static final long serialVersionUID = 8346634072582438818L;
+
private final Address from;
private final Address to;
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-codec-gson</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-model-export</artifactId>
+ <!-- FIXME: remove the explicit version once the model-export package is part of yangtools-artifacts -->
+ <version>0.7.0-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<Private-Package>org.opendaylight.controller.sal.rest.*,
org.opendaylight.controller.sal.restconf.rpc.*,
org.opendaylight.controller.sal.restconf.impl,
+ org.opendaylight.controller.md.sal.rest.common.*,
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.rest.connector.rev140724.*,
</Private-Package>
<Import-Package>*,
--- /dev/null
+/**
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.sal.rest.common;
+
+import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
+
+/**
+ * sal-rest-connector
+ * org.opendaylight.controller.md.sal.rest.common
+ *
+ * Utility class centralizing all needed validation functionality for the Restconf osgi module.
+ * All methods throw {@link RestconfDocumentedException} only, which is the representation
+ * of every error situation defined by the restconf-netconf specification.
+ *
+ * @see <a href="https://tools.ietf.org/html/draft-bierman-netconf-restconf-02">draft-bierman-netconf-restconf-02</a>
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Feb 24, 2015
+ */
+public class RestconfValidationUtils {
+
+ private RestconfValidationUtils() {
+ throw new UnsupportedOperationException("Utility class");
+ }
+
+ /**
+ * Throws {@link RestconfDocumentedException} when the condition is false.
+ *
+ * @param condition - condition that must hold, otherwise the exception is raised
+ * @param type - {@link ErrorType} used to create the {@link RestconfDocumentedException}
+ * @param tag - {@link ErrorTag} used to create the {@link RestconfDocumentedException}
+ * @param message - error message used to create the {@link RestconfDocumentedException}
+ */
+ public static void checkDocumentedError(final boolean condition, final ErrorType type,
+ final ErrorTag tag, final String message) {
+ if(!condition) {
+ throw new RestconfDocumentedException(message, type, tag);
+ }
+ }
+
+ /**
+ * Throws {@link RestconfDocumentedException} when the value is null, otherwise
+ * returns the same value.
+ * {@link ErrorType} is relevant for the server application layer;
+ * {@link ErrorTag} is 404 data-missing.
+ *
+ * @see <a href="https://tools.ietf.org/html/draft-bierman-netconf-restconf-02">draft-bierman-netconf-restconf-02</a>
+ *
+ * @param value - some value from {@link org.opendaylight.yangtools.yang.model.api.Module}
+ * @param moduleName - name of {@link org.opendaylight.yangtools.yang.model.api.Module}
+ * @return - T value (same input value)
+ */
+ public static <T> T checkNotNullDocumented(final T value, final String moduleName) {
+ if(value == null) {
+ // Leading space before "was" keeps the message readable:
+ // "Module <name> was not found." (original concatenation omitted it).
+ final String errMsg = "Module " + moduleName + " was not found.";
+ throw new RestconfDocumentedException(errMsg, ErrorType.APPLICATION, ErrorTag.DATA_MISSING);
+ }
+ return value;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+/**
+ * JAX-RS body writer serializing a {@link SchemaExportContext} as the raw
+ * YANG source of its module for the {@code application/yang} media type.
+ */
+@Provider
+@Produces(SchemaRetrievalService.YANG_MEDIA_TYPE)
+public class SchemaExportContentYangBodyWriter implements MessageBodyWriter<SchemaExportContext> {
+
+ @Override
+ public boolean isWriteable(final Class<?> type, final Type genericType, final Annotation[] annotations,
+ final MediaType mediaType) {
+ return type.equals(SchemaExportContext.class);
+ }
+
+ @Override
+ public long getSize(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType) {
+ // Size is not known up-front; -1 lets the JAX-RS runtime handle it.
+ return -1;
+ }
+
+ @Override
+ public void writeTo(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType,
+ final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream) throws IOException,
+ WebApplicationException {
+ final PrintWriter writer = new PrintWriter(entityStream);
+ writer.write(t.getModule().getSource());
+ // PrintWriter buffers internally: without an explicit flush the module
+ // source may never reach the entity stream. Do not close the writer -
+ // the underlying stream is owned by the JAX-RS container.
+ writer.flush();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+import javax.xml.stream.XMLStreamException;
+import org.opendaylight.yangtools.yang.model.export.YinExportUtils;
+
+/**
+ * JAX-RS body writer serializing a {@link SchemaExportContext} as YIN XML
+ * (via YinExportUtils) for the {@code application/yin+xml} media type.
+ */
+@Provider
+@Produces(SchemaRetrievalService.YIN_MEDIA_TYPE)
+public class SchemaExportContentYinBodyWriter implements MessageBodyWriter<SchemaExportContext> {
+
+ @Override
+ public boolean isWriteable(final Class<?> type, final Type genericType, final Annotation[] annotations,
+ final MediaType mediaType) {
+ return type.equals(SchemaExportContext.class);
+ }
+
+ @Override
+ public long getSize(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType) {
+ // Size is not known up-front; -1 lets the JAX-RS runtime handle it.
+ return -1;
+ }
+
+ @Override
+ public void writeTo(final SchemaExportContext t, final Class<?> type, final Type genericType,
+ final Annotation[] annotations, final MediaType mediaType,
+ final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream) throws IOException,
+ WebApplicationException {
+ try {
+ YinExportUtils.writeModuleToOutputStream(t.getSchemaContext(), t.getModule(), entityStream);
+ } catch (final XMLStreamException e) {
+ // A serialization failure here indicates an internal error, not bad input.
+ throw new IllegalStateException(e);
+ }
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Immutable holder pairing a {@link SchemaContext} with one {@link Module}
+ * selected from it; consumed by the schema-export body writers.
+ */
+public class SchemaExportContext {
+
+    private final SchemaContext schemaContext;
+    private final Module module;
+
+    public SchemaExportContext(final SchemaContext ctx, final Module module) {
+        schemaContext = ctx;
+        this.module = module;
+    }
+
+    public SchemaContext getSchemaContext() {
+        return schemaContext;
+    }
+
+    public Module getModule() {
+        return module;
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import com.google.common.annotations.Beta;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+
+/**
+ * REST endpoint exposing the source of a YANG module, negotiated as either
+ * YIN ("application/yin+xml") or YANG ("application/yang") media type.
+ * The path identifier is expected to carry module name and revision, and
+ * may traverse a mount point (see SchemaRetrievalServiceImpl).
+ */
+@Beta
+public interface SchemaRetrievalService {
+
+    public static final String YANG_MEDIA_TYPE = "application/yang";
+    public static final String YIN_MEDIA_TYPE = "application/yin+xml";
+
+    @GET
+    @Produces({YIN_MEDIA_TYPE,YANG_MEDIA_TYPE})
+    @Path("/modules/module/{identifier:.+}/schema")
+    SchemaExportContext getSchema(@PathParam("identifier") String mountAndModuleId);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.rest.schema;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+import java.text.ParseException;
+import java.util.Date;
+import java.util.Iterator;
+import org.opendaylight.controller.md.sal.rest.common.RestconfValidationUtils;
+import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
+import org.opendaylight.controller.sal.restconf.impl.InstanceIdentifierContext;
+import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
+import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Default {@link SchemaRetrievalService} implementation. Parses the request
+ * identifier of the form "[mount-point-path/yang-ext:mount/]module-name/revision",
+ * selects either the global schema context or the mount point's context, and
+ * looks up the named module at the given revision.
+ */
+public class SchemaRetrievalServiceImpl implements SchemaRetrievalService {
+
+    private final ControllerContext salContext;
+
+    private static final Splitter SLASH_SPLITTER = Splitter.on("/");
+    // NOTE(review): AT_SPLITTER is not referenced anywhere in this class — confirm
+    // whether it is planned for future use before removing.
+    private static final Splitter AT_SPLITTER = Splitter.on("@");
+    private static final String MOUNT_ARG = ControllerContext.MOUNT;
+
+    public SchemaRetrievalServiceImpl(final ControllerContext controllerContext) {
+        salContext = controllerContext;
+    }
+
+
+    @Override
+    public SchemaExportContext getSchema(final String mountAndModule) {
+        final SchemaContext schemaContext;
+        final Iterable<String> pathComponents = SLASH_SPLITTER.split(mountAndModule);
+        final Iterator<String> componentIter = pathComponents.iterator();
+        // No mount marker anywhere in the path: resolve against the global schema.
+        if(!Iterables.contains(pathComponents, MOUNT_ARG)) {
+            schemaContext = salContext.getGlobalSchema();
+        } else {
+            // Rebuild the path up to and including the mount marker so the
+            // mount point's own schema context can be located.
+            final StringBuilder pathBuilder = new StringBuilder();
+            while(componentIter.hasNext()) {
+                final String current = componentIter.next();
+                // It is argument, not last element.
+                if(pathBuilder.length() != 0) {
+                    pathBuilder.append("/");
+                }
+                pathBuilder.append(current);
+                if(MOUNT_ARG.equals(current)) {
+                    // We stop right at mountpoint, last two arguments should
+                    // be module name and revision
+                    break;
+                }
+            }
+            schemaContext = getMountSchemaContext(pathBuilder.toString());
+
+        }
+
+        // The iterator now points at the module name; the validation helper is
+        // presumed to raise the documented RESTCONF error when the condition
+        // does not hold — confirm against RestconfValidationUtils.
+        RestconfValidationUtils.checkDocumentedError(componentIter.hasNext(),
+                ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE, "Module name must be supplied.");
+        final String moduleName = componentIter.next();
+        RestconfValidationUtils.checkDocumentedError(componentIter.hasNext(),
+                ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE, "Revision date must be supplied.");
+        final String revisionString = componentIter.next();
+        return getExportUsingNameAndRevision(schemaContext, moduleName, revisionString);
+    }
+
+    private SchemaExportContext getExportUsingNameAndRevision(final SchemaContext schemaContext, final String moduleName,
+            final String revisionStr) {
+        try {
+            // Revision is expected in the standard YANG YYYY-MM-DD format.
+            final Date revision = SimpleDateFormatUtil.getRevisionFormat().parse(revisionStr);
+            final Module module = schemaContext.findModuleByName(moduleName, revision);
+            return new SchemaExportContext(schemaContext, RestconfValidationUtils.checkNotNullDocumented(module, moduleName));
+        } catch (final ParseException e) {
+            throw new RestconfDocumentedException("Supplied revision is not in expected date format YYYY-mm-dd", e);
+        }
+    }
+
+    private SchemaContext getMountSchemaContext(final String identifier) {
+        final InstanceIdentifierContext mountContext = salContext.toMountPointIdentifier(identifier);
+        return mountContext.getSchemaContext();
+    }
+
+
+
+}
+
import java.io.IOException;
/**
+ * @deprecated class will be removed in Lithium release
+ *
* This class parses JSON elements from a gson JsonReader. It disallows multiple elements of the same name unlike the
* default gson JsonParser."
*/
+@Deprecated
public class JsonParser {
- public JsonElement parse(JsonReader reader) throws JsonIOException, JsonSyntaxException {
+ public JsonElement parse(final JsonReader reader) throws JsonIOException, JsonSyntaxException {
// code copied from gson's JsonParser and Stream classes
- boolean lenient = reader.isLenient();
+ final boolean lenient = reader.isLenient();
reader.setLenient(true);
boolean isEmpty = true;
try {
reader.peek();
isEmpty = false;
return read(reader);
- } catch (EOFException e) {
+ } catch (final EOFException e) {
if (isEmpty) {
return JsonNull.INSTANCE;
}
// The stream ended prematurely so it is likely a syntax error.
throw new JsonSyntaxException(e);
- } catch (MalformedJsonException e) {
+ } catch (final MalformedJsonException e) {
throw new JsonSyntaxException(e);
- } catch (IOException e) {
+ } catch (final IOException e) {
throw new JsonIOException(e);
- } catch (NumberFormatException e) {
+ } catch (final NumberFormatException e) {
throw new JsonSyntaxException(e);
} catch (StackOverflowError | OutOfMemoryError e) {
throw new JsonParseException("Failed parsing JSON source: " + reader + " to Json", e);
}
}
- public JsonElement read(JsonReader in) throws IOException {
+ public JsonElement read(final JsonReader in) throws IOException {
switch (in.peek()) {
case STRING:
return new JsonPrimitive(in.nextString());
case NUMBER:
- String number = in.nextString();
+ final String number = in.nextString();
return new JsonPrimitive(new LazilyParsedNumber(number));
case BOOLEAN:
return new JsonPrimitive(in.nextBoolean());
in.nextNull();
return JsonNull.INSTANCE;
case BEGIN_ARRAY:
- JsonArray array = new JsonArray();
+ final JsonArray array = new JsonArray();
in.beginArray();
while (in.hasNext()) {
array.add(read(in));
in.endArray();
return array;
case BEGIN_OBJECT:
- JsonObject object = new JsonObject();
+ final JsonObject object = new JsonObject();
in.beginObject();
while (in.hasNext()) {
final String childName = in.nextName();
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Consumes({ Draft02.MediaTypes.DATA + RestconfService.JSON, Draft02.MediaTypes.OPERATION + RestconfService.JSON,
MediaType.APPLICATION_JSON })
WebApplicationException {
try {
return JsonToCompositeNodeReader.read(entityStream);
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.debug("Error parsing json input", e);
throw new RestconfDocumentedException("Error parsing input: " + e.getMessage(), ErrorType.PROTOCOL,
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
class JsonToCompositeNodeReader {
private static final Logger LOG = LoggerFactory.getLogger(JsonToCompositeNodeReader.class);
private static final Splitter COLON_SPLITTER = Splitter.on(':');
}
public static CompositeNodeWrapper read(final InputStream entityStream) throws UnsupportedFormatException {
- JsonParser parser = new JsonParser();
+ final JsonParser parser = new JsonParser();
- JsonElement rootElement = parser.parse(new JsonReader(new InputStreamReader(entityStream)));
+ final JsonElement rootElement = parser.parse(new JsonReader(new InputStreamReader(entityStream)));
if (rootElement.isJsonNull()) {
// no content, so return null to indicate no input
return null;
throw new UnsupportedFormatException("Root element of Json has to be Object");
}
- Set<Entry<String, JsonElement>> entrySetsOfRootJsonObject = rootElement.getAsJsonObject().entrySet();
+ final Set<Entry<String, JsonElement>> entrySetsOfRootJsonObject = rootElement.getAsJsonObject().entrySet();
if (entrySetsOfRootJsonObject.size() != 1) {
throw new UnsupportedFormatException("Json Object should contain one element");
}
- Entry<String, JsonElement> childEntry = entrySetsOfRootJsonObject.iterator().next();
- String firstElementName = childEntry.getKey();
- JsonElement firstElementType = childEntry.getValue();
+ final Entry<String, JsonElement> childEntry = entrySetsOfRootJsonObject.iterator().next();
+ final String firstElementName = childEntry.getKey();
+ final JsonElement firstElementType = childEntry.getValue();
if (firstElementType.isJsonObject()) {
// container in yang
return createStructureWithRoot(firstElementName, firstElementType.getAsJsonObject());
if (firstElementType.isJsonArray()) {
// list in yang
if (firstElementType.getAsJsonArray().size() == 1) {
- JsonElement firstElementInArray = firstElementType.getAsJsonArray().get(0);
+ final JsonElement firstElementInArray = firstElementType.getAsJsonArray().get(0);
if (firstElementInArray.isJsonObject()) {
return createStructureWithRoot(firstElementName, firstElementInArray.getAsJsonObject());
}
}
private static CompositeNodeWrapper createStructureWithRoot(final String rootObjectName, final JsonObject rootObject) {
- CompositeNodeWrapper firstNode = new CompositeNodeWrapper(getNamespaceFor(rootObjectName),
+ final CompositeNodeWrapper firstNode = new CompositeNodeWrapper(getNamespaceFor(rootObjectName),
getLocalNameFor(rootObjectName));
- for (Entry<String, JsonElement> childOfFirstNode : rootObject.entrySet()) {
+ for (final Entry<String, JsonElement> childOfFirstNode : rootObject.entrySet()) {
addChildToParent(childOfFirstNode.getKey(), childOfFirstNode.getValue(), firstNode);
}
return firstNode;
private static void addChildToParent(final String childName, final JsonElement childType,
final CompositeNodeWrapper parent) {
if (childType.isJsonObject()) {
- CompositeNodeWrapper child = new CompositeNodeWrapper(getNamespaceFor(childName),
+ final CompositeNodeWrapper child = new CompositeNodeWrapper(getNamespaceFor(childName),
getLocalNameFor(childName));
parent.addValue(child);
- for (Entry<String, JsonElement> childOfChild : childType.getAsJsonObject().entrySet()) {
+ for (final Entry<String, JsonElement> childOfChild : childType.getAsJsonObject().entrySet()) {
addChildToParent(childOfChild.getKey(), childOfChild.getValue(), child);
}
} else if (childType.isJsonArray()) {
parent.addValue(new EmptyNodeWrapper(getNamespaceFor(childName), getLocalNameFor(childName)));
} else {
- for (JsonElement childOfChildType : childType.getAsJsonArray()) {
+ for (final JsonElement childOfChildType : childType.getAsJsonArray()) {
addChildToParent(childName, childOfChildType, parent);
}
}
} else if (childType.isJsonPrimitive()) {
- JsonPrimitive childPrimitive = childType.getAsJsonPrimitive();
- String value = childPrimitive.getAsString().trim();
+ final JsonPrimitive childPrimitive = childType.getAsJsonPrimitive();
+ final String value = childPrimitive.getAsString().trim();
parent.addValue(new SimpleNodeWrapper(getNamespaceFor(childName), getLocalNameFor(childName),
resolveValueOfElement(value)));
} else {
if (Iterators.size(it) == 1) {
try {
return URI.create(maybeURI);
- } catch (IllegalArgumentException e) {
+ } catch (final IllegalArgumentException e) {
LOG.debug("Value {} couldn't be interpreted as URI.", maybeURI);
}
}
private static Object resolveValueOfElement(final String value) {
// it could be instance-identifier Built-In Type
if (!value.isEmpty() && value.charAt(0) == '/') {
- IdentityValuesDTO resolvedValue = RestUtil.asInstanceIdentifier(value, new PrefixMapingFromJson());
+ final IdentityValuesDTO resolvedValue = RestUtil.asInstanceIdentifier(value, new PrefixMapingFromJson());
if (resolvedValue != null) {
return resolvedValue;
}
}
// it could be identityref Built-In Type therefore it is necessary to look at value as module_name:local_name
- URI namespace = getNamespaceFor(value);
+ final URI namespace = getNamespaceFor(value);
if (namespace != null) {
return new IdentityValuesDTO(namespace.toString(), getLocalNameFor(value), null, value);
}
import java.util.HashSet;
import java.util.Set;
import javax.ws.rs.core.Application;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaExportContentYangBodyWriter;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaExportContentYinBodyWriter;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaRetrievalServiceImpl;
import org.opendaylight.controller.sal.restconf.impl.BrokerFacade;
import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
import org.opendaylight.controller.sal.restconf.impl.RestconfImpl;
.add(JsonNormalizedNodeBodyReader.class)
.add(NormalizedNodeJsonBodyWriter.class)
.add(NormalizedNodeXmlBodyWriter.class)
+ .add(SchemaExportContentYinBodyWriter.class)
+ .add(SchemaExportContentYangBodyWriter.class)
.build();
}
@Override
public Set<Object> getSingletons() {
- Set<Object> singletons = new HashSet<>();
- ControllerContext controllerContext = ControllerContext.getInstance();
- BrokerFacade brokerFacade = BrokerFacade.getInstance();
- RestconfImpl restconfImpl = RestconfImpl.getInstance();
+ final Set<Object> singletons = new HashSet<>();
+ final ControllerContext controllerContext = ControllerContext.getInstance();
+ final BrokerFacade brokerFacade = BrokerFacade.getInstance();
+ final RestconfImpl restconfImpl = RestconfImpl.getInstance();
+ final SchemaRetrievalServiceImpl schemaRetrieval = new SchemaRetrievalServiceImpl(controllerContext);
restconfImpl.setBroker(brokerFacade);
restconfImpl.setControllerContext(controllerContext);
singletons.add(controllerContext);
singletons.add(brokerFacade);
- singletons.add(StatisticsRestconfServiceWrapper.getInstance());
+ singletons.add(schemaRetrieval);
+ singletons.add(new RestconfCompositeWrapper(StatisticsRestconfServiceWrapper.getInstance(), schemaRetrieval));
singletons.add(StructuredDataToXmlProvider.INSTANCE);
singletons.add(StructuredDataToJsonProvider.INSTANCE);
singletons.add(JsonToCompositeNodeProvider.INSTANCE);
--- /dev/null
+package org.opendaylight.controller.sal.rest.impl;
+
+import com.google.common.base.Preconditions;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaExportContext;
+import org.opendaylight.controller.md.sal.rest.schema.SchemaRetrievalService;
+import org.opendaylight.controller.sal.rest.api.RestconfService;
+import org.opendaylight.controller.sal.restconf.impl.NormalizedNodeContext;
+import org.opendaylight.controller.sal.restconf.impl.StructuredData;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.Node;
+
+/**
+ * Composite facade which exposes both {@link RestconfService} and
+ * {@link SchemaRetrievalService} as a single JAX-RS singleton. All
+ * RestconfService calls delegate verbatim to the wrapped restconf instance;
+ * getSchema() delegates to the wrapped schema service.
+ *
+ * NOTE(review): the sibling new files in this change carry the EPL copyright
+ * header; this file does not — confirm whether one should be added.
+ */
+public class RestconfCompositeWrapper implements RestconfService, SchemaRetrievalService {
+
+    private final RestconfService restconf;
+    private final SchemaRetrievalService schema;
+
+    public RestconfCompositeWrapper(final RestconfService restconf, final SchemaRetrievalService schema) {
+        // Both delegates are mandatory; fail fast on construction.
+        this.restconf = Preconditions.checkNotNull(restconf);
+        this.schema = Preconditions.checkNotNull(schema);
+    }
+
+    @Override
+    public Object getRoot() {
+        return restconf.getRoot();
+    }
+
+    @Override
+    public StructuredData getModules(final UriInfo uriInfo) {
+        return restconf.getModules(uriInfo);
+    }
+
+    @Override
+    public StructuredData getModules(final String identifier, final UriInfo uriInfo) {
+        return restconf.getModules(identifier, uriInfo);
+    }
+
+    @Override
+    public StructuredData getModule(final String identifier, final UriInfo uriInfo) {
+        return restconf.getModule(identifier, uriInfo);
+    }
+
+    @Override
+    public StructuredData getOperations(final UriInfo uriInfo) {
+        return restconf.getOperations(uriInfo);
+    }
+
+    @Override
+    public StructuredData getOperations(final String identifier, final UriInfo uriInfo) {
+        return restconf.getOperations(identifier, uriInfo);
+    }
+
+    @Override
+    public StructuredData invokeRpc(final String identifier, final CompositeNode payload, final UriInfo uriInfo) {
+        return restconf.invokeRpc(identifier, payload, uriInfo);
+    }
+
+    @Override
+    public StructuredData invokeRpc(final String identifier, final String noPayload, final UriInfo uriInfo) {
+        return restconf.invokeRpc(identifier, noPayload, uriInfo);
+    }
+
+    @Override
+    public NormalizedNodeContext readConfigurationData(final String identifier, final UriInfo uriInfo) {
+        return restconf.readConfigurationData(identifier, uriInfo);
+    }
+
+    @Override
+    public NormalizedNodeContext readOperationalData(final String identifier, final UriInfo uriInfo) {
+        return restconf.readOperationalData(identifier, uriInfo);
+    }
+
+    @Override
+    public Response updateConfigurationData(final String identifier, final Node<?> payload) {
+        return restconf.updateConfigurationData(identifier, payload);
+    }
+
+    @Override
+    public Response createConfigurationData(final String identifier, final Node<?> payload) {
+        return restconf.createConfigurationData(identifier, payload);
+    }
+
+    @Override
+    public Response createConfigurationData(final Node<?> payload) {
+        return restconf.createConfigurationData(payload);
+    }
+
+    @Override
+    public Response deleteConfigurationData(final String identifier) {
+        return restconf.deleteConfigurationData(identifier);
+    }
+
+    @Override
+    public Response subscribeToStream(final String identifier, final UriInfo uriInfo) {
+        return restconf.subscribeToStream(identifier, uriInfo);
+    }
+
+    @Override
+    public StructuredData getAvailableStreams(final UriInfo uriInfo) {
+        return restconf.getAvailableStreams(uriInfo);
+    }
+
+    @Override
+    public SchemaExportContext getSchema(final String mountId) {
+        // The only method served by the schema delegate rather than restconf.
+        return schema.getSchema(mountId);
+    }
+}
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Produces({ Draft02.MediaTypes.API + RestconfService.JSON, Draft02.MediaTypes.DATA + RestconfService.JSON,
Draft02.MediaTypes.OPERATION + RestconfService.JSON, MediaType.APPLICATION_JSON })
public void writeTo(final StructuredData t, final Class<?> type, final Type genericType, final Annotation[] annotations,
final MediaType mediaType, final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream)
throws IOException, WebApplicationException {
- CompositeNode data = t.getData();
+ final CompositeNode data = t.getData();
if (data == null) {
throw new RestconfDocumentedException(Response.Status.NOT_FOUND);
}
- JsonWriter writer = new JsonWriter(new OutputStreamWriter(entityStream, Charsets.UTF_8));
+ final JsonWriter writer = new JsonWriter(new OutputStreamWriter(entityStream, Charsets.UTF_8));
if (t.isPrettyPrintMode()) {
writer.setIndent(" ");
} else {
writer.setIndent("");
}
- JsonMapper jsonMapper = new JsonMapper(t.getMountPoint());
+ final JsonMapper jsonMapper = new JsonMapper(t.getMountPoint());
jsonMapper.write(writer, data, (DataNodeContainer) t.getSchema());
writer.flush();
}
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Produces({ Draft02.MediaTypes.API + RestconfService.XML, Draft02.MediaTypes.DATA + RestconfService.XML,
Draft02.MediaTypes.OPERATION + RestconfService.XML, MediaType.APPLICATION_XML, MediaType.TEXT_XML })
final Transformer ret;
try {
ret = FACTORY.newTransformer();
- } catch (TransformerConfigurationException e) {
+ } catch (final TransformerConfigurationException e) {
LOG.error("Failed to instantiate XML transformer", e);
throw new IllegalStateException("XML encoding currently unavailable", e);
}
final Annotation[] annotations, final MediaType mediaType,
final MultivaluedMap<String, Object> httpHeaders, final OutputStream entityStream) throws IOException,
WebApplicationException {
- CompositeNode data = t.getData();
+ final CompositeNode data = t.getData();
if (data == null) {
throw new RestconfDocumentedException(Response.Status.NOT_FOUND);
}
} else {
trans.setOutputProperty(OutputKeys.INDENT, "no");
}
- } catch (RuntimeException e) {
+ } catch (final RuntimeException e) {
throw new RestconfDocumentedException(e.getMessage(), ErrorType.TRANSPORT, ErrorTag.OPERATION_FAILED);
}
final Document domTree = new XmlMapper().write(data, (DataNodeContainer) t.getSchema());
try {
trans.transform(new DOMSource(domTree), new StreamResult(entityStream));
- } catch (TransformerException e) {
+ } catch (final TransformerException e) {
LOG.error("Error during translation of Document to OutputStream", e);
throw new RestconfDocumentedException(e.getMessage(), ErrorType.TRANSPORT, ErrorTag.OPERATION_FAILED);
}
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Provider
@Consumes({ Draft02.MediaTypes.DATA + RestconfService.XML, Draft02.MediaTypes.OPERATION + RestconfService.XML,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
@Override
public Node<?> readFrom(final Class<Node<?>> type, final Type genericType, final Annotation[] annotations,
- MediaType mediaType, MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
+ final MediaType mediaType, final MultivaluedMap<String, String> httpHeaders, final InputStream entityStream)
throws IOException, WebApplicationException {
- XmlToCompositeNodeReader xmlReader = new XmlToCompositeNodeReader();
+ final XmlToCompositeNodeReader xmlReader = new XmlToCompositeNodeReader();
try {
return xmlReader.read(entityStream);
} catch (XMLStreamException | UnsupportedFormatException e) {
package org.opendaylight.controller.sal.rest.impl;
import static com.google.common.base.Preconditions.checkArgument;
-
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.opendaylight.controller.sal.restconf.impl.SimpleNodeWrapper;
import org.opendaylight.yangtools.yang.data.api.Node;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
@Deprecated
public class XmlToCompositeNodeReader {
eventReader = xmlInputFactory.createXMLEventReader(entityStream);
if (eventReader.hasNext()) {
- XMLEvent element = eventReader.peek();
+ final XMLEvent element = eventReader.peek();
if (element.isStartDocument()) {
eventReader.nextEvent();
}
private boolean isSimpleNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
- XMLEvent innerEvent = skipCommentsAndWhitespace();
+ final XMLEvent innerEvent = skipCommentsAndWhitespace();
if (innerEvent != null && (innerEvent.isCharacters() || innerEvent.isEndElement())) {
return true;
}
private boolean isCompositeNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
- XMLEvent innerEvent = skipCommentsAndWhitespace();
+ final XMLEvent innerEvent = skipCommentsAndWhitespace();
if (innerEvent != null) {
if (innerEvent.isStartElement()) {
return true;
private XMLEvent skipCommentsAndWhitespace() throws XMLStreamException {
while (eventReader.hasNext()) {
- XMLEvent event = eventReader.peek();
+ final XMLEvent event = eventReader.peek();
if (event.getEventType() == XMLStreamConstants.COMMENT) {
eventReader.nextEvent();
continue;
}
if (event.isCharacters()) {
- Characters chars = event.asCharacters();
+ final Characters chars = event.asCharacters();
if (chars.isWhiteSpace()) {
eventReader.nextEvent();
continue;
private NodeWrapper<? extends Node<?>> resolveSimpleNodeFromStartElement(final StartElement startElement)
throws XMLStreamException {
checkArgument(startElement != null, "Start Element cannot be NULL!");
- String data = getValueOf(startElement);
+ final String data = getValueOf(startElement);
if (data == null) {
return new EmptyNodeWrapper(getNamespaceFor(startElement), getLocalNameFor(startElement));
}
}
private URI getNamespaceFor(final StartElement startElement) {
- String namespaceURI = startElement.getName().getNamespaceURI();
+ final String namespaceURI = startElement.getName().getNamespaceURI();
return namespaceURI.isEmpty() ? null : URI.create(namespaceURI);
}
private Object resolveValueOfElement(final String value, final StartElement startElement) {
// it could be instance-identifier Built-In Type
if (value.startsWith("/")) {
- IdentityValuesDTO iiValue = RestUtil.asInstanceIdentifier(value, new RestUtil.PrefixMapingFromXml(
+ final IdentityValuesDTO iiValue = RestUtil.asInstanceIdentifier(value, new RestUtil.PrefixMapingFromXml(
startElement));
if (iiValue != null) {
return iiValue;
}
}
// it could be identityref Built-In Type
- String[] namespaceAndValue = value.split(":");
+ final String[] namespaceAndValue = value.split(":");
if (namespaceAndValue.length == 2) {
- String namespace = startElement.getNamespaceContext().getNamespaceURI(namespaceAndValue[0]);
+ final String namespace = startElement.getNamespaceContext().getNamespaceURI(namespaceAndValue[0]);
if (namespace != null && !namespace.isEmpty()) {
return new IdentityValuesDTO(namespace, namespaceAndValue[1], namespaceAndValue[0], value);
}
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public final class CompositeNodeWrapper implements NodeWrapper<CompositeNode>, CompositeNode {
private MutableCompositeNode compositeNode;
name = new QName(namespace, localName);
}
- List<Node<?>> nodeValues = new ArrayList<>(values.size());
- for (NodeWrapper<?> nodeWrapper : values) {
+ final List<Node<?>> nodeValues = new ArrayList<>(values.size());
+ for (final NodeWrapper<?> nodeWrapper : values) {
nodeValues.add(nodeWrapper.unwrap());
}
compositeNode = NodeFactory.createMutableCompositeNode(name, null, nodeValues, null, null);
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public final class EmptyNodeWrapper implements NodeWrapper<Node<?>>, Node<Void> {
private Node<?> unwrapped;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.Node;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public interface NodeWrapper<T extends Node<?>> {
void setQname(QName name);
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public final class SimpleNodeWrapper implements NodeWrapper<SimpleNode<?>>, SimpleNode<Object> {
private SimpleNode<Object> simpleNode;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+/**
+ * @deprecated class will be removed in Lithium release
+ */
+@Deprecated
public class StructuredData {
private final CompositeNode data;
this.data = data;
this.schema = schema;
this.mountPoint = mountPoint;
- this.prettyPrintMode = preattyPrintMode;
+ prettyPrintMode = preattyPrintMode;
}
public CompositeNode getData() {
</parent>
<modelVersion>4.0.0</modelVersion>
+ <artifactId>sal-test-model</artifactId>
+ <packaging>bundle</packaging>
+
<dependencies>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
</dependency>
</dependencies>
- <artifactId>sal-test-model</artifactId>
<build>
<plugins>
<plugin>
--- /dev/null
+// Test-only YANG model: augments opendaylight-mdsal-list-test's top-level list
+// with nested lists, bit/enum typedefs and a routed RPC, replacing earlier test
+// cases' dependency on the flow model.
+module opendaylight-of-migration-test-model {
+
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:of-migration-test-model";
+    prefix of-migration-test;
+
+    import opendaylight-mdsal-list-test {prefix test;}
+    import yang-ext {prefix ext;}
+    import opendaylight-mdsal-augment-test {prefix aug;}
+    import opendaylight-test-routed-rpc {prefix routed;}
+
+    description
+        "This module contains a collection of YANG definitions used for
+        test cases that used to depend on flow model.";
+
+    // NOTE(review): revision statement has no description substatement —
+    // consider adding one for consistency with other modules.
+    revision 2015-02-10 {
+    }
+
+    typedef bit-flags {
+        type bits {
+            bit FLAG_ONE;
+            bit FLAG_TWO;
+            bit FLAG_THREE;
+            bit FLAG_FOUR;
+            bit FLAG_FIVE;
+        }
+    }
+
+    typedef custom-enum {
+        type enumeration {
+            enum type1;
+            enum type2;
+            enum type3;
+        }
+    }
+
+    grouping enum-grouping {
+        leaf attr-enum {
+            type custom-enum;
+        }
+    }
+
+    grouping aug-grouping {
+        container cont1 {
+            leaf attr-str {
+                type string;
+            }
+        }
+
+        container cont2 {
+            list contlist1 {
+                key "attr-str";
+
+                leaf attr-str {
+                    type string;
+                }
+
+                uses enum-grouping;
+            }
+        }
+
+        leaf attr-str1 {
+            type string;
+        }
+
+        leaf attr-str2 {
+            type string;
+        }
+
+        leaf attr-str3 {
+            type string;
+        }
+
+        leaf attr-str4 {
+            type string;
+        }
+
+        list list1 {
+            key "attr-str";
+            leaf attr-str {
+                type string;
+            }
+
+            list list1-1 {
+                key "attr-int";
+                leaf attr-int {
+                    type int32;
+                }
+
+                leaf attr-str {
+                    type string;
+                }
+
+                leaf flags {
+                    type bit-flags;
+                }
+            }
+
+            list list1-2 {
+                key "attr-int";
+                leaf attr-int {
+                    type int32;
+                }
+
+                leaf attr-str {
+                    type string;
+                }
+            }
+        }
+    }
+
+    // Attach the full aug-grouping structure to the imported model's
+    // top-level list entries.
+    augment "/test:top/test:top-level-list" {
+        ext:augment-identifier tll-complex-augment;
+        uses aug-grouping;
+    }
+
+    augment "/test:top/test:top-level-list/list1/list1-1" {
+        ext:augment-identifier list11-simple-augment;
+
+        leaf attr-str2 {
+            type string;
+        }
+
+        container cont {
+            leaf attr-int {
+                type int32;
+            }
+        }
+    }
+
+    augment "/test:top/test:top-level-list/test:nested-list/" {
+        ext:augment-identifier nested-list-simple-augment;
+
+        leaf type {
+            type string;
+        }
+    }
+
+    // Routed RPC: knocker-id is the routing context reference
+    // (routed:test-context).
+    rpc knock-knock {
+        input {
+            leaf knocker-id {
+                ext:context-reference routed:test-context;
+                type instance-identifier;
+            }
+
+            leaf question {
+                type string;
+            }
+        }
+
+        output {
+            leaf answer {
+                type string;
+            }
+        }
+    }
+}
--- /dev/null
+// Test-only YANG model defining a single notification, used to exercise
+// notification listener registration and publication.
+module opendaylight-test-notification {
+    yang-version 1;
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:test:bi:ba:notification";
+    prefix "ntf";
+
+    description
+        "Test model for testing of registering notification listener and publishing of notification.";
+
+    revision "2015-02-05" {
+        description
+            "Initial revision";
+    }
+
+    notification out-of-pixie-dust-notification {
+        description "Just a testing notification that we can not fly for now.";
+
+        leaf reason {
+            type string;
+        }
+
+        leaf days-till-new-dust {
+            type uint16;
+        }
+    }
+}
\ No newline at end of file
<packaging>bundle</packaging>
<properties>
<bundle.plugin.version>2.4.0</bundle.plugin.version>
- <guava.version>14.0.1</guava.version>
<maven.clean.plugin.version>2.5</maven.clean.plugin.version>
</properties>
<dependencies>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-mapping-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
org.opendaylight.controller.netconf.confignetconfconnector.util,
org.opendaylight.controller.netconf.confignetconfconnector.osgi,
org.opendaylight.controller.netconf.confignetconfconnector.exception,</Private-Package>
- <Import-Package>*</Import-Package>
- <Export-Package></Export-Package>
</instructions>
</configuration>
</plugin>
import java.util.Map.Entry;
import javax.management.ObjectName;
import javax.management.openmbean.OpenType;
-import org.opendaylight.controller.config.util.ConfigRegistryClient;
+import org.opendaylight.controller.config.util.BeanReader;
import org.opendaylight.controller.config.yangjmxgenerator.RuntimeBeanEntry;
import org.opendaylight.controller.config.yangjmxgenerator.attribute.AttributeIfc;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
private final Map<String, AttributeIfc> yangToAttrConfig;
private final String nullableDummyContainerName;
private final Map<String, AttributeIfc> jmxToAttrConfig;
- private final ConfigRegistryClient configRegistryClient;
+ private final BeanReader configRegistryClient;
- public InstanceConfig(ConfigRegistryClient configRegistryClient, Map<String, AttributeIfc> yangNamesToAttributes,
+ public InstanceConfig(BeanReader configRegistryClient, Map<String, AttributeIfc> yangNamesToAttributes,
String nullableDummyContainerName) {
this.yangToAttrConfig = yangNamesToAttributes;
TransactionProvider transactionProvider) {
switch (source) {
case running:
- return new RunningDatastoreQueryStrategy();
+ return new RunningDatastoreQueryStrategy(transactionProvider);
case candidate:
return new CandidateDatastoreQueryStrategy(transactionProvider);
default:
protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
fromXml(xml);
try {
- this.transactionProvider.abortTransaction();
- } catch (final IllegalStateException e) {
+ if (transactionProvider.getTransaction().isPresent()) {
+ this.transactionProvider.abortTransaction();
+ }
+ } catch (final RuntimeException e) {
LOG.warn("Abort failed: ", e);
final Map<String, String> errorInfo = new HashMap<>();
errorInfo
.put(ErrorTag.operation_failed.name(),
- "Operation failed. Use 'get-config' or 'edit-config' before triggering 'discard-changes' operation");
+ "Abort failed.");
throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.application, ErrorTag.operation_failed,
ErrorSeverity.error, errorInfo);
}
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
import org.opendaylight.controller.config.api.ValidationException;
+import org.opendaylight.controller.config.util.BeanReader;
import org.opendaylight.controller.config.util.ConfigRegistryClient;
import org.opendaylight.controller.config.util.ConfigTransactionClient;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.Services;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfigXmlParser.EditConfigExecution;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
private static final Logger LOG = LoggerFactory.getLogger(EditConfig.class);
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
private final TransactionProvider transactionProvider;
private EditConfigXmlParser editConfigXmlParser;
- public EditConfig(YangStoreSnapshot yangStoreSnapshot, TransactionProvider transactionProvider,
+ public EditConfig(YangStoreContext yangStoreSnapshot, TransactionProvider transactionProvider,
ConfigRegistryClient configRegistryClient, String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
this.yangStoreSnapshot = yangStoreSnapshot;
}
}
- public static Config getConfigMapping(ConfigRegistryClient configRegistryClient, YangStoreSnapshot yangStoreSnapshot) {
+ public static Config getConfigMapping(ConfigRegistryClient configRegistryClient, YangStoreContext yangStoreSnapshot) {
Map<String, Map<String, ModuleConfig>> factories = transformMbeToModuleConfigs(configRegistryClient,
yangStoreSnapshot.getModuleMXBeanEntryMap());
Map<String, Map<Date, IdentityMapping>> identitiesMap = transformIdentities(yangStoreSnapshot.getModules());
public static Map<String/* Namespace from yang file */,
Map<String /* Name of module entry from yang file */, ModuleConfig>> transformMbeToModuleConfigs
- (final ConfigRegistryClient configRegistryClient, Map<String/* Namespace from yang file */,
+ (final BeanReader configRegistryClient, Map<String/* Namespace from yang file */,
Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
@Override
protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
-
EditConfigXmlParser.EditConfigExecution editConfigExecution;
Config cfg = getConfigMapping(getConfigRegistryClient(), yangStoreSnapshot);
editConfigExecution = editConfigXmlParser.fromXml(xml, cfg);
import java.util.Set;
import javax.management.ObjectName;
import org.opendaylight.controller.config.util.ConfigRegistryClient;
+import org.opendaylight.controller.config.util.ConfigTransactionClient;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.config.yangjmxgenerator.RuntimeBeanEntry;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Datastore;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
+import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
public class Get extends AbstractConfigNetconfOperation {
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final TransactionProvider transactionProvider;
+ private final YangStoreContext yangStoreSnapshot;
private static final Logger LOG = LoggerFactory.getLogger(Get.class);
- public Get(YangStoreSnapshot yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ public Get(final TransactionProvider transactionProvider, YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
+ this.transactionProvider = transactionProvider;
this.yangStoreSnapshot = yangStoreSnapshot;
}
protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
checkXml(xml);
- final Set<ObjectName> runtimeBeans = getConfigRegistryClient().lookupRuntimeBeans();
+ final ObjectName testTransaction = transactionProvider.getOrCreateReadTransaction();
+ final ConfigTransactionClient registryClient = getConfigRegistryClient().getConfigTransactionClient(testTransaction);
- //Transaction provider required only for candidate datastore
- final Set<ObjectName> configBeans = Datastore.getInstanceQueryStrategy(Datastore.running, null)
- .queryInstances(getConfigRegistryClient());
+ try {
+ // Runtime beans are not part of transactions and have to be queried against the central registry
+ final Set<ObjectName> runtimeBeans = getConfigRegistryClient().lookupRuntimeBeans();
- final Map<String, Map<String, ModuleRuntime>> moduleRuntimes = createModuleRuntimes(getConfigRegistryClient(),
- yangStoreSnapshot.getModuleMXBeanEntryMap());
- final Map<String, Map<String, ModuleConfig>> moduleConfigs = EditConfig.transformMbeToModuleConfigs(
- getConfigRegistryClient(), yangStoreSnapshot.getModuleMXBeanEntryMap());
+ final Set<ObjectName> configBeans = Datastore.getInstanceQueryStrategy(Datastore.running, transactionProvider)
+ .queryInstances(getConfigRegistryClient());
- final Runtime runtime = new Runtime(moduleRuntimes, moduleConfigs);
+ final Map<String, Map<String, ModuleRuntime>> moduleRuntimes = createModuleRuntimes(getConfigRegistryClient(),
+ yangStoreSnapshot.getModuleMXBeanEntryMap());
+ final Map<String, Map<String, ModuleConfig>> moduleConfigs = EditConfig.transformMbeToModuleConfigs(
+ registryClient, yangStoreSnapshot.getModuleMXBeanEntryMap());
- final Element element = runtime.toXml(runtimeBeans, configBeans, document);
+ final Runtime runtime = new Runtime(moduleRuntimes, moduleConfigs);
- LOG.trace("{} operation successful", XmlNetconfConstants.GET);
+ final Element element = runtime.toXml(runtimeBeans, configBeans, document);
- return element;
+ LOG.trace("{} operation successful", XmlNetconfConstants.GET);
+
+ return element;
+ } finally {
+ transactionProvider.closeReadTransaction();
+ }
}
}
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Datastore;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
public static final String GET_CONFIG = "get-config";
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
private final Optional<String> maybeNamespace;
private final TransactionProvider transactionProvider;
private static final Logger LOG = LoggerFactory.getLogger(GetConfig.class);
- public GetConfig(YangStoreSnapshot yangStoreSnapshot, Optional<String> maybeNamespace,
+ public GetConfig(YangStoreContext yangStoreSnapshot, Optional<String> maybeNamespace,
TransactionProvider transactionProvider, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
private Element getResponseInternal(final Document document, final ConfigRegistryClient configRegistryClient,
final Datastore source) {
- Element dataElement = XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
- final Set<ObjectName> instances = Datastore.getInstanceQueryStrategy(source, this.transactionProvider)
- .queryInstances(configRegistryClient);
- final Config configMapping = new Config(EditConfig.transformMbeToModuleConfigs(configRegistryClient,
- yangStoreSnapshot.getModuleMXBeanEntryMap()));
-
-
- ObjectName on = transactionProvider.getOrCreateTransaction();
- ConfigTransactionClient ta = configRegistryClient.getConfigTransactionClient(on);
-
- ServiceRegistryWrapper serviceTracker = new ServiceRegistryWrapper(ta);
- dataElement = configMapping.toXml(instances, this.maybeNamespace, document, dataElement, serviceTracker);
-
- LOG.trace("{} operation successful", GET_CONFIG);
-
- return dataElement;
+ final ConfigTransactionClient registryClient;
+ // Read current state from a transaction: if the source is running, start a new transaction just for reading;
+ // in case of candidate, reuse the current transaction representing the candidate
+ if(source == Datastore.running) {
+ final ObjectName readTx = transactionProvider.getOrCreateReadTransaction();
+ registryClient = getConfigRegistryClient().getConfigTransactionClient(readTx);
+ } else {
+ registryClient = getConfigRegistryClient().getConfigTransactionClient(transactionProvider.getOrCreateTransaction());
+ }
+
+ try {
+ Element dataElement = XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ final Set<ObjectName> instances = Datastore.getInstanceQueryStrategy(source, this.transactionProvider)
+ .queryInstances(configRegistryClient);
+
+ final Config configMapping = new Config(EditConfig.transformMbeToModuleConfigs(registryClient,
+ yangStoreSnapshot.getModuleMXBeanEntryMap()));
+
+ ServiceRegistryWrapper serviceTracker = new ServiceRegistryWrapper(registryClient);
+ dataElement = configMapping.toXml(instances, this.maybeNamespace, document, dataElement, serviceTracker);
+
+ LOG.trace("{} operation successful", GET_CONFIG);
+
+ return dataElement;
+ } finally {
+ if(source == Datastore.running) {
+ transactionProvider.closeReadTransaction();
+ }
+ }
}
@Override
import java.util.Set;
import javax.management.ObjectName;
import org.opendaylight.controller.config.util.ConfigRegistryClient;
+import org.opendaylight.controller.config.util.ConfigTransactionClient;
+import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
public class RunningDatastoreQueryStrategy implements DatastoreQueryStrategy {
+ private final TransactionProvider transactionProvider;
+
+ public RunningDatastoreQueryStrategy(TransactionProvider transactionProvider) {
+ this.transactionProvider = transactionProvider;
+ }
+
@Override
public Set<ObjectName> queryInstances(ConfigRegistryClient configRegistryClient) {
- return configRegistryClient.lookupConfigBeans();
+ ObjectName on = transactionProvider.getOrCreateReadTransaction();
+ ConfigTransactionClient proxy = configRegistryClient.getConfigTransactionClient(on);
+ return proxy.lookupConfigBeans();
}
}
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.rpc.ModuleRpcs;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.rpc.Rpcs;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpc.class);
public static final String CONTEXT_INSTANCE = "context-instance";
- private final YangStoreSnapshot yangStoreSnapshot;
+ private final YangStoreContext yangStoreSnapshot;
- public RuntimeRpc(final YangStoreSnapshot yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ public RuntimeRpc(final YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
String netconfSessionIdForReporting) {
super(configRegistryClient, netconfSessionIdForReporting);
this.yangStoreSnapshot = yangStoreSnapshot;
import java.util.Dictionary;
import java.util.Hashtable;
+import org.opendaylight.controller.netconf.api.util.NetconfConstants;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import org.osgi.framework.BundleActivator;
SchemaContextProvider schemaContextProvider = reference.getBundle().getBundleContext().getService(reference);
- YangStoreServiceImpl yangStoreService = new YangStoreServiceImpl(schemaContextProvider);
+ YangStoreService yangStoreService = new YangStoreService(schemaContextProvider, context);
configRegistryLookup = new ConfigRegistryLookupThread(yangStoreService);
configRegistryLookup.start();
return configRegistryLookup;
}
private class ConfigRegistryLookupThread extends Thread {
- private final YangStoreServiceImpl yangStoreService;
+ private final YangStoreService yangStoreService;
- private ConfigRegistryLookupThread(YangStoreServiceImpl yangStoreService) {
+ private ConfigRegistryLookupThread(YangStoreService yangStoreService) {
super("config-registry-lookup");
this.yangStoreService = yangStoreService;
}
NetconfOperationServiceFactoryImpl factory = new NetconfOperationServiceFactoryImpl(yangStoreService);
LOG.debug("Registering into OSGi");
Dictionary<String, String> properties = new Hashtable<>();
- properties.put("name", "config-netconf-connector");
+ properties.put(NetconfConstants.SERVICE_NAME, NetconfConstants.CONFIG_NETCONF_CONNECTOR);
osgiRegistration = context.registerService(NetconfOperationServiceFactory.class, factory, properties);
}
}
final class NetconfOperationProvider {
private final Set<NetconfOperation> operations;
- NetconfOperationProvider(YangStoreSnapshot yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
+ NetconfOperationProvider(YangStoreContext yangStoreSnapshot, ConfigRegistryClient configRegistryClient,
TransactionProvider transactionProvider, String netconfSessionIdForReporting) {
operations = setUpOperations(yangStoreSnapshot, configRegistryClient, transactionProvider,
return operations;
}
- private static Set<NetconfOperation> setUpOperations(YangStoreSnapshot yangStoreSnapshot,
+ private static Set<NetconfOperation> setUpOperations(YangStoreContext yangStoreSnapshot,
ConfigRegistryClient configRegistryClient, TransactionProvider transactionProvider,
String netconfSessionIdForReporting) {
Set<NetconfOperation> ops = Sets.newHashSet();
ops.add(new Commit(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new Lock(netconfSessionIdForReporting));
ops.add(new UnLock(netconfSessionIdForReporting));
- ops.add(new Get(yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
+ ops.add(new Get(transactionProvider, yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
ops.add(new DiscardChanges(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new Validate(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new RuntimeRpc(yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+import com.google.common.base.Optional;
import java.lang.management.ManagementFactory;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
import javax.management.MBeanServer;
import org.opendaylight.controller.config.util.ConfigRegistryJMXClient;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.confignetconfconnector.util.Util;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.yangtools.yang.model.api.Module;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Override
public NetconfOperationServiceImpl createService(String netconfSessionIdForReporting) {
- try {
- return new NetconfOperationServiceImpl(yangStoreService, jmxClient, netconfSessionIdForReporting);
- } catch (YangStoreException e) {
- throw new IllegalStateException(e);
+ return new NetconfOperationServiceImpl(yangStoreService, jmxClient, netconfSessionIdForReporting);
+ }
+
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return setupCapabilities(yangStoreService);
+ }
+
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ return yangStoreService.registerCapabilityListener(listener);
+ }
+
+ public static Set<Capability> setupCapabilities(final YangStoreContext yangStoreSnapshot) {
+ Set<Capability> capabilities = new HashSet<>();
+ // [RFC6241] 8.3. Candidate Configuration Capability
+ capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
+
+ // TODO rollback on error not supported EditConfigXmlParser:100
+ // [RFC6241] 8.5. Rollback-on-Error Capability
+ // capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
+
+ Set<Module> modules = yangStoreSnapshot.getModules();
+ for (Module module : modules) {
+ capabilities.add(new YangStoreCapability(module, yangStoreSnapshot.getModuleSource(module)));
+ }
+
+ return capabilities;
+ }
+
+ private static class BasicCapability implements Capability {
+
+ private final String capability;
+
+ private BasicCapability(final String capability) {
+ this.capability = capability;
+ }
+
+ @Override
+ public String getCapabilityUri() {
+ return capability;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Collection<String> getLocation() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public String toString() {
+ return capability;
+ }
+ }
+
+ static final class YangStoreCapability extends BasicCapability {
+
+ private final String content;
+ private final String revision;
+ private final String moduleName;
+ private final String moduleNamespace;
+
+ public YangStoreCapability(final Module module, final String moduleContent) {
+ super(toCapabilityURI(module));
+ this.content = moduleContent;
+ this.moduleName = module.getName();
+ this.moduleNamespace = module.getNamespace().toString();
+ this.revision = Util.writeDate(module.getRevision());
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.of(content);
+ }
+
+ private static String toCapabilityURI(final Module module) {
+ return String.valueOf(module.getNamespace()) + "?module="
+ + module.getName() + "&revision=" + Util.writeDate(module.getRevision());
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.of(moduleName);
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.of(moduleNamespace);
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.of(revision);
}
}
}
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
import java.util.Set;
-import org.opendaylight.controller.config.api.LookupRegistry;
import org.opendaylight.controller.config.util.ConfigRegistryJMXClient;
-import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
-import org.opendaylight.controller.netconf.confignetconfconnector.util.Util;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.yangtools.yang.model.api.Module;
/**
- * Manages life cycle of {@link YangStoreSnapshot}.
+ * Manages life cycle of {@link YangStoreContext}.
*/
public class NetconfOperationServiceImpl implements NetconfOperationService {
- private final YangStoreSnapshot yangStoreSnapshot;
private final NetconfOperationProvider operationProvider;
- private final Set<Capability> capabilities;
private final TransactionProvider transactionProvider;
public NetconfOperationServiceImpl(final YangStoreService yangStoreService, final ConfigRegistryJMXClient jmxClient,
- final String netconfSessionIdForReporting) throws YangStoreException {
-
- yangStoreSnapshot = yangStoreService.getYangStoreSnapshot();
- checkConsistencyBetweenYangStoreAndConfig(jmxClient, yangStoreSnapshot);
+ final String netconfSessionIdForReporting) {
transactionProvider = new TransactionProvider(jmxClient, netconfSessionIdForReporting);
- operationProvider = new NetconfOperationProvider(yangStoreSnapshot, jmxClient, transactionProvider,
+ operationProvider = new NetconfOperationProvider(yangStoreService, jmxClient, transactionProvider,
netconfSessionIdForReporting);
- capabilities = setupCapabilities(yangStoreSnapshot);
- }
-
-
- @VisibleForTesting
- static void checkConsistencyBetweenYangStoreAndConfig(final LookupRegistry jmxClient, final YangStoreSnapshot yangStoreSnapshot) {
- Set<String> missingModulesFromConfig = Sets.newHashSet();
-
- Set<String> modulesSeenByConfig = jmxClient.getAvailableModuleFactoryQNames();
- Map<String, Map<String, ModuleMXBeanEntry>> moduleMXBeanEntryMap = yangStoreSnapshot.getModuleMXBeanEntryMap();
-
- for (Map<String, ModuleMXBeanEntry> moduleNameToMBE : moduleMXBeanEntryMap.values()) {
- for (ModuleMXBeanEntry moduleMXBeanEntry : moduleNameToMBE.values()) {
- String moduleSeenByYangStore = moduleMXBeanEntry.getYangModuleQName().toString();
- if(!modulesSeenByConfig.contains(moduleSeenByYangStore)){
- missingModulesFromConfig.add(moduleSeenByYangStore);
- }
- }
- }
-
- Preconditions
- .checkState(
- missingModulesFromConfig.isEmpty(),
- "There are inconsistencies between configuration subsystem and yangstore in terms of discovered yang modules, yang modules missing from config subsystem but present in yangstore: %s, %sAll modules present in config: %s",
- missingModulesFromConfig, System.lineSeparator(), modulesSeenByConfig);
-
- }
-
- @Override
- public void close() {
- yangStoreSnapshot.close();
- transactionProvider.close();
- }
-
- @Override
- public Set<Capability> getCapabilities() {
- return capabilities;
}
@Override
return operationProvider.getOperations();
}
- private static Set<Capability> setupCapabilities(final YangStoreSnapshot yangStoreSnapshot) {
- Set<Capability> capabilities = new HashSet<>();
- // [RFC6241] 8.3. Candidate Configuration Capability
- capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
-
- // TODO rollback on error not supported EditConfigXmlParser:100
- // [RFC6241] 8.5. Rollback-on-Error Capability
- // capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
-
- Set<Module> modules = yangStoreSnapshot.getModules();
- for (Module module : modules) {
- capabilities.add(new YangStoreCapability(module, yangStoreSnapshot.getModuleSource(module)));
- }
-
- return capabilities;
- }
-
- private static class BasicCapability implements Capability {
-
- private final String capability;
-
- private BasicCapability(final String capability) {
- this.capability = capability;
- }
-
- @Override
- public String getCapabilityUri() {
- return capability;
- }
-
- @Override
- public Optional<String> getModuleNamespace() {
- return Optional.absent();
- }
-
- @Override
- public Optional<String> getModuleName() {
- return Optional.absent();
- }
-
- @Override
- public Optional<String> getRevision() {
- return Optional.absent();
- }
-
- @Override
- public Optional<String> getCapabilitySchema() {
- return Optional.absent();
- }
-
- @Override
- public Collection<String> getLocation() {
- return Collections.emptyList();
- }
-
- @Override
- public String toString() {
- return capability;
- }
+ @Override
+ public void close() {
+ transactionProvider.close();
}
- private static final class YangStoreCapability extends BasicCapability {
-
- private final String content;
- private final String revision;
- private final String moduleName;
- private final String moduleNamespace;
-
- public YangStoreCapability(final Module module, final String moduleContent) {
- super(toCapabilityURI(module));
- this.content = moduleContent;
- this.moduleName = module.getName();
- this.moduleNamespace = module.getNamespace().toString();
- this.revision = Util.writeDate(module.getRevision());
- }
-
- @Override
- public Optional<String> getCapabilitySchema() {
- return Optional.of(content);
- }
-
- private static String toCapabilityURI(final Module module) {
- return String.valueOf(module.getNamespace()) + "?module="
- + module.getName() + "&revision=" + Util.writeDate(module.getRevision());
- }
-
- @Override
- public Optional<String> getModuleName() {
- return Optional.of(moduleName);
- }
-
- @Override
- public Optional<String> getModuleNamespace() {
- return Optional.of(moduleNamespace);
- }
-
- @Override
- public Optional<String> getRevision() {
- return Optional.of(revision);
- }
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+
+public interface YangStoreContext {
+
+ /**
+ * @deprecated Use {@link #getQNamesToIdentitiesToModuleMXBeanEntries()} instead. This method returns only one
+ * module representation even if multiple revisions are available.
+ */
+ @Deprecated
+ Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> getModuleMXBeanEntryMap();
+
+
+ Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries();
+
+ /**
+ * Get all modules discovered when this snapshot was created.
+ * @return all modules discovered. If one module exists with two different revisions, return both.
+ */
+ Set<Module> getModules();
+
+ String getModuleSource(ModuleIdentifier moduleIdentifier);
+
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-public class YangStoreException extends Exception {
-
- private static final long serialVersionUID = 2841238836278528836L;
-
- public YangStoreException(String message, Throwable cause) {
- super(message, cause);
- }
-
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
+
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-/**
- * Yang store OSGi service
- */
-public interface YangStoreService {
+import com.google.common.base.Function;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import java.lang.ref.SoftReference;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicReference;
+import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.notifications.BaseNetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.BaseNotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChangeBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.changed.by.parms.ChangedByBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.changed.by.parms.changed.by.server.or.user.ServerBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class YangStoreService implements YangStoreContext {
+
+ private static final Logger LOG = LoggerFactory.getLogger(YangStoreService.class);
/**
- * Module entry objects mapped to module names and namespaces.
+ * This is a rather interesting locking model. We need to guard against both the
+ * cache expiring from GC and being invalidated by schema context change. The
+ * context can change while we are doing processing, so we do not want to block
+ * it, so no synchronization can happen on the methods.
+ *
+ * So what we are doing is the following:
+ *
+ * We synchronize with GC as usual, using a SoftReference.
*
- * @return actual view of what is available in OSGi service registry.
+ * The atomic reference is used to synchronize with {@link #refresh()}, e.g. when
+ * refresh happens, it will push a SoftReference(null), e.g. simulate the GC. Now
+ * that may happen while the getter is already busy acting on the old schema context,
+ * so it needs to understand that a refresh has happened and retry. To do that, it
+     * attempts a CAS operation -- if it fails, it knows that the SoftReference has
+ * been replaced and thus it needs to retry.
+ *
+ * Note that {@link #getYangStoreSnapshot()} will still use synchronize() internally
+ * to stop multiple threads doing the same work.
+ */
+ private final AtomicReference<SoftReference<YangStoreSnapshot>> ref =
+ new AtomicReference<>(new SoftReference<YangStoreSnapshot>(null));
+
+ private final SchemaContextProvider schemaContextProvider;
+ private final BaseNetconfNotificationListener notificationPublisher;
+
+ private final ExecutorService notificationExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
+ @Override
+ public Thread newThread(final Runnable r) {
+ return new Thread(r, "config-netconf-connector-capability-notifications");
+ }
+ });
+
+ private final Set<CapabilityListener> listeners = Collections.synchronizedSet(new HashSet<CapabilityListener>());
+
+ public YangStoreService(final SchemaContextProvider schemaContextProvider, final BundleContext context) {
+ this(schemaContextProvider, new NotificationCollectorTracker(context));
+ }
+
+ public YangStoreService(final SchemaContextProvider schemaContextProvider, final BaseNetconfNotificationListener notificationHandler) {
+ this.schemaContextProvider = schemaContextProvider;
+ this.notificationPublisher = notificationHandler;
+ }
+
+ private synchronized YangStoreContext getYangStoreSnapshot() {
+ SoftReference<YangStoreSnapshot> r = ref.get();
+ YangStoreSnapshot ret = r.get();
+
+ while (ret == null) {
+            // We need to compute a new value
+ ret = new YangStoreSnapshot(schemaContextProvider.getSchemaContext());
+
+ if (!ref.compareAndSet(r, new SoftReference<>(ret))) {
+ LOG.debug("Concurrent refresh detected, recomputing snapshot");
+ r = ref.get();
+ ret = null;
+ }
+ }
+
+ return ret;
+ }
+
+ @Override
+ public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
+ return getYangStoreSnapshot().getModuleMXBeanEntryMap();
+ }
+
+ @Override
+ public Map<QName, Map<String, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries() {
+ return getYangStoreSnapshot().getQNamesToIdentitiesToModuleMXBeanEntries();
+ }
+
+ @Override
+ public Set<Module> getModules() {
+ return getYangStoreSnapshot().getModules();
+ }
+
+ @Override
+ public String getModuleSource(final ModuleIdentifier moduleIdentifier) {
+ return getYangStoreSnapshot().getModuleSource(moduleIdentifier);
+ }
+
+ public void refresh() {
+ final YangStoreSnapshot previous = ref.get().get();
+ ref.set(new SoftReference<YangStoreSnapshot>(null));
+ notificationExecutor.submit(new CapabilityChangeNotifier(previous));
+ }
+
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ if(ref.get() == null || ref.get().get() == null) {
+ getYangStoreSnapshot();
+ }
+
+ this.listeners.add(listener);
+ listener.onCapabilitiesAdded(NetconfOperationServiceFactoryImpl.setupCapabilities(ref.get().get()));
+
+ return new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ YangStoreService.this.listeners.remove(listener);
+ }
+ };
+ }
+
+ private static final Function<Module, Capability> MODULE_TO_CAPABILITY = new Function<Module, Capability>() {
+ @Override
+ public Capability apply(final Module module) {
+ return new NetconfOperationServiceFactoryImpl.YangStoreCapability(module, module.getSource());
+ }
+ };
+
+ private final class CapabilityChangeNotifier implements Runnable {
+
+ private final YangStoreSnapshot previous;
+
+ public CapabilityChangeNotifier(final YangStoreSnapshot previous) {
+ this.previous = previous;
+ }
+
+ @Override
+ public void run() {
+ final YangStoreContext current = getYangStoreSnapshot();
+
+ if(current.equals(previous) == false) {
+ final Sets.SetView<Module> removed = Sets.difference(previous.getModules(), current.getModules());
+ final Sets.SetView<Module> added = Sets.difference(current.getModules(), previous.getModules());
+
+ // Notify notification manager
+ notificationPublisher.onCapabilityChanged(computeDiff(removed, added));
+
+ // Notify direct capability listener TODO would it not be better if the capability listeners went through notification manager ?
+ for (final CapabilityListener listener : listeners) {
+ listener.onCapabilitiesAdded(Sets.newHashSet(Collections2.transform(added, MODULE_TO_CAPABILITY)));
+ }
+ for (final CapabilityListener listener : listeners) {
+ listener.onCapabilitiesRemoved(Sets.newHashSet(Collections2.transform(removed, MODULE_TO_CAPABILITY)));
+ }
+ }
+ }
+ }
+
+ private static final Function<Module, Uri> MODULE_TO_URI = new Function<Module, Uri>() {
+ @Override
+ public Uri apply(final Module input) {
+ return new Uri(new NetconfOperationServiceFactoryImpl.YangStoreCapability(input, input.getSource()).getCapabilityUri());
+ }
+ };
+
+ static NetconfCapabilityChange computeDiff(final Sets.SetView<Module> removed, final Sets.SetView<Module> added) {
+ final NetconfCapabilityChangeBuilder netconfCapabilityChangeBuilder = new NetconfCapabilityChangeBuilder();
+ netconfCapabilityChangeBuilder.setChangedBy(new ChangedByBuilder().setServerOrUser(new ServerBuilder().setServer(true).build()).build());
+ netconfCapabilityChangeBuilder.setDeletedCapability(Lists.newArrayList(Collections2.transform(removed, MODULE_TO_URI)));
+ netconfCapabilityChangeBuilder.setAddedCapability(Lists.newArrayList(Collections2.transform(added, MODULE_TO_URI)));
+ // TODO modified should be computed ... but why ?
+ netconfCapabilityChangeBuilder.setModifiedCapability(Collections.<Uri>emptyList());
+ return netconfCapabilityChangeBuilder.build();
+ }
+
+
+ /**
+ * Looks for NetconfNotificationCollector service and publishes base netconf notifications if possible
*/
- YangStoreSnapshot getYangStoreSnapshot() throws YangStoreException;
+ private static class NotificationCollectorTracker implements ServiceTrackerCustomizer<NetconfNotificationCollector, NetconfNotificationCollector>, BaseNetconfNotificationListener, AutoCloseable {
+
+ private final BundleContext context;
+ private final ServiceTracker<NetconfNotificationCollector, NetconfNotificationCollector> listenerTracker;
+ private BaseNotificationPublisherRegistration publisherReg;
+
+ public NotificationCollectorTracker(final BundleContext context) {
+ this.context = context;
+ listenerTracker = new ServiceTracker<>(context, NetconfNotificationCollector.class, this);
+ listenerTracker.open();
+ }
+
+ @Override
+ public synchronized NetconfNotificationCollector addingService(final ServiceReference<NetconfNotificationCollector> reference) {
+ closePublisherRegistration();
+ publisherReg = context.getService(reference).registerBaseNotificationPublisher();
+ return null;
+ }
+
+ @Override
+ public synchronized void modifiedService(final ServiceReference<NetconfNotificationCollector> reference, final NetconfNotificationCollector service) {
+ closePublisherRegistration();
+ publisherReg = context.getService(reference).registerBaseNotificationPublisher();
+ }
+
+ @Override
+ public synchronized void removedService(final ServiceReference<NetconfNotificationCollector> reference, final NetconfNotificationCollector service) {
+ closePublisherRegistration();
+ publisherReg = null;
+ }
+
+ private void closePublisherRegistration() {
+ if(publisherReg != null) {
+ publisherReg.close();
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ closePublisherRegistration();
+ listenerTracker.close();
+ }
+
+ @Override
+ public void onCapabilityChanged(final NetconfCapabilityChange capabilityChange) {
+ if(publisherReg == null) {
+ LOG.warn("Omitting notification due to missing notification service: {}", capabilityChange);
+ return;
+ }
+ publisherReg.onCapabilityChanged(capabilityChange);
+ }
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-import java.lang.ref.SoftReference;
-import java.util.concurrent.atomic.AtomicReference;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class YangStoreServiceImpl implements YangStoreService {
- private static final Logger LOG = LoggerFactory.getLogger(YangStoreServiceImpl.class);
-
- /**
- * This is a rather interesting locking model. We need to guard against both the
- * cache expiring from GC and being invalidated by schema context change. The
- * context can change while we are doing processing, so we do not want to block
- * it, so no synchronization can happen on the methods.
- *
- * So what we are doing is the following:
- *
- * We synchronize with GC as usual, using a SoftReference.
- *
- * The atomic reference is used to synchronize with {@link #refresh()}, e.g. when
- * refresh happens, it will push a SoftReference(null), e.g. simulate the GC. Now
- * that may happen while the getter is already busy acting on the old schema context,
- * so it needs to understand that a refresh has happened and retry. To do that, it
- * attempts a CAS operation -- if it fails, in knows that the SoftReference has
- * been replaced and thus it needs to retry.
- *
- * Note that {@link #getYangStoreSnapshot()} will still use synchronize() internally
- * to stop multiple threads doing the same work.
- */
- private final AtomicReference<SoftReference<YangStoreSnapshotImpl>> ref = new AtomicReference<>(new SoftReference<YangStoreSnapshotImpl>(null));
- private final SchemaContextProvider service;
-
- public YangStoreServiceImpl(final SchemaContextProvider service) {
- this.service = service;
- }
-
- @Override
- public synchronized YangStoreSnapshotImpl getYangStoreSnapshot() throws YangStoreException {
- SoftReference<YangStoreSnapshotImpl> r = ref.get();
- YangStoreSnapshotImpl ret = r.get();
-
- while (ret == null) {
- // We need to be compute a new value
- ret = new YangStoreSnapshotImpl(service.getSchemaContext());
-
- if (!ref.compareAndSet(r, new SoftReference<>(ret))) {
- LOG.debug("Concurrent refresh detected, recomputing snapshot");
- r = ref.get();
- ret = null;
- }
- }
-
- return ret;
- }
-
- /**
- * Called when schema context changes, invalidates cache.
- */
- public void refresh() {
- ref.set(new SoftReference<YangStoreSnapshotImpl>(null));
- }
-}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
+
package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
+import com.google.common.collect.Maps;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
+import org.opendaylight.controller.config.yangjmxgenerator.ServiceInterfaceEntry;
+import org.opendaylight.controller.config.yangjmxgenerator.TypeProviderWrapper;
+import org.opendaylight.yangtools.sal.binding.yang.types.TypeProviderImpl;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class YangStoreSnapshot implements YangStoreContext {
+ private static final Logger LOG = LoggerFactory.getLogger(YangStoreSnapshot.class);
+
+
+ private final Map<String /* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> moduleMXBeanEntryMap;
+
+
+ private final Map<QName, Map<String, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries;
+
+ private final SchemaContext schemaContext;
+
+ public YangStoreSnapshot(final SchemaContext resolveSchemaContext) {
+ LOG.trace("Resolved modules:{}", resolveSchemaContext.getModules());
+ this.schemaContext = resolveSchemaContext;
+ // JMX generator
+
+ Map<String, String> namespaceToPackageMapping = Maps.newHashMap();
+ PackageTranslator packageTranslator = new PackageTranslator(namespaceToPackageMapping);
+ Map<QName, ServiceInterfaceEntry> qNamesToSIEs = new HashMap<>();
+ Map<IdentitySchemaNode, ServiceInterfaceEntry> knownSEITracker = new HashMap<>();
+ // create SIE structure qNamesToSIEs
+ for (Module module : resolveSchemaContext.getModules()) {
+ String packageName = packageTranslator.getPackageName(module);
+ Map<QName, ServiceInterfaceEntry> namesToSIEntries = ServiceInterfaceEntry
+ .create(module, packageName, knownSEITracker);
+ for (Entry<QName, ServiceInterfaceEntry> sieEntry : namesToSIEntries.entrySet()) {
+ // merge value into qNamesToSIEs
+ if (qNamesToSIEs.containsKey(sieEntry.getKey()) == false) {
+ qNamesToSIEs.put(sieEntry.getKey(), sieEntry.getValue());
+ } else {
+ throw new IllegalStateException("Cannot add two SIE with same qname "
+ + sieEntry.getValue());
+ }
+ }
+ }
+
+ Map<String, Map<String, ModuleMXBeanEntry>> moduleMXBeanEntryMap = Maps.newHashMap();
+
+ Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries = new HashMap<>();
+
-public interface YangStoreSnapshot extends AutoCloseable {
+ for (Module module : schemaContext.getModules()) {
+ String packageName = packageTranslator.getPackageName(module);
+ TypeProviderWrapper typeProviderWrapper = new TypeProviderWrapper(
+ new TypeProviderImpl(resolveSchemaContext));
- /**
- * @deprecated Use {@link #getQNamesToIdentitiesToModuleMXBeanEntries()} instead. This method return only one
- * module representation even if multiple revisions are available.
- */
- @Deprecated
- Map<String/* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> getModuleMXBeanEntryMap();
+ QName qName = QName.create(module.getNamespace(), module.getRevision(), module.getName());
+ Map<String /* MB identity local name */, ModuleMXBeanEntry> namesToMBEs =
+ Collections.unmodifiableMap(ModuleMXBeanEntry.create(module, qNamesToSIEs, resolveSchemaContext,
+ typeProviderWrapper, packageName));
+ moduleMXBeanEntryMap.put(module.getNamespace().toString(), namesToMBEs);
+
+ qNamesToIdentitiesToModuleMXBeanEntries.put(qName, namesToMBEs);
+ }
+ this.moduleMXBeanEntryMap = Collections.unmodifiableMap(moduleMXBeanEntryMap);
+ this.qNamesToIdentitiesToModuleMXBeanEntries = Collections.unmodifiableMap(qNamesToIdentitiesToModuleMXBeanEntries);
+
+ }
+
+ @Override
+ public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
+ return moduleMXBeanEntryMap;
+ }
+
+ @Override
+ public Map<QName, Map<String, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries() {
+ return qNamesToIdentitiesToModuleMXBeanEntries;
+ }
+
+ @Override
+ public Set<Module> getModules() {
+ return schemaContext.getModules();
+ }
+
+ @Override
+ public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
+ return schemaContext.getModuleSource(moduleIdentifier).get();
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
- Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries();
+ final YangStoreSnapshot that = (YangStoreSnapshot) o;
- /**
- * Get all modules discovered when this snapshot was created.
- * @return all modules discovered. If one module exists with two different revisions, return both.
- */
- Set<Module> getModules();
+ if (schemaContext != null ? !schemaContext.equals(that.schemaContext) : that.schemaContext != null)
+ return false;
- String getModuleSource(ModuleIdentifier moduleIdentifier);
+ return true;
+ }
@Override
- void close();
+ public int hashCode() {
+ return schemaContext != null ? schemaContext.hashCode() : 0;
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-import com.google.common.collect.Maps;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
-import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
-import org.opendaylight.controller.config.yangjmxgenerator.ServiceInterfaceEntry;
-import org.opendaylight.controller.config.yangjmxgenerator.TypeProviderWrapper;
-import org.opendaylight.yangtools.sal.binding.yang.types.TypeProviderImpl;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class YangStoreSnapshotImpl implements YangStoreSnapshot {
- private static final Logger LOG = LoggerFactory.getLogger(YangStoreSnapshotImpl.class);
-
-
- private final Map<String /* Namespace from yang file */,
- Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> moduleMXBeanEntryMap;
-
-
- private final Map<QName, Map<String, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries;
-
- private final SchemaContext schemaContext;
-
-
- public YangStoreSnapshotImpl(final SchemaContext resolveSchemaContext) {
- LOG.trace("Resolved modules:{}", resolveSchemaContext.getModules());
- this.schemaContext = resolveSchemaContext;
- // JMX generator
-
- Map<String, String> namespaceToPackageMapping = Maps.newHashMap();
- PackageTranslator packageTranslator = new PackageTranslator(namespaceToPackageMapping);
- Map<QName, ServiceInterfaceEntry> qNamesToSIEs = new HashMap<>();
- Map<IdentitySchemaNode, ServiceInterfaceEntry> knownSEITracker = new HashMap<>();
- // create SIE structure qNamesToSIEs
- for (Module module : resolveSchemaContext.getModules()) {
- String packageName = packageTranslator.getPackageName(module);
- Map<QName, ServiceInterfaceEntry> namesToSIEntries = ServiceInterfaceEntry
- .create(module, packageName, knownSEITracker);
- for (Entry<QName, ServiceInterfaceEntry> sieEntry : namesToSIEntries.entrySet()) {
- // merge value into qNamesToSIEs
- if (qNamesToSIEs.containsKey(sieEntry.getKey()) == false) {
- qNamesToSIEs.put(sieEntry.getKey(), sieEntry.getValue());
- } else {
- throw new IllegalStateException("Cannot add two SIE with same qname "
- + sieEntry.getValue());
- }
- }
- }
-
- Map<String, Map<String, ModuleMXBeanEntry>> moduleMXBeanEntryMap = Maps.newHashMap();
-
- Map<QName, Map<String /* identity local name */, ModuleMXBeanEntry>> qNamesToIdentitiesToModuleMXBeanEntries = new HashMap<>();
-
-
- for (Module module : schemaContext.getModules()) {
- String packageName = packageTranslator.getPackageName(module);
- TypeProviderWrapper typeProviderWrapper = new TypeProviderWrapper(
- new TypeProviderImpl(resolveSchemaContext));
-
- QName qName = QName.create(module.getNamespace(), module.getRevision(), module.getName());
-
- Map<String /* MB identity local name */, ModuleMXBeanEntry> namesToMBEs =
- Collections.unmodifiableMap(ModuleMXBeanEntry.create(module, qNamesToSIEs, resolveSchemaContext,
- typeProviderWrapper, packageName));
- moduleMXBeanEntryMap.put(module.getNamespace().toString(), namesToMBEs);
-
- qNamesToIdentitiesToModuleMXBeanEntries.put(qName, namesToMBEs);
- }
- this.moduleMXBeanEntryMap = Collections.unmodifiableMap(moduleMXBeanEntryMap);
- this.qNamesToIdentitiesToModuleMXBeanEntries = Collections.unmodifiableMap(qNamesToIdentitiesToModuleMXBeanEntries);
-
- }
-
- @Override
- public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
- return moduleMXBeanEntryMap;
- }
-
- @Override
- public Map<QName, Map<String, ModuleMXBeanEntry>> getQNamesToIdentitiesToModuleMXBeanEntries() {
- return qNamesToIdentitiesToModuleMXBeanEntries;
- }
-
- @Override
- public Set<Module> getModules() {
- return schemaContext.getModules();
- }
-
- @Override
- public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
- return schemaContext.getModuleSource(moduleIdentifier).get();
- }
-
- @Override
- public void close() {
-
- }
-}
private final ConfigRegistryClient configRegistryClient;
private final String netconfSessionIdForReporting;
- private ObjectName transaction;
+ private ObjectName candidateTx;
+ private ObjectName readTx;
private final List<ObjectName> allOpenedTransactions = new ArrayList<>();
private static final String NO_TRANSACTION_FOUND_FOR_SESSION = "No transaction found for session ";
public synchronized Optional<ObjectName> getTransaction() {
- if (transaction == null){
+ if (candidateTx == null){
return Optional.absent();
}
// Transaction was already closed somehow
- if (!isStillOpenTransaction(transaction)) {
- LOG.warn("Fixing illegal state: transaction {} was closed in {}", transaction,
+ if (!isStillOpenTransaction(candidateTx)) {
+ LOG.warn("Fixing illegal state: transaction {} was closed in {}", candidateTx,
netconfSessionIdForReporting);
- transaction = null;
+ candidateTx = null;
return Optional.absent();
}
- return Optional.of(transaction);
+ return Optional.of(candidateTx);
+ }
+
+ public synchronized Optional<ObjectName> getReadTransaction() {
+
+ if (readTx == null){
+ return Optional.absent();
+ }
+
+ // Transaction was already closed somehow
+ if (!isStillOpenTransaction(readTx)) {
+ LOG.warn("Fixing illegal state: transaction {} was closed in {}", readTx,
+ netconfSessionIdForReporting);
+ readTx = null;
+ return Optional.absent();
+ }
+ return Optional.of(readTx);
}
private boolean isStillOpenTransaction(ObjectName transaction) {
if (ta.isPresent()) {
return ta.get();
}
- transaction = configRegistryClient.beginConfig();
- allOpenedTransactions.add(transaction);
- return transaction;
+ candidateTx = configRegistryClient.beginConfig();
+ allOpenedTransactions.add(candidateTx);
+ return candidateTx;
+ }
+
+ public synchronized ObjectName getOrCreateReadTransaction() {
+ Optional<ObjectName> ta = getReadTransaction();
+
+ if (ta.isPresent()) {
+ return ta.get();
+ }
+ readTx = configRegistryClient.beginConfig();
+ allOpenedTransactions.add(readTx);
+ return readTx;
}
/**
try {
CommitStatus status = configRegistryClient.commitConfig(taON);
// clean up
- allOpenedTransactions.remove(transaction);
- transaction = null;
+ allOpenedTransactions.remove(candidateTx);
+ candidateTx = null;
return status;
} catch (ValidationException validationException) {
// no clean up: user can reconfigure and recover this transaction
ConfigTransactionClient transactionClient = configRegistryClient.getConfigTransactionClient(taON.get());
transactionClient.abortConfig();
- allOpenedTransactions.remove(transaction);
- transaction = null;
+ allOpenedTransactions.remove(candidateTx);
+ candidateTx = null;
+ }
+
+ public synchronized void closeReadTransaction() {
+ LOG.debug("Closing read transaction");
+ Optional<ObjectName> taON = getReadTransaction();
+ Preconditions.checkState(taON.isPresent(), NO_TRANSACTION_FOUND_FOR_SESSION + netconfSessionIdForReporting);
+
+ ConfigTransactionClient transactionClient = configRegistryClient.getConfigTransactionClient(taON.get());
+ transactionClient.abortConfig();
+ allOpenedTransactions.remove(readTx);
+ readTx = null;
}
public synchronized void abortTestTransaction(ObjectName testTx) {
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.get.Get;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.getconfig.GetConfig;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.runtimerpc.RuntimeRpc;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreServiceImpl;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreService;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+import org.osgi.framework.Filter;
+import org.osgi.framework.ServiceListener;
+import org.osgi.framework.ServiceReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
private TestImplModuleFactory factory4;
@Mock
- YangStoreSnapshot yangStoreSnapshot;
+ YangStoreContext yangStoreSnapshot;
@Mock
NetconfOperationRouter netconfOperationRouter;
@Mock
- NetconfOperationServiceSnapshotImpl netconfOperationServiceSnapshot;
+ AggregatedNetconfOperationServiceFactory netconfOperationServiceSnapshot;
+ @Mock
+ private AutoCloseable sessionCloseable;
private TransactionProvider transactionProvider;
@Before
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
+
+
+ final Filter filter = mock(Filter.class);
+ doReturn(filter).when(mockedContext).createFilter(anyString());
+ doNothing().when(mockedContext).addServiceListener(any(ServiceListener.class), anyString());
+ doReturn(new ServiceReference<?>[]{}).when(mockedContext).getServiceReferences(anyString(), anyString());
+
doReturn(getMbes()).when(this.yangStoreSnapshot).getModuleMXBeanEntryMap();
doReturn(getModules()).when(this.yangStoreSnapshot).getModules();
- doNothing().when(netconfOperationServiceSnapshot).close();
this.factory = new NetconfTestImplModuleFactory();
this.factory2 = new DepTestImplModuleFactory();
this.factory3 = new IdentityTestModuleFactory();
factory4 = new TestImplModuleFactory();
+ doNothing().when(sessionCloseable).close();
+
super.initConfigTransactionManagerImpl(new HardcodedModuleFactoriesResolver(mockedContext, this.factory, this.factory2,
this.factory3, factory4));
edit("netconfMessages/editConfig_none.xml");
closeSession();
- verify(netconfOperationServiceSnapshot).close();
+ verify(sessionCloseable).close();
verifyNoMoreInteractions(netconfOperationRouter);
verifyNoMoreInteractions(netconfOperationServiceSnapshot);
}
private void closeSession() throws NetconfDocumentedException, ParserConfigurationException, SAXException,
IOException {
- DefaultCloseSession closeOp = new DefaultCloseSession(NETCONF_SESSION_ID, netconfOperationServiceSnapshot);
+ DefaultCloseSession closeOp = new DefaultCloseSession(NETCONF_SESSION_ID, sessionCloseable);
executeOp(closeOp, "netconfMessages/closeSession.xml");
}
commit();
}
- @Test(expected = NetconfDocumentedException.class)
+ @Test
public void testEx2() throws Exception {
- discard();
+ assertContainsElement(discard(), readXmlToElement("<ok xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"/>"));
}
- private void discard() throws ParserConfigurationException, SAXException, IOException, NetconfDocumentedException {
+ private Document discard() throws ParserConfigurationException, SAXException, IOException, NetconfDocumentedException {
DiscardChanges discardOp = new DiscardChanges(transactionProvider, configRegistryClient, NETCONF_SESSION_ID);
- executeOp(discardOp, "netconfMessages/discardChanges.xml");
+ return executeOp(discardOp, "netconfMessages/discardChanges.xml");
}
private void checkBinaryLeafEdited(final Document response) throws NodeTestException, SAXException, IOException {
YangParserImpl yangParser = new YangParserImpl();
final SchemaContext schemaContext = yangParser.resolveSchemaContext(new HashSet<>(yangParser.parseYangModelsFromStreamsMapped(yangDependencies).values()));
- YangStoreServiceImpl yangStoreService = new YangStoreServiceImpl(new SchemaContextProvider() {
+ YangStoreService yangStoreService = new YangStoreService(new SchemaContextProvider() {
@Override
public SchemaContext getSchemaContext() {
return schemaContext ;
}
- });
- mBeanEntries.putAll(yangStoreService.getYangStoreSnapshot().getModuleMXBeanEntryMap());
+ }, mockedContext);
+ mBeanEntries.putAll(yangStoreService.getModuleMXBeanEntryMap());
return mBeanEntries;
}
}
private Document get() throws NetconfDocumentedException, ParserConfigurationException, SAXException, IOException {
- Get getOp = new Get(yangStoreSnapshot, configRegistryClient, NETCONF_SESSION_ID);
+ Get getOp = new Get(transactionProvider, yangStoreSnapshot, configRegistryClient, NETCONF_SESSION_ID);
return executeOp(getOp, "netconfMessages/get.xml");
}
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.Services;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.ValidateTest;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfigXmlParser.EditConfigExecution;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
+import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreContext;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public class EditConfigTest {
@Mock
- private YangStoreSnapshot yangStoreSnapshot;
+ private YangStoreContext yangStoreSnapshot;
@Mock
private TransactionProvider provider;
@Mock
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
-
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import java.net.URI;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Map;
-import java.util.Set;
-import org.hamcrest.CoreMatchers;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.config.api.LookupRegistry;
-import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
-import org.opendaylight.yangtools.yang.common.QName;
-
-public class NetconfOperationServiceImplTest {
-
- private static final Date date1970_01_01;
-
- static {
- try {
- date1970_01_01 = new SimpleDateFormat("yyyy-MM-dd").parse("1970-01-01");
- } catch (ParseException e) {
- throw new IllegalStateException(e);
- }
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_ok() throws Exception {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(
- mockJmxClient("qname1", "qname2"),
- mockYangStoreSnapshot("qname2", "qname1"));
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_ok2() throws Exception {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(
- mockJmxClient("qname1", "qname2", "qname4", "qname5"),
- mockYangStoreSnapshot("qname2", "qname1"));
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_ok3() throws Exception {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(
- mockJmxClient(),
- mockYangStoreSnapshot());
- }
-
- @Test
- public void testCheckConsistencyBetweenYangStoreAndConfig_yangStoreMore() throws Exception {
- try {
- NetconfOperationServiceImpl.checkConsistencyBetweenYangStoreAndConfig(mockJmxClient("qname1"),
- mockYangStoreSnapshot("qname2", "qname1"));
- fail("An exception of type " + IllegalStateException.class + " was expected");
- } catch (IllegalStateException e) {
- String message = e.getMessage();
- Assert.assertThat(
- message,
- CoreMatchers
- .containsString("missing from config subsystem but present in yangstore: [(namespace?revision=1970-01-01)qname2]"));
- Assert.assertThat(
- message,
- CoreMatchers
- .containsString("All modules present in config: [(namespace?revision=1970-01-01)qname1]"));
- }
- }
-
- private YangStoreSnapshot mockYangStoreSnapshot(final String... qnames) {
- YangStoreSnapshot mock = mock(YangStoreSnapshot.class);
-
- Map<String, Map<String, ModuleMXBeanEntry>> map = Maps.newHashMap();
-
- Map<String, ModuleMXBeanEntry> innerMap = Maps.newHashMap();
-
- int i = 1;
- for (String qname : qnames) {
- innerMap.put(Integer.toString(i++), mockMBeanEntry(qname));
- }
-
- map.put("1", innerMap);
-
- doReturn(map).when(mock).getModuleMXBeanEntryMap();
-
- return mock;
- }
-
- private ModuleMXBeanEntry mockMBeanEntry(final String qname) {
- ModuleMXBeanEntry mock = mock(ModuleMXBeanEntry.class);
- QName q = getQName(qname);
- doReturn(q).when(mock).getYangModuleQName();
- return mock;
- }
-
- private QName getQName(final String qname) {
- return QName.create(URI.create("namespace"), date1970_01_01, qname);
- }
-
- private LookupRegistry mockJmxClient(final String... visibleQNames) {
- LookupRegistry mock = mock(LookupRegistry.class);
- Set<String> qnames = Sets.newHashSet();
- for (String visibleQName : visibleQNames) {
- QName q = getQName(visibleQName);
- qnames.add(q.toString());
- }
- doReturn(qnames).when(mock).getAvailableModuleFactoryQNames();
- return mock;
- }
-}
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
*/
private synchronized EditAndCommitResponse pushConfigWithConflictingVersionRetries(ConfigSnapshotHolder configSnapshotHolder) throws NetconfDocumentedException {
ConflictingVersionException lastException;
- Stopwatch stopwatch = new Stopwatch();
+ Stopwatch stopwatch = Stopwatch.createUnstarted();
do {
String idForReporting = configSnapshotHolder.toString();
SortedSet<String> expectedCapabilities = checkNotNull(configSnapshotHolder.getCapabilities(),
}
private NetconfOperationService getOperationServiceWithRetries(Set<String> expectedCapabilities, String idForReporting) {
- Stopwatch stopwatch = new Stopwatch().start();
+ Stopwatch stopwatch = Stopwatch.createStarted();
NotEnoughCapabilitiesException lastException;
do {
try {
} catch(RuntimeException e) {
throw new NotEnoughCapabilitiesException("Netconf service not stable for " + idForReporting, e);
}
- Set<String> notFoundDiff = computeNotFoundCapabilities(expectedCapabilities, serviceCandidate);
+ Set<String> notFoundDiff = computeNotFoundCapabilities(expectedCapabilities, configNetconfConnector);
if (notFoundDiff.isEmpty()) {
return serviceCandidate;
} else {
serviceCandidate.close();
LOG.trace("Netconf server did not provide required capabilities for {} ", idForReporting,
"Expected but not found: {}, all expected {}, current {}",
- notFoundDiff, expectedCapabilities, serviceCandidate.getCapabilities()
+ notFoundDiff, expectedCapabilities, configNetconfConnector.getCapabilities()
);
throw new NotEnoughCapabilitiesException("Not enough capabilities for " + idForReporting + ". Expected but not found: " + notFoundDiff);
}
}
- private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationService serviceCandidate) {
+ private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationServiceFactory serviceCandidate) {
Collection<String> actual = Collections2.transform(serviceCandidate.getCapabilities(), new Function<Capability, String>() {
@Override
public String apply(@Nonnull final Capability input) {
throw new IllegalStateException("Cannot parse " + configSnapshotHolder);
}
LOG.trace("Pushing last configuration to netconf: {}", configSnapshotHolder);
- Stopwatch stopwatch = new Stopwatch().start();
+ Stopwatch stopwatch = Stopwatch.createStarted();
NetconfMessage editConfigMessage = createEditConfigMessage(xmlToBePersisted);
Document editResponseMessage = sendRequestGetResponseCheckIsOK(editConfigMessage, operationService,
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import javax.management.MBeanServer;
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.persist.impl.ConfigPusherImpl;
import org.opendaylight.controller.netconf.persist.impl.PersisterAggregator;
InnerCustomizer innerCustomizer = new InnerCustomizer(configs, maxWaitForCapabilitiesMillis,
conflictingVersionTimeoutMillis, persisterAggregator);
OuterCustomizer outerCustomizer = new OuterCustomizer(context, innerCustomizer);
- new ServiceTracker<>(context, NetconfOperationProvider.class, outerCustomizer).open();
+ new ServiceTracker<>(context, NetconfOperationServiceFactory.class, outerCustomizer).open();
}
private long getConflictingVersionTimeoutMillis(PropertiesProviderBaseImpl propertiesProvider) {
")";
}
- class OuterCustomizer implements ServiceTrackerCustomizer<NetconfOperationProvider, NetconfOperationProvider> {
+ class OuterCustomizer implements ServiceTrackerCustomizer<NetconfOperationServiceFactory, NetconfOperationServiceFactory> {
private final BundleContext context;
private final InnerCustomizer innerCustomizer;
}
@Override
- public NetconfOperationProvider addingService(ServiceReference<NetconfOperationProvider> reference) {
+ public NetconfOperationServiceFactory addingService(ServiceReference<NetconfOperationServiceFactory> reference) {
LOG.trace("Got OuterCustomizer.addingService {}", reference);
// JMX was registered, track config-netconf-connector
Filter filter;
}
@Override
- public void modifiedService(ServiceReference<NetconfOperationProvider> reference, NetconfOperationProvider service) {
+ public void modifiedService(ServiceReference<NetconfOperationServiceFactory> reference, NetconfOperationServiceFactory service) {
}
@Override
- public void removedService(ServiceReference<NetconfOperationProvider> reference, NetconfOperationProvider service) {
+ public void removedService(ServiceReference<NetconfOperationServiceFactory> reference, NetconfOperationServiceFactory service) {
}
}
private final List<ConfigSnapshotHolder> configs;
private final PersisterAggregator persisterAggregator;
private final long maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis;
-
+ // This inner customizer has its filter to find the right operation service, but it gets triggered after any
+ // operation service appears. This means that it could start pushing thread up to N times (N = number of operation services spawned in OSGi)
+ private final AtomicBoolean alreadyStarted = new AtomicBoolean(false);
InnerCustomizer(List<ConfigSnapshotHolder> configs, long maxWaitForCapabilitiesMillis, long conflictingVersionTimeoutMillis,
PersisterAggregator persisterAggregator) {
@Override
public NetconfOperationServiceFactory addingService(ServiceReference<NetconfOperationServiceFactory> reference) {
+ if(alreadyStarted.compareAndSet(false, true) == false) {
+ //Prevents multiple calls to this method spawning multiple pushing threads
+ return reference.getBundle().getBundleContext().getService(reference);
+ }
LOG.trace("Got InnerCustomizer.addingService {}", reference);
NetconfOperationServiceFactory service = reference.getBundle().getBundleContext().getService(reference);
@Override
public void modifiedService(ServiceReference<NetconfOperationServiceFactory> reference, NetconfOperationServiceFactory service) {
+ LOG.trace("Got InnerCustomizer.modifiedService {}", reference);
}
@Override
public void removedService(ServiceReference<NetconfOperationServiceFactory> reference, NetconfOperationServiceFactory service) {
+ LOG.trace("Got InnerCustomizer.removedService {}", reference);
}
}
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.config.api.ConflictingVersionException;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
private TestingExceptionHandler handler;
- private void setUpContextAndStartPersister(String requiredCapability) throws Exception {
+ private void setUpContextAndStartPersister(String requiredCapability, final NetconfOperationService conflictingService) throws Exception {
DummyAdapterWithInitialSnapshot.expectedCapability = requiredCapability;
ctx = new MockedBundleContext(1000, 1000);
+ doReturn(getConflictingService()).when(ctx.serviceFactory).createService(anyString());
configPersisterActivator = new ConfigPersisterActivator();
configPersisterActivator.start(ctx.getBundleContext());
}
@Test
public void testPersisterNotAllCapabilitiesProvided() throws Exception {
- setUpContextAndStartPersister("required-cap");
+ setUpContextAndStartPersister("required-cap", getConflictingService());
Thread.sleep(2000);
handler.assertException(IllegalStateException.class, "Max wait for capabilities reached.Not enough capabilities " +
"for <data><config-snapshot/></data>. Expected but not found: [required-cap]");
@Test
public void testPersisterSuccessfulPush() throws Exception {
- setUpContextAndStartPersister("cap1");
+ setUpContextAndStartPersister("cap1", getConflictingService());
NetconfOperationService service = getWorkingService(getOKDocument());
doReturn(service).when(ctx.serviceFactory).createService(anyString());
Thread.sleep(2000);
public NetconfOperationService getWorkingService(Document document) throws SAXException, IOException, NetconfDocumentedException {
NetconfOperationService service = mock(NetconfOperationService.class);
Capability capability = mock(Capability.class);
- doReturn(Sets.newHashSet(capability)).when(service).getCapabilities();
+// doReturn(Sets.newHashSet(capability)).when(service).getCapabilities();
doReturn("cap1").when(capability).getCapabilityUri();
@Test
public void testPersisterConflictingVersionException() throws Exception {
- setUpContextAndStartPersister("cap1");
+ setUpContextAndStartPersister("cap1", getConflictingService());
- doReturn(getConflictingService()).when(ctx.serviceFactory).createService(anyString());
Thread.sleep(2000);
handler.assertException(IllegalStateException.class, "Max wait for conflicting version stabilization timeout");
}
@Test
public void testSuccessConflictingVersionException() throws Exception {
- setUpContextAndStartPersister("cap1");
+ setUpContextAndStartPersister("cap1", getConflictingService());
doReturn(getConflictingService()).when(ctx.serviceFactory).createService(anyString());
Thread.sleep(500);
// working service:
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.config.persist.api.PropertiesProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.persist.impl.DummyAdapter;
doReturn(null).when(context).getProperty(anyString());
initContext(maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis);
- String outerFilterString = "(objectClass=org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider)";
+ String outerFilterString = "(objectClass=org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory)";
doReturn(outerFilter).when(context).createFilter(outerFilterString);
doNothing().when(context).addServiceListener(any(ServiceListener.class), eq(outerFilterString));
ServiceReference<?>[] toBeReturned = {serviceReference};
- doReturn(toBeReturned).when(context).getServiceReferences(NetconfOperationProvider.class.getName(), null);
+ doReturn(toBeReturned).when(context).getServiceReferences(NetconfOperationServiceFactory.class.getName(), null);
String innerFilterString = "innerfilter";
doReturn(innerFilterString).when(outerFilter).toString();
doReturn(bundle).when(serviceReference).getBundle();
doReturn(context).when(bundle).getBundleContext();
doReturn("").when(serviceReference).toString();
+ doReturn("context").when(context).toString();
doReturn(serviceFactory).when(context).getService(any(ServiceReference.class));
doReturn(service).when(serviceFactory).createService(anyString());
- doReturn(Collections.emptySet()).when(service).getCapabilities();
+ final Capability cap = mock(Capability.class);
+ doReturn("cap1").when(cap).getCapabilityUri();
+ doReturn(Collections.singleton(cap)).when(serviceFactory).getCapabilities();
doNothing().when(service).close();
doReturn("serviceFactoryMock").when(serviceFactory).toString();
import ietf-yang-types {
prefix yang;
+ revision-date "2010-09-24";
}
+
import ietf-inet-types {
prefix inet;
}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-subsystem</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ <packaging>bundle</packaging>
+ <name>${project.artifactId}</name>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.*,
+ org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.*,
+ org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.*
+ </Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+module ietf-netconf-notifications {
+
+ namespace
+ "urn:ietf:params:xml:ns:yang:ietf-netconf-notifications";
+
+ prefix ncn;
+
+ import ietf-inet-types { prefix inet; }
+ import ietf-netconf { prefix nc; }
+
+ organization
+ "IETF NETCONF (Network Configuration Protocol) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netconf/>
+ WG List: <mailto:netconf@ietf.org>
+
+ WG Chair: Bert Wijnen
+ <mailto:bertietf@bwijnen.net>
+
+ WG Chair: Mehmet Ersue
+ <mailto:mehmet.ersue@nsn.com>
+
+ Editor: Andy Bierman
+ <mailto:andy@netconfcentral.org>";
+
+ description
+ "This module defines a YANG data model for use with the
+ NETCONF protocol that allows the NETCONF client to
+ receive common NETCONF base event notifications.
+
+ Copyright (c) 2012 IETF Trust and the persons identified as
+ the document authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+
+
+
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6470; see
+ the RFC itself for full legal notices.";
+
+ revision "2012-02-06" {
+ description
+ "Initial version. Errata 3957 added.";
+ reference
+ "RFC 6470: NETCONF Base Notifications";
+ }
+
+ grouping common-session-parms {
+ description
+ "Common session parameters to identify a
+ management session.";
+
+ leaf username {
+ type string;
+ mandatory true;
+ description
+ "Name of the user for the session.";
+ }
+
+ leaf session-id {
+ type nc:session-id-or-zero-type;
+ mandatory true;
+ description
+ "Identifier of the session.
+ A NETCONF session MUST be identified by a non-zero value.
+ A non-NETCONF session MAY be identified by the value zero.";
+ }
+
+ leaf source-host {
+ type inet:ip-address;
+ description
+ "Address of the remote host for the session.";
+ }
+ }
+
+
+
+
+
+
+
+
+ grouping changed-by-parms {
+ description
+ "Common parameters to identify the source
+ of a change event, such as a configuration
+ or capability change.";
+
+ container changed-by {
+ description
+ "Indicates the source of the change.
+ If caused by internal action, then the
+ empty leaf 'server' will be present.
+ If caused by a management session, then
+ the name, remote host address, and session ID
+ of the session that made the change will be reported.";
+ choice server-or-user {
+ mandatory true;
+ leaf server {
+ type empty;
+ description
+ "If present, the change was caused
+ by the server.";
+ }
+
+ case by-user {
+ uses common-session-parms;
+ }
+ } // choice server-or-user
+ } // container changed-by-parms
+ }
+
+
+ notification netconf-config-change {
+ description
+ "Generated when the NETCONF server detects that the
+ <running> or <startup> configuration datastore
+ has been changed by a management session.
+ The notification summarizes the edits that
+ have been detected.
+
+ The server MAY choose to also generate this
+ notification while loading a datastore during the
+ boot process for the device.";
+
+ uses changed-by-parms;
+
+
+
+
+
+ leaf datastore {
+ type enumeration {
+ enum running {
+ description "The <running> datastore has changed.";
+ }
+ enum startup {
+ description "The <startup> datastore has changed";
+ }
+ }
+ default "running";
+ description
+ "Indicates which configuration datastore has changed.";
+ }
+
+ list edit {
+ description
+ "An edit record SHOULD be present for each distinct
+ edit operation that the server has detected on
+ the target datastore. This list MAY be omitted
+ if the detailed edit operations are not known.
+ The server MAY report entries in this list for
+ changes not made by a NETCONF session (e.g., CLI).";
+
+ leaf target {
+ type instance-identifier;
+ description
+ "Topmost node associated with the configuration change.
+ A server SHOULD set this object to the node within
+ the datastore that is being altered. A server MAY
+ set this object to one of the ancestors of the actual
+ node that was changed, or omit this object, if the
+ exact node is not known.";
+ }
+
+ leaf operation {
+ type nc:edit-operation-type;
+ description
+ "Type of edit operation performed.
+ A server MUST set this object to the NETCONF edit
+ operation performed on the target datastore.";
+ }
+ } // list edit
+ } // notification netconf-config-change
+
+
+
+
+
+
+ notification netconf-capability-change {
+ description
+ "Generated when the NETCONF server detects that
+ the server capabilities have changed.
+ Indicates which capabilities have been added, deleted,
+ and/or modified. The manner in which a server
+ capability is changed is outside the scope of this
+ document.";
+
+ uses changed-by-parms;
+
+ leaf-list added-capability {
+ type inet:uri;
+ description
+ "List of capabilities that have just been added.";
+ }
+
+ leaf-list deleted-capability {
+ type inet:uri;
+ description
+ "List of capabilities that have just been deleted.";
+ }
+
+ leaf-list modified-capability {
+ type inet:uri;
+ description
+ "List of capabilities that have just been modified.
+ A capability is considered to be modified if the
+ base URI for the capability has not changed, but
+ one or more of the parameters encoded at the end of
+ the capability URI have changed.
+ The new modified value of the complete URI is returned.";
+ }
+ } // notification netconf-capability-change
+
+
+ notification netconf-session-start {
+ description
+ "Generated when a NETCONF server detects that a
+ NETCONF session has started. A server MAY generate
+ this event for non-NETCONF management sessions.
+ Indicates the identity of the user that started
+ the session.";
+ uses common-session-parms;
+ } // notification netconf-session-start
+
+
+
+
+ notification netconf-session-end {
+ description
+ "Generated when a NETCONF server detects that a
+ NETCONF session has terminated.
+ A server MAY optionally generate this event for
+ non-NETCONF management sessions. Indicates the
+ identity of the user that owned the session,
+ and why the session was terminated.";
+
+ uses common-session-parms;
+
+ leaf killed-by {
+ when "../termination-reason = 'killed'";
+ type nc:session-id-type;
+ description
+ "The ID of the session that directly caused this session
+ to be abnormally terminated. If this session was abnormally
+ terminated by a non-NETCONF session unknown to the server,
+ then this leaf will not be present.";
+ }
+
+ leaf termination-reason {
+ type enumeration {
+ enum "closed" {
+ description
+ "The session was terminated by the client in normal
+ fashion, e.g., by the NETCONF <close-session>
+ protocol operation.";
+ }
+ enum "killed" {
+ description
+ "The session was terminated in abnormal
+ fashion, e.g., by the NETCONF <kill-session>
+ protocol operation.";
+ }
+ enum "dropped" {
+ description
+ "The session was terminated because the transport layer
+ connection was unexpectedly closed.";
+ }
+ enum "timeout" {
+ description
+ "The session was terminated because of inactivity,
+ e.g., waiting for the <hello> message or <rpc>
+ messages.";
+ }
+
+
+
+ enum "bad-hello" {
+ description
+ "The client's <hello> message was invalid.";
+ }
+ enum "other" {
+ description
+ "The session was terminated for some other reason.";
+ }
+ }
+ mandatory true;
+ description
+ "Reason the session was terminated.";
+ }
+ } // notification netconf-session-end
+
+
+ notification netconf-confirmed-commit {
+ description
+ "Generated when a NETCONF server detects that a
+ confirmed-commit event has occurred. Indicates the event
+ and the current state of the confirmed-commit procedure
+ in progress.";
+ reference
+ "RFC 6241, Section 8.4";
+
+ uses common-session-parms {
+ when "confirm-event != 'timeout'";
+ }
+
+ leaf confirm-event {
+ type enumeration {
+ enum "start" {
+ description
+ "The confirmed-commit procedure has started.";
+ }
+ enum "cancel" {
+ description
+ "The confirmed-commit procedure has been canceled,
+ e.g., due to the session being terminated, or an
+ explicit <cancel-commit> operation.";
+ }
+ enum "timeout" {
+ description
+ "The confirmed-commit procedure has been canceled
+ due to the confirm-timeout interval expiring.
+ The common session parameters will not be present
+ in this sub-mode.";
+ }
+
+ enum "extend" {
+ description
+ "The confirmed-commit timeout has been extended,
+ e.g., by a new <confirmed-commit> operation.";
+ }
+ enum "complete" {
+ description
+ "The confirmed-commit procedure has been completed.";
+ }
+ }
+ mandatory true;
+ description
+ "Indicates the event that caused the notification.";
+ }
+
+ leaf timeout {
+ when
+ "../confirm-event = 'start' or ../confirm-event = 'extend'";
+ type uint32;
+ units "seconds";
+ description
+ "The configured timeout value if the event type
+ is 'start' or 'extend'. This value represents
+ the approximate number of seconds from the event
+ time when the 'timeout' event might occur.";
+ }
+ } // notification netconf-confirmed-commit
+
+}
--- /dev/null
+module nc-notifications {
+
+ namespace "urn:ietf:params:xml:ns:netmod:notification";
+ prefix "manageEvent";
+
+ import ietf-yang-types{ prefix yang; }
+ import notifications { prefix ncEvent; }
+
+ organization
+ "IETF NETCONF WG";
+
+ contact
+ "netconf@ietf.org";
+
+ description
+ "Conversion of the 'manageEvent' XSD in the NETCONF
+ Notifications RFC.";
+
+ reference
+ "RFC 5277";
+
+ revision 2008-07-14 {
+ description "RFC 5277 version.";
+ }
+
+ container netconf {
+ description "Top-level element in the notification namespace";
+
+ config false;
+
+ container streams {
+ description
+ "The list of event streams supported by the system. When
+ a query is issued, the returned set of streams is
+ determined based on user privileges.";
+
+ list stream {
+ description
+ "Stream name, description and other information.";
+ key name;
+ min-elements 1;
+
+ leaf name {
+ description
+ "The name of the event stream. If this is the default
+ NETCONF stream, this must have the value 'NETCONF'.";
+ type ncEvent:streamNameType;
+ }
+
+ leaf description {
+ description
+ "A description of the event stream, including such
+ information as the type of events that are sent over
+ this stream.";
+ type string;
+ mandatory true;
+ }
+
+ leaf replaySupport {
+ description
+ "A description of the event stream, including such
+ information as the type of events that are sent over
+ this stream.";
+ type boolean;
+ mandatory true;
+ }
+
+ leaf replayLogCreationTime {
+ description
+ "The timestamp of the creation of the log used to support
+ the replay function on this stream. Note that this might
+ be earlier then the earliest available notification in
+ the log. This object is updated if the log resets for
+ some reason. This object MUST be present if replay is
+ supported.";
+ type yang:date-and-time; // xsd:dateTime is wrong!
+ }
+ }
+ }
+ }
+
+ notification replayComplete {
+ description
+ "This notification is sent to signal the end of a replay
+ portion of a subscription.";
+ }
+
+ notification notificationComplete {
+ description
+ "This notification is sent to signal the end of a notification
+ subscription. It is sent in the case that stopTime was
+ specified during the creation of the subscription.";
+ }
+
+}
--- /dev/null
+module notifications {
+
+ namespace "urn:ietf:params:xml:ns:netconf:notification:1.0";
+ prefix "ncEvent";
+
+ import ietf-yang-types { prefix yang; }
+
+ organization
+ "IETF NETCONF WG";
+
+ contact
+ "netconf@ops.ietf.org";
+
+ description
+ "Conversion of the 'ncEvent' XSD in the
+ NETCONF Notifications RFC.";
+
+ reference
+ "RFC 5277.";
+
+ revision 2008-07-14 {
+ description "RFC 5277 version.";
+ }
+
+ typedef streamNameType {
+ description
+ "The name of an event stream.";
+ type string;
+ }
+
+ rpc create-subscription {
+ description
+ "The command to create a notification subscription. It
+ takes as argument the name of the notification stream
+ and filter. Both of those options limit the content of
+ the subscription. In addition, there are two time-related
+ parameters, startTime and stopTime, which can be used to
+ select the time interval of interest to the notification
+ replay feature.";
+
+ input {
+ leaf stream {
+ description
+ "An optional parameter that indicates which stream of events
+ is of interest. If not present, then events in the default
+ NETCONF stream will be sent.";
+ type streamNameType;
+ default "NETCONF";
+ }
+
+ anyxml filter {
+ description
+ "An optional parameter that indicates which subset of all
+ possible events is of interest. The format of this
+ parameter is the same as that of the filter parameter
+ in the NETCONF protocol operations. If not present,
+ all events not precluded by other parameters will
+ be sent.";
+ }
+
+ leaf startTime {
+ description
+ "A parameter used to trigger the replay feature and
+ indicates that the replay should start at the time
+ specified. If start time is not present, this is not a
+ replay subscription.";
+ type yang:date-and-time;
+ }
+
+ leaf stopTime {
+ // must ". >= ../startTime";
+ description
+ "An optional parameter used with the optional replay
+ feature to indicate the newest notifications of
+ interest. If stop time is not present, the notifications
+ will continue until the subscription is terminated.
+ Must be used with startTime.";
+ type yang:date-and-time;
+ }
+ }
+ }
+}
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-subsystem</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>ietf-netconf</artifactId>
+ <packaging>bundle</packaging>
+ <name>${project.artifactId}</name>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.base._1._0.rev110601.*</Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+module ietf-netconf {
+
+ // the namespace for NETCONF XML definitions is unchanged
+ // from RFC 4741, which this document replaces
+ namespace "urn:ietf:params:xml:ns:netconf:base:1.0";
+
+ prefix nc;
+
+ import ietf-inet-types {
+ prefix inet;
+ }
+
+ organization
+ "IETF NETCONF (Network Configuration) Working Group";
+
+ contact
+ "WG Web: <http://tools.ietf.org/wg/netconf/>
+ WG List: <netconf@ietf.org>
+
+ WG Chair: Bert Wijnen
+ <bertietf@bwijnen.net>
+
+ WG Chair: Mehmet Ersue
+ <mehmet.ersue@nsn.com>
+
+ Editor: Martin Bjorklund
+ <mbj@tail-f.com>
+
+ Editor: Juergen Schoenwaelder
+ <j.schoenwaelder@jacobs-university.de>
+
+ Editor: Andy Bierman
+ <andy.bierman@brocade.com>";
+ description
+ "NETCONF Protocol Data Types and Protocol Operations.
+
+ Copyright (c) 2011 IETF Trust and the persons identified as
+ the document authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, is permitted pursuant to, and subject
+ to the license terms contained in, the Simplified BSD License
+ set forth in Section 4.c of the IETF Trust's Legal Provisions
+ Relating to IETF Documents
+ (http://trustee.ietf.org/license-info).
+
+ This version of this YANG module is part of RFC 6241; see
+ the RFC itself for full legal notices.";
+
+ revision 2011-06-01 {
+ description
+ "Initial revision;";
+ reference
+ "RFC 6241: Network Configuration Protocol";
+ }
+
+ extension get-filter-element-attributes {
+ description
+ "If this extension is present within an 'anyxml'
+ statement named 'filter', which must be conceptually
+ defined within the RPC input section for the <get>
+ and <get-config> protocol operations, then the
+ following unqualified XML attribute is supported
+ within the <filter> element, within a <get> or
+ <get-config> protocol operation:
+
+ type : optional attribute with allowed
+ value strings 'subtree' and 'xpath'.
+ If missing, the default value is 'subtree'.
+
+ If the 'xpath' feature is supported, then the
+ following unqualified XML attribute is
+ also supported:
+
+ select: optional attribute containing a
+ string representing an XPath expression.
+ The 'type' attribute must be equal to 'xpath'
+ if this attribute is present.";
+ }
+
+ // NETCONF capabilities defined as features
+ feature writable-running {
+ description
+ "NETCONF :writable-running capability;
+ If the server advertises the :writable-running
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.2";
+ }
+
+ feature candidate {
+ description
+ "NETCONF :candidate capability;
+ If the server advertises the :candidate
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.3";
+ }
+
+ feature confirmed-commit {
+ if-feature candidate;
+ description
+ "NETCONF :confirmed-commit:1.1 capability;
+ If the server advertises the :confirmed-commit:1.1
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+
+ reference "RFC 6241, Section 8.4";
+ }
+
+ feature rollback-on-error {
+ description
+ "NETCONF :rollback-on-error capability;
+ If the server advertises the :rollback-on-error
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.5";
+ }
+
+ feature validate {
+ description
+ "NETCONF :validate:1.1 capability;
+ If the server advertises the :validate:1.1
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.6";
+ }
+
+ feature startup {
+ description
+ "NETCONF :startup capability;
+ If the server advertises the :startup
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.7";
+ }
+
+ feature url {
+ description
+ "NETCONF :url capability;
+ If the server advertises the :url
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.8";
+ }
+
+ feature xpath {
+ description
+ "NETCONF :xpath capability;
+ If the server advertises the :xpath
+ capability for a session, then this feature must
+ also be enabled for that session. Otherwise,
+ this feature must not be enabled.";
+ reference "RFC 6241, Section 8.9";
+ }
+
+ // NETCONF Simple Types
+
+ typedef session-id-type {
+ type uint32 {
+ range "1..max";
+ }
+ description
+ "NETCONF Session Id";
+ }
+
+ typedef session-id-or-zero-type {
+ type uint32;
+ description
+ "NETCONF Session Id or Zero to indicate none";
+ }
+ typedef error-tag-type {
+ type enumeration {
+ enum in-use {
+ description
+ "The request requires a resource that
+ already is in use.";
+ }
+ enum invalid-value {
+ description
+ "The request specifies an unacceptable value for one
+ or more parameters.";
+ }
+ enum too-big {
+ description
+ "The request or response (that would be generated) is
+ too large for the implementation to handle.";
+ }
+ enum missing-attribute {
+ description
+ "An expected attribute is missing.";
+ }
+ enum bad-attribute {
+ description
+ "An attribute value is not correct; e.g., wrong type,
+ out of range, pattern mismatch.";
+ }
+ enum unknown-attribute {
+ description
+ "An unexpected attribute is present.";
+ }
+ enum missing-element {
+ description
+ "An expected element is missing.";
+ }
+ enum bad-element {
+ description
+ "An element value is not correct; e.g., wrong type,
+ out of range, pattern mismatch.";
+ }
+ enum unknown-element {
+ description
+ "An unexpected element is present.";
+ }
+ enum unknown-namespace {
+ description
+ "An unexpected namespace is present.";
+ }
+ enum access-denied {
+ description
+ "Access to the requested protocol operation or
+ data model is denied because authorization failed.";
+ }
+ enum lock-denied {
+ description
+ "Access to the requested lock is denied because the
+ lock is currently held by another entity.";
+ }
+ enum resource-denied {
+ description
+ "Request could not be completed because of
+ insufficient resources.";
+ }
+ enum rollback-failed {
+ description
+ "Request to roll back some configuration change (via
+ rollback-on-error or <discard-changes> operations)
+ was not completed for some reason.";
+
+ }
+ enum data-exists {
+ description
+ "Request could not be completed because the relevant
+ data model content already exists. For example,
+ a 'create' operation was attempted on data that
+ already exists.";
+ }
+ enum data-missing {
+ description
+ "Request could not be completed because the relevant
+ data model content does not exist. For example,
+ a 'delete' operation was attempted on
+ data that does not exist.";
+ }
+ enum operation-not-supported {
+ description
+ "Request could not be completed because the requested
+ operation is not supported by this implementation.";
+ }
+ enum operation-failed {
+ description
+ "Request could not be completed because the requested
+ operation failed for some reason not covered by
+ any other error condition.";
+ }
+ enum partial-operation {
+ description
+ "This error-tag is obsolete, and SHOULD NOT be sent
+ by servers conforming to this document.";
+ }
+ enum malformed-message {
+ description
+ "A message could not be handled because it failed to
+ be parsed correctly. For example, the message is not
+ well-formed XML or it uses an invalid character set.";
+ }
+ }
+ description "NETCONF Error Tag";
+ reference "RFC 6241, Appendix A";
+ }
+
+ typedef error-severity-type {
+ type enumeration {
+ enum error {
+ description "Error severity";
+ }
+ enum warning {
+ description "Warning severity";
+ }
+ }
+ description "NETCONF Error Severity";
+ reference "RFC 6241, Section 4.3";
+ }
+
+ typedef edit-operation-type {
+ type enumeration {
+ enum merge {
+ description
+ "The configuration data identified by the
+ element containing this attribute is merged
+ with the configuration at the corresponding
+ level in the configuration datastore identified
+ by the target parameter.";
+ }
+ enum replace {
+ description
+ "The configuration data identified by the element
+ containing this attribute replaces any related
+ configuration in the configuration datastore
+ identified by the target parameter. If no such
+ configuration data exists in the configuration
+ datastore, it is created. Unlike a
+ <copy-config> operation, which replaces the
+ entire target configuration, only the configuration
+ actually present in the config parameter is affected.";
+ }
+ enum create {
+ description
+ "The configuration data identified by the element
+ containing this attribute is added to the
+ configuration if and only if the configuration
+ data does not already exist in the configuration
+ datastore. If the configuration data exists, an
+ <rpc-error> element is returned with an
+ <error-tag> value of 'data-exists'.";
+ }
+ enum delete {
+ description
+ "The configuration data identified by the element
+ containing this attribute is deleted from the
+ configuration if and only if the configuration
+ data currently exists in the configuration
+ datastore. If the configuration data does not
+ exist, an <rpc-error> element is returned with
+ an <error-tag> value of 'data-missing'.";
+ }
+ enum remove {
+ description
+ "The configuration data identified by the element
+ containing this attribute is deleted from the
+ configuration if the configuration
+ data currently exists in the configuration
+ datastore. If the configuration data does not
+ exist, the 'remove' operation is silently ignored
+ by the server.";
+ }
+ }
+ default "merge";
+ description "NETCONF 'operation' attribute values";
+ reference "RFC 6241, Section 7.2";
+ }
+
+ // NETCONF Standard Protocol Operations
+
+ rpc get-config {
+ description
+ "Retrieve all or part of a specified configuration.";
+
+ reference "RFC 6241, Section 7.1";
+
+ input {
+ container source {
+ description
+ "Particular configuration to retrieve.";
+
+ choice config-source {
+ mandatory true;
+ description
+ "The configuration to retrieve.";
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config source.";
+ }
+ leaf running {
+ type empty;
+ description
+ "The running configuration is the config source.";
+ }
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config source.
+ This is optional-to-implement on the server because
+ not all servers will support filtering for this
+ datastore.";
+ }
+ }
+ }
+
+ anyxml filter {
+ description
+ "Subtree or XPath filter to use.";
+ nc:get-filter-element-attributes;
+ }
+ }
+
+ output {
+ anyxml data {
+ description
+ "Copy of the source datastore subset that matched
+ the filter criteria (if any). An empty data container
+ indicates that the request did not produce any results.";
+ }
+ }
+ }
+
+ rpc edit-config {
+ description
+ "The <edit-config> operation loads all or part of a specified
+ configuration to the specified target configuration.";
+
+ reference "RFC 6241, Section 7.2";
+
+ input {
+ container target {
+ description
+ "Particular configuration to edit.";
+
+ choice config-target {
+ mandatory true;
+ description
+ "The configuration target.";
+
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config target.";
+ }
+ leaf running {
+ if-feature writable-running;
+ type empty;
+ description
+ "The running configuration is the config source.";
+ }
+ }
+ }
+
+ leaf default-operation {
+ type enumeration {
+ enum merge {
+ description
+ "The default operation is merge.";
+ }
+ enum replace {
+ description
+ "The default operation is replace.";
+ }
+ enum none {
+ description
+ "There is no default operation.";
+ }
+ }
+ default "merge";
+ description
+ "The default operation to use.";
+ }
+
+ leaf test-option {
+ if-feature validate;
+ type enumeration {
+ enum test-then-set {
+ description
+ "The server will test and then set if no errors.";
+ }
+ enum set {
+ description
+ "The server will set without a test first.";
+ }
+
+ enum test-only {
+ description
+ "The server will only test and not set, even
+ if there are no errors.";
+ }
+ }
+ default "test-then-set";
+ description
+ "The test option to use.";
+ }
+
+ leaf error-option {
+ type enumeration {
+ enum stop-on-error {
+ description
+ "The server will stop on errors.";
+ }
+ enum continue-on-error {
+ description
+ "The server may continue on errors.";
+ }
+ enum rollback-on-error {
+ description
+ "The server will roll back on errors.
+ This value can only be used if the 'rollback-on-error'
+ feature is supported.";
+ }
+ }
+ default "stop-on-error";
+ description
+ "The error option to use.";
+ }
+
+ choice edit-content {
+ mandatory true;
+ description
+ "The content for the edit operation.";
+
+ anyxml config {
+ description
+ "Inline Config content.";
+ }
+ leaf url {
+ if-feature url;
+ type inet:uri;
+ description
+ "URL-based config content.";
+ }
+ }
+ }
+ }
+
+ rpc copy-config {
+ description
+ "Create or replace an entire configuration datastore with the
+ contents of another complete configuration datastore.";
+
+ reference "RFC 6241, Section 7.3";
+
+ input {
+ container target {
+ description
+ "Particular configuration to copy to.";
+
+ choice config-target {
+ mandatory true;
+ description
+ "The configuration target of the copy operation.";
+
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config target.";
+ }
+ leaf running {
+ if-feature writable-running;
+ type empty;
+ description
+ "The running configuration is the config target.
+ This is optional-to-implement on the server.";
+ }
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config target.";
+ }
+ leaf url {
+ if-feature url;
+ type inet:uri;
+ description
+ "The URL-based configuration is the config target.";
+ }
+ }
+ }
+
+ container source {
+ description
+ "Particular configuration to copy from.";
+
+ choice config-source {
+ mandatory true;
+ description
+ "The configuration source for the copy operation.";
+
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config source.";
+ }
+ leaf running {
+ type empty;
+ description
+ "The running configuration is the config source.";
+ }
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config source.";
+ }
+ leaf url {
+ if-feature url;
+ type inet:uri;
+ description
+ "The URL-based configuration is the config source.";
+ }
+ anyxml config {
+ description
+ "Inline Config content: <config> element. Represents
+ an entire configuration datastore, not
+ a subset of the running datastore.";
+ }
+ }
+ }
+ }
+ }
+
+ rpc delete-config {
+ description
+ "Delete a configuration datastore.";
+
+ reference "RFC 6241, Section 7.4";
+
+ input {
+ container target {
+ description
+ "Particular configuration to delete.";
+
+ choice config-target {
+ mandatory true;
+ description
+ "The configuration target to delete.";
+
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config target.";
+ }
+ leaf url {
+ if-feature url;
+ type inet:uri;
+ description
+ "The URL-based configuration is the config target.";
+ }
+ }
+ }
+ }
+ }
+
+ rpc lock {
+ description
+ "The lock operation allows the client to lock the configuration
+ system of a device.";
+
+ reference "RFC 6241, Section 7.5";
+
+ input {
+ container target {
+ description
+ "Particular configuration to lock.";
+
+ choice config-target {
+ mandatory true;
+ description
+ "The configuration target to lock.";
+
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config target.";
+ }
+ leaf running {
+ type empty;
+ description
+ "The running configuration is the config target.";
+ }
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config target.";
+ }
+ }
+ }
+ }
+ }
+
+ rpc unlock {
+ description
+ "The unlock operation is used to release a configuration lock,
+ previously obtained with the 'lock' operation.";
+
+ reference "RFC 6241, Section 7.6";
+
+ input {
+ container target {
+ description
+ "Particular configuration to unlock.";
+
+ choice config-target {
+ mandatory true;
+ description
+ "The configuration target to unlock.";
+
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config target.";
+ }
+ leaf running {
+ type empty;
+ description
+ "The running configuration is the config target.";
+ }
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config target.";
+ }
+ }
+ }
+ }
+ }
+
+ rpc get {
+ description
+ "Retrieve running configuration and device state information.";
+
+ reference "RFC 6241, Section 7.7";
+
+ input {
+ anyxml filter {
+ description
+ "This parameter specifies the portion of the system
+ configuration and state data to retrieve.";
+ nc:get-filter-element-attributes;
+ }
+ }
+
+ output {
+ anyxml data {
+ description
+ "Copy of the running datastore subset and/or state
+ data that matched the filter criteria (if any).
+ An empty data container indicates that the request did not
+ produce any results.";
+ }
+ }
+ }
+
+ rpc close-session {
+ description
+ "Request graceful termination of a NETCONF session.";
+
+ reference "RFC 6241, Section 7.8";
+ }
+
+ rpc kill-session {
+ description
+ "Force the termination of a NETCONF session.";
+
+ reference "RFC 6241, Section 7.9";
+
+ input {
+ leaf session-id {
+ type session-id-type;
+ mandatory true;
+ description
+ "Particular session to kill.";
+ }
+ }
+ }
+
+ rpc commit {
+ if-feature candidate;
+
+ description
+ "Commit the candidate configuration as the device's new
+ current configuration.";
+
+ reference "RFC 6241, Section 8.3.4.1";
+
+ input {
+ leaf confirmed {
+ if-feature confirmed-commit;
+ type empty;
+ description
+ "Requests a confirmed commit.";
+ reference "RFC 6241, Section 8.3.4.1";
+ }
+
+ leaf confirm-timeout {
+ if-feature confirmed-commit;
+ type uint32 {
+ range "1..max";
+ }
+ units "seconds";
+ default "600"; // 10 minutes
+ description
+ "The timeout interval for a confirmed commit.";
+ reference "RFC 6241, Section 8.3.4.1";
+ }
+
+ leaf persist {
+ if-feature confirmed-commit;
+ type string;
+ description
+ "This parameter is used to make a confirmed commit
+ persistent. A persistent confirmed commit is not aborted
+ if the NETCONF session terminates. The only way to abort
+ a persistent confirmed commit is to let the timer expire,
+ or to use the <cancel-commit> operation.
+
+ The value of this parameter is a token that must be given
+ in the 'persist-id' parameter of <commit> or
+ <cancel-commit> operations in order to confirm or cancel
+ the persistent confirmed commit.
+
+ The token should be a random string.";
+ reference "RFC 6241, Section 8.3.4.1";
+ }
+
+ leaf persist-id {
+ if-feature confirmed-commit;
+ type string;
+ description
+ "This parameter is given in order to commit a persistent
+ confirmed commit. The value must be equal to the value
+ given in the 'persist' parameter to the <commit> operation.
+ If it does not match, the operation fails with an
+ 'invalid-value' error.";
+ reference "RFC 6241, Section 8.3.4.1";
+ }
+
+ }
+ }
+
+ rpc discard-changes {
+ if-feature candidate;
+
+ description
+ "Revert the candidate configuration to the current
+ running configuration.";
+ reference "RFC 6241, Section 8.3.4.2";
+ }
+
+ rpc cancel-commit {
+ if-feature confirmed-commit;
+ description
+ "This operation is used to cancel an ongoing confirmed commit.
+ If the confirmed commit is persistent, the parameter
+ 'persist-id' must be given, and it must match the value of the
+ 'persist' parameter.";
+ reference "RFC 6241, Section 8.4.4.1";
+
+ input {
+ leaf persist-id {
+ type string;
+ description
+ "This parameter is given in order to cancel a persistent
+ confirmed commit. The value must be equal to the value
+ given in the 'persist' parameter to the <commit> operation.
+ If it does not match, the operation fails with an
+ 'invalid-value' error.";
+ }
+ }
+ }
+
+ rpc validate {
+ if-feature validate;
+
+ description
+ "Validates the contents of the specified configuration.";
+
+ reference "RFC 6241, Section 8.6.4.1";
+
+ input {
+ container source {
+ description
+ "Particular configuration to validate.";
+
+ choice config-source {
+ mandatory true;
+ description
+ "The configuration source to validate.";
+
+ leaf candidate {
+ if-feature candidate;
+ type empty;
+ description
+ "The candidate configuration is the config source.";
+ }
+ leaf running {
+ type empty;
+ description
+ "The running configuration is the config source.";
+ }
+ leaf startup {
+ if-feature startup;
+ type empty;
+ description
+ "The startup configuration is the config source.";
+ }
+ leaf url {
+ if-feature url;
+ type inet:uri;
+ description
+ "The URL-based configuration is the config source.";
+ }
+ anyxml config {
+ description
+ "Inline Config content: <config> element. Represents
+ an entire configuration datastore, not
+ a subset of the running datastore.";
+ }
+ }
+ }
+ }
+ }
+
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-subsystem</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>mdsal-netconf-connector</artifactId>
+ <packaging>bundle</packaging>
+ <name>${project.artifactId}</name>
+
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-mapping-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-core-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.logback_settings</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-operations</artifactId>
+ <version>0.7.0-SNAPSHOT</version>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <!--FIXME extract yang plugin definition into parent-->
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.netconf.mdsal.mapper;
+
+import org.opendaylight.controller.netconf.mdsal.connector.MdsalNetconfOperationServiceFactory;
+
+public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule {
+ public NetconfMdsalMapperModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfMdsalMapperModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.netconf.mdsal.mapper.NetconfMdsalMapperModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory = new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency(), getDomBrokerDependency()) {
+ @Override
+ public void close() throws Exception {
+ super.close();
+ getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
+ }
+ };
+ getMapperAggregatorDependency().onAddNetconfOperationServiceFactory(mdsalNetconfOperationServiceFactory);
+ return mdsalNetconfOperationServiceFactory;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-mdsal-mapper yang module local name: netconf-mdsal-mapper
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Jan 14 14:58:42 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.netconf.mdsal.mapper;
+public class NetconfMdsalMapperModuleFactory extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModuleFactory {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+
+public class CurrentSchemaContext implements SchemaContextListener, AutoCloseable {
+ final AtomicReference<SchemaContext> currentContext = new AtomicReference<SchemaContext>();
+ private final ListenerRegistration<SchemaContextListener> schemaContextListenerListenerRegistration;
+ private final Set<CapabilityListener> listeners = Collections.synchronizedSet(Sets.<CapabilityListener>newHashSet());
+
+ public SchemaContext getCurrentContext() {
+ Preconditions.checkState(currentContext.get() != null, "Current context not received");
+ return currentContext.get();
+ }
+
+ public CurrentSchemaContext(final SchemaService schemaService) {
+ schemaContextListenerListenerRegistration = schemaService.registerSchemaContextListener(this);
+ }
+
+ @Override
+ public void onGlobalContextUpdated(final SchemaContext schemaContext) {
+ currentContext.set(schemaContext);
+ // FIXME: is it wise to notify all the listeners from within this callback?
+ final Set<Capability> addedCaps = MdsalNetconfOperationServiceFactory.transformCapabilities(currentContext.get());
+ for (final CapabilityListener listener : listeners) {
+ listener.onCapabilitiesAdded(addedCaps);
+ }
+ }
+
+ @Override
+ public void close() throws Exception {
+ listeners.clear();
+ schemaContextListenerListenerRegistration.close();
+ currentContext.set(null);
+ }
+
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ listener.onCapabilitiesAdded(MdsalNetconfOperationServiceFactory.transformCapabilities(currentContext.get()));
+ listeners.add(listener);
+ return new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ listeners.remove(listener);
+ }
+ };
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector;
+
+import java.util.Set;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+
+public class MdsalNetconfOperationService implements NetconfOperationService {
+
+ private final OperationProvider operationProvider;
+
+ public MdsalNetconfOperationService(final CurrentSchemaContext schemaContext, final String netconfSessionIdForReporting,
+ final DOMDataBroker dataBroker) {
+ this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker);
+ }
+
+ @Override
+ public void close() {
+
+ }
+
+ @Override
+ public Set<NetconfOperation> getNetconfOperations() {
+ return operationProvider.getOperations();
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, AutoCloseable {
+
+ private static final Logger LOG = LoggerFactory.getLogger(MdsalNetconfOperationServiceFactory.class);
+
+ private final DOMDataBroker dataBroker;
+ private final CurrentSchemaContext currentSchemaContext;
+
+ public MdsalNetconfOperationServiceFactory(final SchemaService schemaService, final DOMDataBroker domDataBroker) {
+ this.currentSchemaContext = new CurrentSchemaContext(Preconditions.checkNotNull(schemaService));
+ this.dataBroker = Preconditions.checkNotNull(domDataBroker);
+ }
+
+ @Override
+ public MdsalNetconfOperationService createService(final String netconfSessionIdForReporting) {
+ return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker);
+ }
+
+ @Override
+ public void close() throws Exception {
+ currentSchemaContext.close();
+ }
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return transformCapabilities(currentSchemaContext.getCurrentContext());
+ }
+
+ static Set<Capability> transformCapabilities(final SchemaContext currentContext1) {
+ final Set<Capability> capabilities = new HashSet<>();
+ // [RFC6241] 8.3. Candidate Configuration Capability
+ capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
+
+ final SchemaContext currentContext = currentContext1;
+ final Set<Module> modules = currentContext.getModules();
+ for (final Module module : modules) {
+ if(currentContext.getModuleSource(module).isPresent()) {
+ capabilities.add(new YangStoreCapability(module, currentContext.getModuleSource(module).get()));
+ } else {
+ LOG.warn("Missing source for module {}. This module will not be available from netconf server",
+ module);
+ }
+ }
+
+ return capabilities;
+ }
+
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ return currentSchemaContext.registerCapabilityListener(listener);
+ }
+
+ private static class BasicCapability implements Capability {
+
+ private final String capability;
+
+ private BasicCapability(final String capability) {
+ this.capability = capability;
+ }
+
+ @Override
+ public String getCapabilityUri() {
+ return capability;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Collection<String> getLocation() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public String toString() {
+ return capability;
+ }
+ }
+
+ private static final class YangStoreCapability extends BasicCapability {
+
+ private final String content;
+ private final String revision;
+ private final String moduleName;
+ private final String moduleNamespace;
+
+ public YangStoreCapability(final Module module, final String moduleContent) {
+ super(toCapabilityURI(module));
+ this.content = moduleContent;
+ this.moduleName = module.getName();
+ this.moduleNamespace = module.getNamespace().toString();
+ this.revision = SimpleDateFormatUtil.getRevisionFormat().format(module.getRevision());
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.of(content);
+ }
+
+ private static String toCapabilityURI(final Module module) {
+ return String.valueOf(module.getNamespace()) + "?module="
+ + module.getName() + "&revision=" + SimpleDateFormatUtil.getRevisionFormat().format(module.getRevision());
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.of(moduleName);
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.of(moduleNamespace);
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.of(revision);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector;
+
+import com.google.common.collect.Sets;
+import java.util.Set;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.Commit;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.DiscardChanges;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.EditConfig;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.Lock;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.Unlock;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.get.Get;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.get.GetConfig;
+
+final class OperationProvider {
+
+ private final String netconfSessionIdForReporting;
+ private final CurrentSchemaContext schemaContext;
+ private final DOMDataBroker dataBroker;
+ private final TransactionProvider transactionProvider;
+
+ public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final DOMDataBroker dataBroker) {
+ this.netconfSessionIdForReporting = netconfSessionIdForReporting;
+ this.schemaContext = schemaContext;
+ this.dataBroker = dataBroker;
+ this.transactionProvider = new TransactionProvider(dataBroker, netconfSessionIdForReporting);
+
+ }
+
+ Set<NetconfOperation> getOperations() {
+ return Sets.<NetconfOperation>newHashSet(
+ new Commit(netconfSessionIdForReporting, transactionProvider),
+ new DiscardChanges(netconfSessionIdForReporting, transactionProvider),
+ new EditConfig(netconfSessionIdForReporting, schemaContext, transactionProvider),
+ new Get(netconfSessionIdForReporting, schemaContext, transactionProvider),
+ new GetConfig(netconfSessionIdForReporting, schemaContext, transactionProvider),
+ new Lock(netconfSessionIdForReporting),
+ new Unlock(netconfSessionIdForReporting)
+ );
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+//TODO make a global TransactionProvider for all Netconf sessions instead of each session having one.
+public class TransactionProvider implements AutoCloseable{
+
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionProvider.class);
+
+ private final DOMDataBroker dataBroker;
+
+ private DOMDataReadWriteTransaction candidateTransaction = null;
+ private DOMDataReadWriteTransaction runningTransaction = null;
+ private final List<DOMDataReadWriteTransaction> allOpenReadWriteTransactions = new ArrayList<>();
+
+ private final String netconfSessionIdForReporting;
+
+ private static final String NO_TRANSACTION_FOUND_FOR_SESSION = "No candidateTransaction found for session ";
+
+
+ public TransactionProvider(DOMDataBroker dataBroker, String netconfSessionIdForReporting) {
+ this.dataBroker = dataBroker;
+ this.netconfSessionIdForReporting = netconfSessionIdForReporting;
+ }
+
+ @Override
+ public synchronized void close() throws Exception {
+ for (DOMDataReadWriteTransaction rwt : allOpenReadWriteTransactions) {
+ rwt.cancel();
+ }
+
+ allOpenReadWriteTransactions.clear();
+ }
+
+ public synchronized Optional<DOMDataReadWriteTransaction> getCandidateTransaction() {
+ if (candidateTransaction == null) {
+ return Optional.absent();
+ }
+
+ return Optional.of(candidateTransaction);
+ }
+
+ public synchronized DOMDataReadWriteTransaction getOrCreateTransaction() {
+ if (getCandidateTransaction().isPresent()) {
+ return getCandidateTransaction().get();
+ }
+
+ candidateTransaction = dataBroker.newReadWriteTransaction();
+ allOpenReadWriteTransactions.add(candidateTransaction);
+ return candidateTransaction;
+ }
+
+ public synchronized boolean commitTransaction() throws NetconfDocumentedException {
+ if (!getCandidateTransaction().isPresent()) {
+ throw new NetconfDocumentedException(NO_TRANSACTION_FOUND_FOR_SESSION + netconfSessionIdForReporting,
+ ErrorType.application, ErrorTag.operation_failed, ErrorSeverity.error);
+ }
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = candidateTransaction.submit();
+ try {
+ future.checkedGet();
+ } catch (TransactionCommitFailedException e) {
+ LOG.debug("Transaction {} failed on", candidateTransaction, e);
+ throw new NetconfDocumentedException("Transaction commit failed on " + e.getMessage() + " " + netconfSessionIdForReporting,
+ ErrorType.application, ErrorTag.operation_failed, ErrorSeverity.error);
+ }
+ allOpenReadWriteTransactions.remove(candidateTransaction);
+ candidateTransaction = null;
+
+ return true;
+ }
+
+ public synchronized void abortTransaction() {
+ LOG.debug("Aborting current candidateTransaction");
+ Optional<DOMDataReadWriteTransaction> otx = getCandidateTransaction();
+ Preconditions.checkState(otx.isPresent(), NO_TRANSACTION_FOUND_FOR_SESSION + netconfSessionIdForReporting);
+ candidateTransaction.cancel();
+ allOpenReadWriteTransactions.remove(candidateTransaction);
+ candidateTransaction = null;
+ }
+
+ public synchronized DOMDataReadWriteTransaction createRunningTransaction() {
+ runningTransaction = dataBroker.newReadWriteTransaction();
+ allOpenReadWriteTransactions.add(runningTransaction);
+ return runningTransaction;
+ }
+
+ public synchronized boolean commitRunningTransaction(DOMDataReadWriteTransaction tx) throws NetconfDocumentedException {
+ allOpenReadWriteTransactions.remove(tx);
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = tx.submit();
+ try {
+ future.checkedGet();
+ } catch (TransactionCommitFailedException e) {
+ LOG.debug("Transaction {} failed on", tx, e);
+ throw new NetconfDocumentedException("Transaction commit failed on " + e.getMessage() + " " + netconfSessionIdForReporting,
+ ErrorType.application, ErrorTag.operation_failed, ErrorSeverity.error);
+ }
+
+ return true;
+ }
+
+ public synchronized void abortRunningTransaction(DOMDataReadWriteTransaction tx) {
+ LOG.debug("Aborting current running Transaction");
+ Preconditions.checkState(runningTransaction != null, NO_TRANSACTION_FOUND_FOR_SESSION + netconfSessionIdForReporting);
+ tx.cancel();
+ allOpenReadWriteTransactions.remove(tx);
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+public class Commit extends AbstractLastNetconfOperation{
+
+ private static final Logger LOG = LoggerFactory.getLogger(Commit.class);
+
+ private static final String OPERATION_NAME = "commit";
+ private final TransactionProvider transactionProvider;
+
+ public Commit(final String netconfSessionIdForReporting, final TransactionProvider transactionProvider) {
+ super(netconfSessionIdForReporting);
+ this.transactionProvider = transactionProvider;
+
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+
+ boolean commitStatus = transactionProvider.commitTransaction();
+ LOG.trace("Transaction commited succesfuly", commitStatus);
+
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+
+}
/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.raft.base.messages;
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
-/**
- * Internal message by Leader to initiate an install snapshot
- */
-public class InitiateInstallSnapshot {
+public enum Datastore {
+ candidate, running
}
-
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import java.util.HashMap;
+import java.util.Map;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+public class DiscardChanges extends AbstractLastNetconfOperation{
+
+ private static final Logger LOG = LoggerFactory.getLogger(DiscardChanges.class);
+
+ private static final String OPERATION_NAME = "discard-changes";
+
+ private final TransactionProvider transactionProvider;
+
+ public DiscardChanges(final String netconfSessionIdForReporting, final TransactionProvider transactionProvider) {
+ super(netconfSessionIdForReporting);
+ this.transactionProvider = transactionProvider;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ operationElement.getOnlyChildElement(OPERATION_NAME);
+
+ try {
+ transactionProvider.abortTransaction();
+ } catch (IllegalStateException e) {
+ LOG.warn("Abort failed ", e);
+ final Map<String, String> errorInfo = new HashMap<>();
+ errorInfo
+ .put(ErrorTag.operation_failed.name(),
+ "Operation failed. Use 'get-config' or 'edit-config' before triggering 'discard-changes' operation");
+ throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.application, ErrorTag.operation_failed,
+ ErrorSeverity.error, errorInfo);
+ }
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.data.operations.DataModificationException;
+import org.opendaylight.yangtools.yang.data.operations.DataModificationException.DataExistsException;
+import org.opendaylight.yangtools.yang.data.operations.DataModificationException.DataMissingException;
+import org.opendaylight.yangtools.yang.data.operations.DataOperations;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+public class EditConfig extends AbstractLastNetconfOperation {
+
+ private static final Logger LOG = LoggerFactory.getLogger(EditConfig.class);
+
+ private static final String OPERATION_NAME = "edit-config";
+ private static final String CONFIG_KEY = "config";
+
+ private final CurrentSchemaContext schemaContext;
+ private final TransactionProvider transactionProvider;
+
+ public EditConfig(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final TransactionProvider transactionProvider) {
+ super(netconfSessionIdForReporting);
+ this.schemaContext = schemaContext;
+ this.transactionProvider = transactionProvider;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ final XmlElement configElement = getConfigElement(operationElement);
+
+ for (XmlElement element : configElement.getChildElements()) {
+ final String ns = element.getNamespace();
+ final DataSchemaNode schemaNode = getSchemaNodeFromNamespace(ns, element).get();
+ YangInstanceIdentifier ident = YangInstanceIdentifier.of(schemaNode.getQName());
+
+ final NormalizedNode storedNode = readStoredNode(LogicalDatastoreType.CONFIGURATION, ident);
+ try {
+ final Optional<NormalizedNode<?,?>> newNode = modifyNode(schemaNode, element, storedNode);
+ final DOMDataReadWriteTransaction rwTx = transactionProvider.getOrCreateTransaction();
+ if (newNode.isPresent()) {
+ rwTx.put(LogicalDatastoreType.CONFIGURATION, ident, newNode.get());
+ } else {
+ rwTx.delete(LogicalDatastoreType.CONFIGURATION, ident);
+ }
+ } catch (final DataModificationException e) {
+ if (e instanceof DataExistsException) {
+ throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.protocol, ErrorTag.data_exists, ErrorSeverity.error);
+ } else if (e instanceof DataMissingException) {
+ throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.protocol, ErrorTag.data_missing, ErrorSeverity.error);
+ } else {
+ //should never happen, since in edit-config only the 2 previous cases can happen
+ throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.protocol, ErrorTag.operation_failed, ErrorSeverity.error);
+ }
+ }
+ }
+
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ private NormalizedNode readStoredNode(final LogicalDatastoreType logicalDatastoreType, final YangInstanceIdentifier path) throws NetconfDocumentedException{
+ final DOMDataReadWriteTransaction rwTx = transactionProvider.getOrCreateTransaction();
+ final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readFuture = rwTx.read(logicalDatastoreType, path);
+ try {
+ if (readFuture.checkedGet().isPresent()) {
+ final NormalizedNode node = readFuture.checkedGet().get();
+ return node;
+ } else {
+ LOG.warn("Unable to read node : {} from {} datastore", path, logicalDatastoreType);
+ }
+ } catch (final ReadFailedException e) {
+ //only log this since DataOperations.modify will handle throwing an exception or writing the node.
+ LOG.warn("Unable to read stored data: {}", path, e);
+ }
+
+ //we can return null here since DataOperations.modify handles null as input
+ return null;
+ }
+
+ private Optional<DataSchemaNode> getSchemaNodeFromNamespace(final String namespace, final XmlElement element){
+ Optional<DataSchemaNode> dataSchemaNode = Optional.absent();
+ try {
+ //returns module with newest revision since findModuleByNamespace returns a set of modules and we only need the newest one
+ final Module module = schemaContext.getCurrentContext().findModuleByNamespaceAndRevision(new URI(namespace), null);
+ dataSchemaNode = Optional.of(module.getDataChildByName(element.getName()));
+ } catch (URISyntaxException e) {
+ LOG.debug("Unable to create URI for namespace : {}", namespace);
+ }
+
+ return dataSchemaNode;
+ }
+
+ private Optional<NormalizedNode<?, ?>> modifyNode(final DataSchemaNode schemaNode, final XmlElement element, final NormalizedNode storedNode) throws DataModificationException{
+ if (schemaNode instanceof ContainerSchemaNode) {
+ final ContainerNode modifiedNode =
+ DomToNormalizedNodeParserFactory
+ .getInstance(DomUtils.defaultValueCodecProvider())
+ .getContainerNodeParser()
+ .parse(Collections.singletonList(element.getDomElement()), (ContainerSchemaNode) schemaNode);
+
+ final Optional<ContainerNode> oNode = DataOperations.modify((ContainerSchemaNode) schemaNode, (ContainerNode) storedNode, modifiedNode);
+ if (!oNode.isPresent()) {
+ return Optional.absent();
+ }
+
+ final NormalizedNode<?,?> node = oNode.get();
+ return Optional.<NormalizedNode<?,?>>of(node);
+ } else if (schemaNode instanceof ListSchemaNode) {
+ final MapNode modifiedNode =
+ DomToNormalizedNodeParserFactory
+ .getInstance(DomUtils.defaultValueCodecProvider())
+ .getMapNodeParser()
+ .parse(Collections.singletonList(element.getDomElement()), (ListSchemaNode) schemaNode);
+
+ final Optional<MapNode> oNode = DataOperations.modify((ListSchemaNode) schemaNode, (MapNode) storedNode, modifiedNode);
+ if (!oNode.isPresent()) {
+ return Optional.absent();
+ }
+
+ final NormalizedNode<?, ?> node = oNode.get();
+ return Optional.<NormalizedNode<?,?>>of(node);
+ } else {
+ //this should never happen since edit-config on any other node type should not be possible nor makes sense
+ LOG.debug("DataNode from module is not ContainerSchemaNode nor ListSchemaNode, aborting..");
+ return Optional.absent();
+ }
+
+ }
+
+ private XmlElement getConfigElement(final XmlElement operationElement) throws NetconfDocumentedException{
+ final Optional<XmlElement> configChildNode = operationElement.getOnlyChildElementOptionally(CONFIG_KEY);
+ if (!configChildNode.isPresent()) {
+ throw new NetconfDocumentedException("Can't get child element with name: " + CONFIG_KEY,
+ ErrorType.application,
+ ErrorTag.unknown_element,
+ ErrorSeverity.error);
+ }
+
+ return configChildNode.get();
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * NETCONF "lock" operation for the MD-SAL connector. Only the candidate
+ * datastore is accepted; any other target is rejected with operation-not-supported.
+ */
+public class Lock extends AbstractLastNetconfOperation{
+
+ private static final Logger LOG = LoggerFactory.getLogger(Lock.class);
+
+ private static final String OPERATION_NAME = "lock";
+ private static final String TARGET_KEY = "target";
+
+ public Lock(final String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ final Datastore targetDatastore = extractTargetParameter(operationElement);
+ if (targetDatastore == Datastore.candidate) {
+ LOG.debug("Locking candidate datastore on session: {}", getNetconfSessionIdForReporting());
+ // Replies <ok/>; no further locking work is performed here beyond the log.
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ throw new NetconfDocumentedException("Unable to lock " + targetDatastore + " datastore", NetconfDocumentedException.ErrorType.application,
+ NetconfDocumentedException.ErrorTag.operation_not_supported, NetconfDocumentedException.ErrorSeverity.error);
+ }
+
+ /**
+ * Parses the target element and maps its single child onto a Datastore value.
+ * Package-visible so Unlock can reuse the same parsing.
+ *
+ * @throws NetconfDocumentedException when the target element or its child is
+ *         missing or carries an unexpected namespace
+ */
+ static Datastore extractTargetParameter(final XmlElement operationElement) throws NetconfDocumentedException {
+ final XmlElement targetChildNode;
+ try {
+ final XmlElement targetElement = operationElement.getOnlyChildElementWithSameNamespace(TARGET_KEY);
+ targetChildNode = targetElement.getOnlyChildElementWithSameNamespace();
+ } catch (final MissingNameSpaceException | UnexpectedNamespaceException e) {
+ LOG.trace("Can't get only child element with same namespace", e);
+ throw NetconfDocumentedException.wrap(e);
+ }
+
+ // NOTE(review): Datastore.valueOf throws IllegalArgumentException for unknown
+ // datastore names - confirm callers expect that rather than a documented exception.
+ return Datastore.valueOf(targetChildNode.getName());
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * NETCONF "unlock" operation for the MD-SAL connector. Mirrors {@link Lock}:
+ * only the candidate datastore is accepted.
+ */
+public class Unlock extends AbstractLastNetconfOperation{
+
+ private static final Logger LOG = LoggerFactory.getLogger(Unlock.class);
+
+ private static final String OPERATION_NAME = "unlock";
+
+ public Unlock(final String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ // Target parsing is shared with the lock operation.
+ final Datastore targetDatastore = Lock.extractTargetParameter(operationElement);
+ if (targetDatastore == Datastore.candidate) {
+ LOG.debug("Unlocking candidate datastore on session: {}", getNetconfSessionIdForReporting());
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ throw new NetconfDocumentedException("Unable to unlock " + targetDatastore + " datastore", NetconfDocumentedException.ErrorType.application,
+ NetconfDocumentedException.ErrorTag.operation_not_supported, NetconfDocumentedException.ErrorSeverity.error);
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops.get;
+
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+
+public abstract class AbstractGet extends AbstractLastNetconfOperation {
+
+ protected static final YangInstanceIdentifier ROOT = YangInstanceIdentifier.builder().build();
+
+ protected final CurrentSchemaContext schemaContext;
+
+
+ public AbstractGet(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext) {
+ super(netconfSessionIdForReporting);
+ this.schemaContext = schemaContext;
+ }
+
+ private static final XMLOutputFactory XML_OUTPUT_FACTORY;
+
+ static {
+ XML_OUTPUT_FACTORY = XMLOutputFactory.newFactory();
+ XML_OUTPUT_FACTORY.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
+ }
+
+ protected Node transformNormalizedNode(final Document document, final NormalizedNode<?, ?> data, final YangInstanceIdentifier dataRoot) {
+// boolean isDataRoot = true;
+
+ final DOMResult result = new DOMResult(document.createElement(XmlNetconfConstants.DATA_KEY));
+
+ final XMLStreamWriter xmlWriter = getXmlStreamWriter(result);
+
+ final NormalizedNodeStreamWriter nnStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(xmlWriter,
+ schemaContext.getCurrentContext(), getSchemaPath(dataRoot));
+
+ final NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(nnStreamWriter);
+
+// if (isDataRoot) {
+ writeRootElement(xmlWriter, nnWriter, (ContainerNode) data);
+// } else {
+// if (data instanceof MapEntryNode) {
+// // Restconf allows returning one list item. We need to wrap it
+// // in map node in order to serialize it properly
+// data = ImmutableNodes.mapNodeBuilder(data.getNodeType()).addChild((MapEntryNode) data).build();
+// }
+// nnWriter.write(data);
+// nnWriter.flush();
+// }
+ return result.getNode();
+ }
+
+ private XMLStreamWriter getXmlStreamWriter(final DOMResult result) {
+ try {
+ return XML_OUTPUT_FACTORY.createXMLStreamWriter(result);
+ } catch (final XMLStreamException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static final Function<PathArgument, QName> PATH_ARG_TO_QNAME = new Function<YangInstanceIdentifier.PathArgument, QName>() {
+ @Override
+ public QName apply(final YangInstanceIdentifier.PathArgument input) {
+ return input.getNodeType();
+ }
+ };
+
+ private SchemaPath getSchemaPath(final YangInstanceIdentifier dataRoot) {
+ return SchemaPath.create(Iterables.transform(dataRoot.getPathArguments(), PATH_ARG_TO_QNAME), dataRoot.equals(ROOT));
+ }
+
+ // TODO this code is located in Restconf already
+ private void writeRootElement(final XMLStreamWriter xmlWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) {
+ try {
+ final QName name = SchemaContext.NAME;
+ for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
+ nnWriter.write(child);
+ }
+ nnWriter.flush();
+ xmlWriter.flush();
+ } catch (XMLStreamException | IOException e) {
+ Throwables.propagate(e);
+ }
+ }
+
+ protected static final class GetConfigExecution {
+ private final Optional<Datastore> datastore;
+
+ public GetConfigExecution(final Optional<Datastore> datastore) {
+ this.datastore = datastore;
+ }
+
+ public Optional<Datastore> getDatastore() {
+ return datastore;
+ }
+
+ static GetConfigExecution fromXml(final XmlElement xml, final String operationName) throws NetconfDocumentedException {
+ try {
+ validateInputRpc(xml, operationName);
+ } catch (final NetconfDocumentedException e) {
+ throw new NetconfDocumentedException("Incorrect RPC: " + e.getMessage(), e.getErrorType(), e.getErrorTag(), e.getErrorSeverity(), e.getErrorInfo());
+ }
+
+ final Optional<Datastore> sourceDatastore;
+ try {
+ sourceDatastore = parseSource(xml);
+ } catch (final NetconfDocumentedException e) {
+ throw new NetconfDocumentedException("Get-config source attribute error: " + e.getMessage(), e.getErrorType(), e.getErrorTag(), e.getErrorSeverity(), e.getErrorInfo());
+ }
+
+ // Add filter
+
+ return new GetConfigExecution(sourceDatastore);
+ }
+
+ private static Optional<Datastore> parseSource(final XmlElement xml) throws NetconfDocumentedException {
+ final Optional<XmlElement> sourceElement = xml.getOnlyChildElementOptionally(XmlNetconfConstants.SOURCE_KEY,
+ XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
+
+ return sourceElement.isPresent() ?
+ Optional.of(Datastore.valueOf(sourceElement.get().getOnlyChildElement().getName())) : Optional.<Datastore>absent();
+ }
+
+ private static void validateInputRpc(final XmlElement xml, String operationName) throws NetconfDocumentedException{
+ xml.checkName(operationName);
+ xml.checkNamespace(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
+ }
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops.get;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * NETCONF get operation: reads the whole OPERATIONAL data tree through a
+ * fresh running transaction and serializes it into the reply.
+ */
+public class Get extends AbstractGet {
+
+ private static final Logger LOG = LoggerFactory.getLogger(Get.class);
+
+ private static final String OPERATION_NAME = "get";
+
+ private final TransactionProvider transactionProvider;
+
+ public Get(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final TransactionProvider transactionProvider) {
+ super(netconfSessionIdForReporting, schemaContext);
+ this.transactionProvider = transactionProvider;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ try {
+ // Validate rpc name/namespace only; plain get carries no source datastore,
+ // so the parsed result is intentionally discarded.
+ GetConfigExecution.fromXml(operationElement, OPERATION_NAME);
+ } catch (final NetconfDocumentedException e) {
+ LOG.warn("Get request processing failed on session: {}", getNetconfSessionIdForReporting(), e);
+ throw e;
+ }
+
+ final YangInstanceIdentifier dataRoot = ROOT;
+ final DOMDataReadWriteTransaction rwTx = getTransaction(Datastore.running);
+ try {
+ final Optional<NormalizedNode<?, ?>> normalizedNodeOptional = rwTx.read(LogicalDatastoreType.OPERATIONAL, dataRoot).checkedGet();
+ transactionProvider.abortRunningTransaction(rwTx);
+ // NOTE(review): assumes the root read is always present - confirm broker semantics.
+ return (Element) transformNormalizedNode(document, normalizedNodeOptional.get(), dataRoot);
+ } catch (ReadFailedException e) {
+ // Do not leak the running transaction when the read fails.
+ transactionProvider.abortRunningTransaction(rwTx);
+ LOG.warn("Unable to read data: {}", dataRoot, e);
+ throw new IllegalStateException("Unable to read data " + dataRoot, e);
+ }
+ }
+
+ // Candidate reuses the long-lived candidate transaction; running gets a fresh one.
+ private DOMDataReadWriteTransaction getTransaction(Datastore datastore) throws NetconfDocumentedException{
+ if (datastore == Datastore.candidate) {
+ return transactionProvider.getOrCreateTransaction();
+ } else if (datastore == Datastore.running) {
+ return transactionProvider.createRunningTransaction();
+ }
+ throw new NetconfDocumentedException("Incorrect Datastore: ", ErrorType.protocol, ErrorTag.bad_element, ErrorSeverity.error);
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops.get;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * NETCONF get-config operation: reads the whole CONFIGURATION tree from the
+ * requested source datastore and serializes it into the reply.
+ */
+public class GetConfig extends AbstractGet {
+
+ private static final Logger LOG = LoggerFactory.getLogger(GetConfig.class);
+
+ private static final String OPERATION_NAME = "get-config";
+
+ private final TransactionProvider transactionProvider;
+
+ public GetConfig(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final TransactionProvider transactionProvider) {
+ super(netconfSessionIdForReporting, schemaContext);
+ this.transactionProvider = transactionProvider;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ final GetConfigExecution getConfigExecution;
+ try {
+ getConfigExecution = GetConfigExecution.fromXml(operationElement, OPERATION_NAME);
+ } catch (final NetconfDocumentedException e) {
+ LOG.warn("Get-config request processing failed on session: {}", getNetconfSessionIdForReporting(), e);
+ throw e;
+ }
+
+ final YangInstanceIdentifier dataRoot = ROOT;
+ // Proper exception should be thrown
+ Preconditions.checkState(getConfigExecution.getDatastore().isPresent(), "Source element missing from request");
+
+ final Datastore sourceDatastore = getConfigExecution.getDatastore().get();
+ final DOMDataReadWriteTransaction rwTx = getTransaction(sourceDatastore);
+ try {
+ final Optional<NormalizedNode<?, ?>> normalizedNodeOptional = rwTx.read(LogicalDatastoreType.CONFIGURATION, dataRoot).checkedGet();
+ if (sourceDatastore == Datastore.running) {
+ // Running transactions are one-shot; release as soon as the read is done.
+ // The candidate transaction is long-lived and must stay open.
+ transactionProvider.abortRunningTransaction(rwTx);
+ }
+ return (Element) transformNormalizedNode(document, normalizedNodeOptional.get(), dataRoot);
+ } catch (ReadFailedException e) {
+ if (sourceDatastore == Datastore.running) {
+ // Do not leak the running transaction when the read fails.
+ transactionProvider.abortRunningTransaction(rwTx);
+ }
+ LOG.warn("Unable to read data: {}", dataRoot, e);
+ throw new IllegalStateException("Unable to read data " + dataRoot, e);
+ }
+ }
+
+ // Candidate reuses the long-lived candidate transaction; running gets a fresh one.
+ private DOMDataReadWriteTransaction getTransaction(Datastore datastore) throws NetconfDocumentedException{
+ if (datastore == Datastore.candidate) {
+ return transactionProvider.getOrCreateTransaction();
+ } else if (datastore == Datastore.running) {
+ return transactionProvider.createRunningTransaction();
+ }
+ throw new NetconfDocumentedException("Incorrect Datastore: ", ErrorType.protocol, ErrorTag.bad_element, ErrorSeverity.error);
+ }
+
+ @Override
+ protected String getOperationName() {
+ return OPERATION_NAME;
+ }
+
+}
--- /dev/null
+module netconf-mdsal-mapper {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper";
+ prefix "nmm";
+
+ import netconf-northbound-mapper { prefix nnm; revision-date 2015-01-14; }
+ import opendaylight-md-sal-dom { prefix md-sal-dom; revision-date 2013-10-28; }
+ import config { prefix config; revision-date 2013-04-05; }
+
+ organization "Cisco Systems, Inc.";
+
+ description
+ "This module contains the base YANG definitions for
+ an MD-SAL mapper implementation";
+
+ revision "2015-01-14" {
+ description
+ "Initial revision.";
+ }
+
+ // Config-subsystem module type; provides the netconf-northbound-mapper service.
+ identity netconf-mdsal-mapper {
+ base config:module-type;
+ config:provided-service nnm:netconf-northbound-mapper;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-mdsal-mapper {
+ when "/config:modules/config:module/config:type = 'netconf-mdsal-mapper'";
+
+ // Optional reference to the global schema service.
+ container root-schema-service {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity md-sal-dom:schema-service;
+ }
+ }
+ }
+
+ // Optional reference to the DOM data broker.
+ container dom-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity md-sal-dom:dom-async-data-broker;
+ }
+ }
+ }
+
+ // Mandatory registry the mapper registers itself with.
+ container mapper-aggregator {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity nnm:netconf-mapper-registry;
+ }
+ }
+ }
+ }
+ }
+
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>netconf-subsystem</artifactId>
+    <version>0.3.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>mdsal-netconf-monitoring</artifactId>
+  <packaging>bundle</packaging>
+  <name>${project.artifactId}</name>
+
+  <dependencies>
+    <!-- compile dependencies -->
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>netconf-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>netconf-mapping-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>netconf-monitoring</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>netconf-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-config</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+
+    <!-- NOTE(review): versions and scopes are presumably managed by the parent's
+         dependencyManagement; mockito-configuration is normally test-scoped - confirm. -->
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>mockito-configuration</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools.model</groupId>
+      <artifactId>ietf-inet-types</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+      </plugin>
+      <!-- Generates config-subsystem JMX classes and binding DTOs from the yang sources. -->
+      <plugin>
+        <groupId>org.opendaylight.yangtools</groupId>
+        <artifactId>yang-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>config</id>
+            <goals>
+              <goal>generate-sources</goal>
+            </goals>
+            <configuration>
+              <codeGenerators>
+                <generator>
+                  <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+                  <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+                  <additionalConfiguration>
+                    <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+                  </additionalConfiguration>
+                </generator>
+                <generator>
+                  <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+                  <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+                </generator>
+              </codeGenerators>
+              <inspectDependencies>true</inspectDependencies>
+            </configuration>
+          </execution>
+        </executions>
+        <dependencies>
+          <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>yang-jmx-generator-plugin</artifactId>
+            <version>${config.version}</version>
+          </dependency>
+        </dependencies>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.netconf.mdsal.monitoring;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfState;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Mirrors the server's netconf-state (NetconfState) into the OPERATIONAL
+ * datastore whenever the monitoring service reports a change, and clears the
+ * subtree on close.
+ */
+final class MonitoringToMdsalWriter implements AutoCloseable, NetconfMonitoringService.MonitoringListener, BindingAwareProvider {
+
+ private static final Logger LOG = LoggerFactory.getLogger(MonitoringToMdsalWriter.class);
+
+ private final NetconfMonitoringService serverMonitoringDependency;
+ // Assigned in onSessionInitiated; null until the binding broker calls back.
+ private DataBroker dataBroker;
+
+ public MonitoringToMdsalWriter(final NetconfMonitoringService serverMonitoringDependency) {
+ this.serverMonitoringDependency = serverMonitoringDependency;
+ }
+
+ /**
+  * Best-effort removal of the NetconfState subtree from the OPERATIONAL datastore.
+  */
+ @Override
+ public void close() {
+ if (dataBroker == null) {
+ // onSessionInitiated never ran, so nothing was written and there is nothing to clean up.
+ return;
+ }
+ final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
+ tx.delete(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(NetconfState.class));
+ final CheckedFuture<Void, TransactionCommitFailedException> submit = tx.submit();
+
+ Futures.addCallback(submit, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void aVoid) {
+ LOG.debug("Netconf state cleared successfully");
+ }
+
+ @Override
+ public void onFailure(final Throwable throwable) {
+ LOG.warn("Unable to clear netconf state", throwable);
+ }
+ });
+ }
+
+ @Override
+ public void onStateChanged(final NetconfState state) {
+ Preconditions.checkState(dataBroker != null);
+ final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(NetconfState.class), state);
+ // FIXME first attempt (right after we register to binding broker) always fails
+ // Is it due to the fact that we are writing from the onSessionInitiated callback ?
+ final CheckedFuture<Void, TransactionCommitFailedException> submit = tx.submit();
+
+ Futures.addCallback(submit, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void aVoid) {
+ LOG.debug("Netconf state updated successfully");
+ }
+
+ @Override
+ public void onFailure(final Throwable throwable) {
+ LOG.warn("Unable to update netconf state", throwable);
+ }
+ });
+ }
+
+ @Override
+ public void onSessionInitiated(final BindingAwareBroker.ProviderContext providerContext) {
+ dataBroker = providerContext.getSALService(DataBroker.class);
+ serverMonitoringDependency.registerListener(this);
+ }
+}
--- /dev/null
+package org.opendaylight.controller.config.yang.netconf.mdsal.monitoring;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.monitoring.GetSchema;
+import org.opendaylight.controller.netconf.monitoring.MonitoringConstants;
+
+/**
+ * Config-subsystem module wiring the monitoring mapper: registers a
+ * MonitoringToMdsalWriter with the binding broker and a monitoring operation
+ * service factory with the mapper aggregator.
+ */
+public class NetconfMdsalMonitoringMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.monitoring.AbstractNetconfMdsalMonitoringMapperModule {
+ public NetconfMdsalMonitoringMapperModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfMdsalMonitoringMapperModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, final org.opendaylight.controller.config.yang.netconf.mdsal.monitoring.NetconfMdsalMonitoringMapperModule oldModule, final java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation form module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ final NetconfMonitoringService serverMonitoringDependency = getServerMonitoringDependency();
+
+ // The writer mirrors monitoring state into MD-SAL once its binding session starts.
+ final MonitoringToMdsalWriter monitoringToMdsalWriter = new MonitoringToMdsalWriter(serverMonitoringDependency);
+ getBindingAwareBrokerDependency().registerProvider(monitoringToMdsalWriter);
+
+ // close() is extended so tearing down the factory also stops the writer and
+ // deregisters from the aggregator.
+ final MdSalMonitoringMapperFactory mdSalMonitoringMapperFactory = new MdSalMonitoringMapperFactory(new MdsalMonitoringMapper(serverMonitoringDependency)) {
+ @Override
+ public void close() {
+ super.close();
+ monitoringToMdsalWriter.close();
+ getAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
+ }
+ };
+
+ getAggregatorDependency().onAddNetconfOperationServiceFactory(mdSalMonitoringMapperFactory);
+ return mdSalMonitoringMapperFactory;
+
+ }
+
+ // FIXME almost exactly same code as in netconf-monitoring, refactor
+ private static class MdSalMonitoringMapperFactory implements NetconfOperationServiceFactory, AutoCloseable {
+
+ private final NetconfOperationService operationService;
+
+ // Single static capability advertising the ietf-netconf-monitoring module.
+ private static final Set<Capability> CAPABILITIES = Sets.<Capability>newHashSet(new Capability() {
+
+ @Override
+ public String getCapabilityUri() {
+ return MonitoringConstants.URI;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.of(MonitoringConstants.NAMESPACE);
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.of(MonitoringConstants.MODULE_NAME);
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.of(MonitoringConstants.MODULE_REVISION);
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Collection<String> getLocation() {
+ return Collections.emptyList();
+ }
+ });
+
+ private static final AutoCloseable AUTO_CLOSEABLE = new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ // NOOP
+ }
+ };
+
+ private final List<CapabilityListener> listeners = new ArrayList<>();
+
+ public MdSalMonitoringMapperFactory(final NetconfOperationService operationService) {
+ this.operationService = operationService;
+ }
+
+ @Override
+ public NetconfOperationService createService(final String netconfSessionIdForReporting) {
+ // The same service instance is shared by every session.
+ return operationService;
+ }
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return CAPABILITIES;
+ }
+
+ // NOTE(review): the returned closeable is a no-op, so individual listeners are
+ // never removed from the list; they are only notified when the factory closes.
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ listener.onCapabilitiesAdded(getCapabilities());
+ listeners.add(listener);
+ return AUTO_CLOSEABLE;
+ }
+
+ @Override
+ public void close() {
+ for (final CapabilityListener listener : listeners) {
+ listener.onCapabilitiesRemoved(getCapabilities());
+ }
+ }
+ }
+
+
+ // Exposes the get-schema operation backed by the server monitoring service.
+ private static class MdsalMonitoringMapper implements NetconfOperationService {
+
+ private final NetconfMonitoringService serverMonitoringDependency;
+
+ public MdsalMonitoringMapper(final NetconfMonitoringService serverMonitoringDependency) {
+ this.serverMonitoringDependency = serverMonitoringDependency;
+ }
+
+ @Override
+ public Set<NetconfOperation> getNetconfOperations() {
+ return Collections.<NetconfOperation>singleton(new GetSchema(serverMonitoringDependency));
+ }
+
+ @Override
+ public void close() {
+ // NOOP
+ }
+ }
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-mdsal-monitoring yang module local name: netconf-mdsal-monitoring-mapper
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Feb 18 10:22:17 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.netconf.mdsal.monitoring;
+public class NetconfMdsalMonitoringMapperModuleFactory extends org.opendaylight.controller.config.yang.netconf.mdsal.monitoring.AbstractNetconfMdsalMonitoringMapperModuleFactory {
+
+}
--- /dev/null
+/*
+* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Eclipse Public License v1.0 which accompanies this distribution,
+* and is available at http://www.eclipse.org/legal/epl-v10.html
+*/
+package org.opendaylight.controller.netconf.monitoring;
+
+import com.google.common.collect.Sets;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.monitoring.Get;
+import org.opendaylight.controller.netconf.monitoring.GetSchema;
+
+public class NetconfMonitoringOperationService implements NetconfOperationService {
+
+ private final NetconfMonitoringService monitor;
+
+ public NetconfMonitoringOperationService(final NetconfMonitoringService monitor) {
+ this.monitor = monitor;
+ }
+
+ @Override
+ public Set<NetconfOperation> getNetconfOperations() {
+ return Sets.<NetconfOperation>newHashSet(new Get(monitor), new GetSchema(monitor));
+ }
+
+ @Override
+ public void close() {
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.monitoring;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+
+/**
+* Created by mmarsale on 18.2.2015.
+*/
+public class NetconfMonitoringOperationServiceFactory implements NetconfOperationServiceFactory, AutoCloseable {
+
+ private final NetconfMonitoringOperationService operationService;
+
+ private static final Set<Capability> CAPABILITIES = Sets.<Capability>newHashSet(new Capability() {
+
+ @Override
+ public String getCapabilityUri() {
+ return MonitoringConstants.URI;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.of(MonitoringConstants.NAMESPACE);
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.of(MonitoringConstants.MODULE_NAME);
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.of(MonitoringConstants.MODULE_REVISION);
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Collection<String> getLocation() {
+ return Collections.emptyList();
+ }
+ });
+
+ private static final AutoCloseable AUTO_CLOSEABLE = new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ // NOOP
+ }
+ };
+
+ private final List<CapabilityListener> listeners = new ArrayList<>();
+
+ public NetconfMonitoringOperationServiceFactory(final NetconfMonitoringOperationService operationService) {
+ this.operationService = operationService;
+ }
+
+ @Override
+ public NetconfOperationService createService(final String netconfSessionIdForReporting) {
+ return operationService;
+ }
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return CAPABILITIES;
+ }
+
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ listener.onCapabilitiesAdded(getCapabilities());
+ listeners.add(listener);
+ return AUTO_CLOSEABLE;
+ }
+
+ @Override
+ public void close() {
+ for (final CapabilityListener listener : listeners) {
+ listener.onCapabilitiesRemoved(getCapabilities());
+ }
+ }
+}
--- /dev/null
+module netconf-mdsal-monitoring {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring";
+ prefix "nmmonitor";
+
+ import netconf-northbound-mapper { prefix nnm; revision-date 2015-01-14; }
+ import opendaylight-md-sal-binding {prefix md-sal-binding; revision-date 2013-10-28;}
+ import netconf-northbound { prefix nn; revision-date 2015-01-14; }
+ import config { prefix config; revision-date 2013-04-05; }
+
+ organization "Cisco Systems, Inc.";
+
+ description
+ "This module contains the base YANG definitions for
+ an MD-SAL monitoring mapper implementation";
+
+ revision "2015-02-18" {
+ description
+ "Initial revision.";
+ }
+
+ identity netconf-mdsal-monitoring-mapper {
+ base config:module-type;
+ config:provided-service nnm:netconf-northbound-mapper;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-mdsal-monitoring-mapper {
+ when "/config:modules/config:module/config:type = 'netconf-mdsal-monitoring-mapper'";
+
+ container server-monitoring {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity nn:netconf-server-monitoring;
+ }
+ }
+ }
+
+ container aggregator {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity nnm:netconf-mapper-registry;
+ }
+ }
+ }
+
+ container binding-aware-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity md-sal-binding:binding-broker-osgi-registry;
+ }
+ }
+ }
+ }
+ }
+
+}
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <configuration>
- <instructions>
- <Export-Package>org.opendaylight.controller.netconf.api,
- org.opendaylight.controller.netconf.api.jmx,
- org.opendaylight.controller.netconf.api.xml,
- org.opendaylight.controller.netconf.api.monitoring,</Export-Package>
- </instructions>
- </configuration>
</plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
</plugins>
</build>
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.mapping.api;
+package org.opendaylight.controller.netconf.api;
import com.google.common.base.Optional;
import java.util.Collection;
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.api;
+
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+
+public interface NetconfServerDispatcher {
+
+ ChannelFuture createServer(InetSocketAddress address);
+
+ ChannelFuture createLocalServer(LocalAddress address);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.api.monitoring;
+
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+
+public interface CapabilityListener {
+
+ void onCapabilitiesAdded(Set<Capability> addedCaps);
+
+ void onCapabilitiesRemoved(Set<Capability> removedCaps);
+}
*/
package org.opendaylight.controller.netconf.api.monitoring;
+import com.google.common.base.Optional;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfState;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Capabilities;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Schemas;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Sessions;
-public interface NetconfMonitoringService {
+public interface NetconfMonitoringService extends CapabilityListener, SessionListener {
Sessions getSessions();
Schemas getSchemas();
+
+ String getSchemaForCapability(String moduleName, Optional<String> revision);
+
+ Capabilities getCapabilities();
+
+ /**
+ * Allows push based state information transfer. After the listener is registered, current state is pushed to the listener.
+ */
+ AutoCloseable registerListener(MonitoringListener listener);
+
+ interface MonitoringListener {
+
+ // TODO more granular updates would make sense
+ void onStateChanged(NetconfState state);
+ }
}
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.impl.osgi;
-import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
-
-public interface SessionMonitoringService {
+package org.opendaylight.controller.netconf.api.monitoring;
+/**
+ * Created by mmarsale on 13.2.2015.
+ */
+public interface SessionListener {
void onSessionUp(NetconfManagementSession session);
void onSessionDown(NetconfManagementSession session);
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.api.util;
+
+public final class NetconfConstants {
+ /*
+ * TODO define marker interface in mapping-api that the serviceFactories in cofing subsystem
+ * will implement so we can check for services with instanceof instead of constants
+ */
+ public static final String SERVICE_NAME = "name";
+ public static final String CONFIG_NETCONF_CONNECTOR = "config-netconf-connector";
+ public static final String NETCONF_MONITORING = "ietf-netconf-monitoring";
+}
--- /dev/null
+module netconf-northbound {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound";
+ prefix "nn";
+
+ import config { prefix config; revision-date 2013-04-05; }
+
+ description
+ "This module contains the base YANG definitions for
+ netconf northbound server API";
+
+ revision "2015-01-14" {
+ description
+ "Initial revision.";
+ }
+
+ identity netconf-server-dispatcher {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.netconf.api.NetconfServerDispatcher";
+ }
+
+ identity netconf-server-monitoring {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService";
+ }
+
+}
\ No newline at end of file
<artifactId>netconf-connector-config</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-mdsal-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>mdsal-netconf-connector</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-impl</artifactId>
<artifactId>netconf-monitoring</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>mdsal-netconf-monitoring</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-netty-util</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-notifications-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-client</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-parser-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ </dependency>
</dependencies>
<build>
}
case SSH: {
writeStatus(consoleIO, "Connecting to %s via SSH. Please wait.", cliArgs.getAddress());
- connectionManager.connectBlocking(cliArgs.getAddress(), getClientSshConfig(cliArgs));
+ connectionManager.connectBlocking(cliArgs.getAddress(), cliArgs.getServerAddress(), getClientSshConfig(cliArgs));
break;
}
case NONE: {/* Do not connect initially */
import org.opendaylight.controller.netconf.cli.io.ConsoleContext;
import org.opendaylight.controller.netconf.cli.io.ConsoleIO;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
* Implementation of RemoteDeviceHandler. Integrates cli with
* sal-netconf-connector.
*/
-public class NetconfDeviceConnectionHandler implements RemoteDeviceHandler<NetconfSessionCapabilities> {
+public class NetconfDeviceConnectionHandler implements RemoteDeviceHandler<NetconfSessionPreferences> {
private final CommandDispatcher commandDispatcher;
private final SchemaContextRegistry schemaContextRegistry;
@Override
public synchronized void onDeviceConnected(final SchemaContext context,
- final NetconfSessionCapabilities capabilities, final RpcImplementation rpcImplementation) {
+ final NetconfSessionPreferences preferences, final RpcImplementation rpcImplementation) {
console.enterRootContext(new ConsoleContext() {
@Override
up = false;
}
+ @Override
+ public void onDeviceFailed(Throwable throwable) {
+ // FIXME
+ }
+
@Override
public void onNotification(final CompositeNode compositeNode) {
// FIXME
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
+import java.net.InetSocketAddress;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
// TODO we receive configBuilder in order to add SessionListener, Session
// Listener should not be part of config
- public synchronized void connect(final String name, final NetconfClientConfigurationBuilder configBuilder) {
+ public synchronized void connect(final String name, final InetSocketAddress address, final NetconfClientConfigurationBuilder configBuilder) {
// TODO change IllegalState exceptions to custom ConnectionException
Preconditions.checkState(listener == null, "Already connected");
- final RemoteDeviceId deviceId = new RemoteDeviceId(name);
+ final RemoteDeviceId deviceId = new RemoteDeviceId(name, address);
handler = new NetconfDeviceConnectionHandler(commandDispatcher, schemaContextRegistry,
console, name);
/**
* Blocks thread until connection is fully established
*/
- public synchronized Set<String> connectBlocking(final String name, final NetconfClientConfigurationBuilder configBuilder) {
- this.connect(name, configBuilder);
+ public synchronized Set<String> connectBlocking(final String name, final InetSocketAddress address, final NetconfClientConfigurationBuilder configBuilder) {
+ this.connect(name, address, configBuilder);
synchronized (handler) {
while (handler.isUp() == false) {
try {
@Override
public Output invoke(final Input inputArgs) {
final NetconfClientConfigurationBuilder config = getConfig(inputArgs);
- return invoke(config, getArgument(inputArgs, "address-name", String.class));
+ return invoke(config, getArgument(inputArgs, "address-name", String.class), inputArgs);
}
- private Output invoke(final NetconfClientConfigurationBuilder config, final String addressName) {
- final Set<String> remoteCmds = connectManager.connectBlocking(addressName, config);
+ private Output invoke(final NetconfClientConfigurationBuilder config, final String addressName, final Input inputArgs) {
+ final Set<String> remoteCmds = connectManager.connectBlocking(addressName, getAdress(inputArgs), config);
final ArrayList<Node<?>> output = Lists.newArrayList();
output.add(new SimpleNodeTOImpl<>(QName.create(getCommandId(), "status"), null, "Connection initiated"));
.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
}
+ private InetSocketAddress getAdress(final Input inputArgs) {
+ final String address = getArgument(inputArgs, "address-name", String.class);
+ final InetSocketAddress inetAddress;
+ try {
+ inetAddress = new InetSocketAddress(InetAddress.getByName(address), getArgument(inputArgs, "address-port", Integer.class));
+ } catch (final UnknownHostException e) {
+ throw new IllegalArgumentException("Unable to use address: " + address, e);
+ }
+ return inetAddress;
+ }
+
private <T> Optional<T> getArgumentOpt(final Input inputArgs, final String argName, final Class<T> type) {
final QName argQName = QName.create(getCommandId(), argName);
final Node<?> argumentNode = inputArgs.getArg(argName);
<name>global-netconf-processing-executor-threadfactory</name>
</threadFactory>
</module>
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:scheduled">prefix:threadpool-scheduled</type>
+ <name>global-netconf-ssh-scheduled-executor</name>
+ <max-thread-count xmlns="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:scheduled">8</max-thread-count>
+
+ <threadFactory xmlns="urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:scheduled">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool">prefix:threadfactory</type>
+ <name>global-netconf-processing-executor-threadfactory</name>
+ </threadFactory>
+ </module>
</modules>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<name>global-netconf-processing-executor</name>
<provider>/modules/module[type='threadpool-flexible'][name='global-netconf-processing-executor']</provider>
</instance>
+ <instance>
+ <name>global-netconf-ssh-scheduled-executor</name>
+ <provider>/modules/module[type='threadpool-scheduled'][name='global-netconf-ssh-scheduled-executor']</provider>
+ </instance>
</service>
</services>
<capability>urn:opendaylight:params:xml:ns:yang:controller:config:netconf:client:dispatcher?module=odl-netconfig-client-cfg&revision=2014-04-08</capability>
<capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl?module=threadpool-impl&revision=2013-04-05</capability>
<capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:flexible?module=threadpool-impl-flexible&revision=2013-12-01</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:scheduled?module=threadpool-impl-scheduled&revision=2013-12-01</capability>
</required-capabilities>
</snapshot>
<username xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</username>
<password xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</password>
<tcp-only xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">false</tcp-only>
+ <reconnect-on-changed-schema>true</reconnect-on-changed-schema>
<event-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-event-executor</type>
<name>global-event-executor</name>
<name>global-netconf-processing-executor</name>
</processing-executor>
</module>
- </modules>
+ </modules>
</data>
</configuration>
<required-capabilities>
<configuration>
<instructions>
<Bundle-Activator>org.opendaylight.controller.netconf.impl.osgi.NetconfImplActivator</Bundle-Activator>
+ <Export-Package>org.opendaylight.controller.netconf.impl.*</Export-Package>
</instructions>
</configuration>
</plugin>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
</plugins>
</build>
--- /dev/null
+package org.opendaylight.controller.config.yang.config.netconf.northbound.impl;
+
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
+
+public class NetconfMapperAggregatorModule extends org.opendaylight.controller.config.yang.config.netconf.northbound.impl.AbstractNetconfMapperAggregatorModule {
+ public NetconfMapperAggregatorModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfMapperAggregatorModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, final org.opendaylight.controller.config.yang.config.netconf.northbound.impl.NetconfMapperAggregatorModule oldModule, final java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {}
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ return new AggregatedNetconfOperationServiceFactory();
+ }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-northbound-impl yang module local name: netconf-mapper-aggregator
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Feb 17 17:24:19 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.netconf.northbound.impl;
+public class NetconfMapperAggregatorModuleFactory extends org.opendaylight.controller.config.yang.config.netconf.northbound.impl.AbstractNetconfMapperAggregatorModuleFactory {
+
+}
--- /dev/null
+package org.opendaylight.controller.config.yang.config.netconf.northbound.impl;
+
+import org.opendaylight.controller.config.api.JmxAttributeValidationException;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.impl.CommitNotifier;
+import org.opendaylight.controller.netconf.impl.NetconfServerDispatcherImpl;
+import org.opendaylight.controller.netconf.impl.NetconfServerSessionNegotiatorFactory;
+import org.opendaylight.controller.netconf.impl.SessionIdProvider;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+
+public class NetconfServerDispatcherModule extends org.opendaylight.controller.config.yang.config.netconf.northbound.impl.AbstractNetconfServerDispatcherModule {
+ public NetconfServerDispatcherModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfServerDispatcherModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.netconf.northbound.impl.NetconfServerDispatcherModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ JmxAttributeValidationException.checkCondition(getConnectionTimeoutMillis() > 0, "Invalid connection timeout", connectionTimeoutMillisJmxAttribute);
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+
+ final AggregatedNetconfOperationServiceFactory aggregatedOpProvider = getAggregatedOpProvider();
+ final NetconfMonitoringService monitoringService = getServerMonitorDependency();
+ final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
+ getTimerDependency(), aggregatedOpProvider, new SessionIdProvider(), getConnectionTimeoutMillis(), CommitNotifier.NoopCommitNotifier.getInstance(), monitoringService);
+ final NetconfServerDispatcherImpl.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcherImpl.ServerChannelInitializer(
+ serverNegotiatorFactory);
+
+ return new NetconfServerDispatcherImpl(serverChannelInitializer, getBossThreadGroupDependency(), getWorkerThreadGroupDependency()) {
+
+ @Override
+ public void close() {
+ // NOOP, close should not be present here, the deprecated method closes injected evet loop groups
+ }
+ };
+
+ }
+
+ private AggregatedNetconfOperationServiceFactory getAggregatedOpProvider() {
+ final AggregatedNetconfOperationServiceFactory netconfOperationProvider = new AggregatedNetconfOperationServiceFactory();
+ for (final NetconfOperationServiceFactory netconfOperationServiceFactory : getMappersDependency()) {
+ netconfOperationProvider.onAddNetconfOperationServiceFactory(netconfOperationServiceFactory);
+ }
+ return netconfOperationProvider;
+ }
+
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-northbound-impl yang module local name: netconf-server-dispatcher-impl
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Thu Feb 12 11:32:29 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.netconf.northbound.impl;
+public class NetconfServerDispatcherModuleFactory extends org.opendaylight.controller.config.yang.config.netconf.northbound.impl.AbstractNetconfServerDispatcherModuleFactory {
+
+}
--- /dev/null
+package org.opendaylight.controller.config.yang.config.netconf.northbound.impl;
+
+import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
+
+public class NetconfServerMonitoringModule extends org.opendaylight.controller.config.yang.config.netconf.northbound.impl.AbstractNetconfServerMonitoringModule {
+ public NetconfServerMonitoringModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfServerMonitoringModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.netconf.northbound.impl.NetconfServerMonitoringModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation form module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ return new NetconfMonitoringServiceImpl(getAggregatorDependency());
+ }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-northbound-impl yang module local name: netconf-server-monitoring-impl
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Feb 17 17:24:19 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.netconf.northbound.impl;
+public class NetconfServerMonitoringModuleFactory extends org.opendaylight.controller.config.yang.config.netconf.northbound.impl.AbstractNetconfServerMonitoringModuleFactory {
+
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class CapabilityProviderImpl implements CapabilityProvider {
- private final NetconfOperationServiceSnapshot netconfOperationServiceSnapshot;
- private final Set<String> capabilityURIs;
-
- private static final Logger LOG = LoggerFactory.getLogger(CapabilityProviderImpl.class);
-
- public CapabilityProviderImpl(NetconfOperationServiceSnapshot netconfOperationServiceSnapshot) {
- this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
- Map<String, Capability> urisToCapabilitiesInternalMap = getCapabilitiesInternal(netconfOperationServiceSnapshot);
- List<String> capabilityURIs = new ArrayList<>(urisToCapabilitiesInternalMap.keySet());
- Collections.sort(capabilityURIs);
- this.capabilityURIs = Collections.unmodifiableSet(new TreeSet<>(capabilityURIs));
- }
-
- private static Map<String, Capability> getCapabilitiesInternal(
- NetconfOperationServiceSnapshot netconfOperationServiceSnapshot) {
- Map<String, Capability> capabilityMap = Maps.newHashMap();
-
- for (NetconfOperationService netconfOperationService : netconfOperationServiceSnapshot.getServices()) {
- final Set<Capability> caps = netconfOperationService.getCapabilities();
-
- for (Capability cap : caps) {
-
- if(capabilityMap.containsKey(cap.getCapabilityUri())) {
- LOG.debug("Duplicate capability {} from service {}", cap.getCapabilityUri(), netconfOperationService);
- }
-
- capabilityMap.put(cap.getCapabilityUri(), cap);
- }
- }
-
- return capabilityMap;
- }
-
- @Override
- public synchronized String getSchemaForCapability(String moduleName, Optional<String> revision) {
-
- Map<String, Map<String, String>> mappedModulesToRevisionToSchema = Maps.newHashMap();
-
- for (NetconfOperationService netconfOperationService : netconfOperationServiceSnapshot.getServices()) {
- final Set<Capability> caps = netconfOperationService.getCapabilities();
-
- for (Capability cap : caps) {
- if (!cap.getModuleName().isPresent()
- || !cap.getRevision().isPresent()
- || !cap.getCapabilitySchema().isPresent()){
- continue;
- }
-
- final String currentModuleName = cap.getModuleName().get();
- Map<String, String> revisionMap = mappedModulesToRevisionToSchema.get(currentModuleName);
- if (revisionMap == null) {
- revisionMap = Maps.newHashMap();
- mappedModulesToRevisionToSchema.put(currentModuleName, revisionMap);
- }
-
- String currentRevision = cap.getRevision().get();
- revisionMap.put(currentRevision, cap.getCapabilitySchema().get());
- }
- }
-
- Map<String, String> revisionMapRequest = mappedModulesToRevisionToSchema.get(moduleName);
- Preconditions.checkState(revisionMapRequest != null, "Capability for module %s not present, " + ""
- + "available modules : %s", moduleName, capabilityURIs);
-
- if (revision.isPresent()) {
- String schema = revisionMapRequest.get(revision.get());
-
- Preconditions.checkState(schema != null,
- "Capability for module %s:%s not present, available revisions for module: %s", moduleName,
- revision.get(), revisionMapRequest.keySet());
-
- return schema;
- } else {
- Preconditions.checkState(revisionMapRequest.size() == 1,
- "Expected 1 capability for module %s, available revisions : %s", moduleName,
- revisionMapRequest.keySet());
- return revisionMapRequest.values().iterator().next();
- }
- }
-
- @Override
- public synchronized Set<String> getCapabilities() {
- return capabilityURIs;
- }
-
-}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl;
+
+import java.util.Set;
+import org.w3c.dom.Element;
+
+public interface CommitNotifier {
+ void sendCommitNotification(String message, Element cfgSnapshot, Set<String> capabilities);
+
+ public static final class NoopCommitNotifier implements CommitNotifier {
+
+ private static final CommitNotifier INSTANCE = new NoopCommitNotifier();
+
+ private NoopCommitNotifier() {}
+
+ public static CommitNotifier getInstance() {
+ return INSTANCE;
+ }
+
+ @Override
+ public void sendCommitNotification(final String message, final Element cfgSnapshot, final Set<String> capabilities) {
+ // NOOP
+ }
+ }
+}
import org.w3c.dom.Element;
public class DefaultCommitNotificationProducer extends NotificationBroadcasterSupport implements
- DefaultCommitOperationMXBean, AutoCloseable {
+ DefaultCommitOperationMXBean, AutoCloseable, CommitNotifier {
private static final Logger LOG = LoggerFactory.getLogger(DefaultCommitNotificationProducer.class);
}
}
+ @Override
public void sendCommitNotification(String message, Element cfgSnapshot, Set<String> capabilities) {
CommitJMXNotification notif = NetconfJMXNotification.afterCommit(this, message, cfgSnapshot, capabilities);
LOG.debug("Notification about commit {} sent", notif);
package org.opendaylight.controller.netconf.impl;
-import com.google.common.annotations.VisibleForTesting;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.util.concurrent.Promise;
import java.net.InetSocketAddress;
+import org.opendaylight.controller.netconf.api.NetconfServerDispatcher;
import org.opendaylight.controller.netconf.impl.util.DeserializerExceptionHandler;
import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer;
import org.opendaylight.protocol.framework.AbstractDispatcher;
-public class NetconfServerDispatcher extends AbstractDispatcher<NetconfServerSession, NetconfServerSessionListener> {
+public class NetconfServerDispatcherImpl extends AbstractDispatcher<NetconfServerSession, NetconfServerSessionListener> implements NetconfServerDispatcher {
private final ServerChannelInitializer initializer;
- public NetconfServerDispatcher(ServerChannelInitializer serverChannelInitializer, EventLoopGroup bossGroup,
- EventLoopGroup workerGroup) {
+ public NetconfServerDispatcherImpl(ServerChannelInitializer serverChannelInitializer, EventLoopGroup bossGroup,
+ EventLoopGroup workerGroup) {
super(bossGroup, workerGroup);
this.initializer = serverChannelInitializer;
}
- @VisibleForTesting
+ @Override
public ChannelFuture createServer(InetSocketAddress address) {
-
return super.createServer(address, new PipelineInitializer<NetconfServerSession>() {
@Override
public void initializeChannel(final SocketChannel ch, final Promise<NetconfServerSession> promise) {
});
}
+ @Override
public ChannelFuture createLocalServer(LocalAddress address) {
return super.createServer(address, LocalServerChannel.class, new ChannelPipelineInitializer<LocalChannel, NetconfServerSession>() {
@Override
@Override
protected void sessionUp() {
- super.sessionUp();
Preconditions.checkState(loginTime == null, "Session is already up");
this.loginTime = new Date();
+ super.sessionUp();
}
public void onIncommingRpcSuccess() {
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.NetconfSessionListener;
import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import org.opendaylight.controller.netconf.util.messages.SendErrorExceptionUtil;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public class NetconfServerSessionListener implements NetconfSessionListener<NetconfServerSession> {
private static final Logger LOG = LoggerFactory.getLogger(NetconfServerSessionListener.class);
- private final SessionMonitoringService monitoringService;
+ private final NetconfMonitoringService monitoringService;
private final NetconfOperationRouter operationRouter;
private final AutoCloseable onSessionDownCloseable;
- public NetconfServerSessionListener(final NetconfOperationRouter operationRouter, final SessionMonitoringService monitoringService,
+ public NetconfServerSessionListener(final NetconfOperationRouter operationRouter, NetconfMonitoringService monitoringService,
final AutoCloseable onSessionDownCloseable) {
this.operationRouter = operationRouter;
this.monitoringService = monitoringService;
@Override
public void onSessionUp(final NetconfServerSession netconfNetconfServerSession) {
monitoringService.onSessionUp(netconfNetconfServerSession);
+ // FIXME the monitoring service should also be notified about all other changes to the netconf session (from the ietf-netconf-monitoring point of view).
+ // This means also notifying after every message is processed
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.impl;
-
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouterImpl;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
-import org.opendaylight.protocol.framework.SessionListenerFactory;
-
-public class NetconfServerSessionListenerFactory implements SessionListenerFactory<NetconfServerSessionListener> {
-
- private final DefaultCommitNotificationProducer commitNotifier;
- private final SessionMonitoringService monitor;
- private final NetconfOperationServiceSnapshot netconfOperationServiceSnapshot;
- private final CapabilityProvider capabilityProvider;
-
- public NetconfServerSessionListenerFactory(final DefaultCommitNotificationProducer commitNotifier,
- final SessionMonitoringService monitor,
- final NetconfOperationServiceSnapshot netconfOperationServiceSnapshot,
- final CapabilityProvider capabilityProvider) {
-
- this.commitNotifier = commitNotifier;
- this.monitor = monitor;
- this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
- this.capabilityProvider = capabilityProvider;
- }
-
- @Override
- public NetconfServerSessionListener getSessionListener() {
- NetconfOperationRouter operationRouter = new NetconfOperationRouterImpl(netconfOperationServiceSnapshot, capabilityProvider, commitNotifier);
- return new NetconfServerSessionListener(operationRouter, monitor, netconfOperationServiceSnapshot);
- }
-}
package org.opendaylight.controller.netconf.impl;
-import static org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider.NetconfOperationProviderUtil.getNetconfSessionIdForReporting;
-
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfServerSessionPreferences;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
+import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCommit;
+import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
+import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouterImpl;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
import org.opendaylight.protocol.framework.SessionListenerFactory;
import org.opendaylight.protocol.framework.SessionNegotiator;
private final Timer timer;
private final SessionIdProvider idProvider;
- private final NetconfOperationProvider netconfOperationProvider;
+ private final NetconfOperationServiceFactory aggregatedOpService;
private final long connectionTimeoutMillis;
- private final DefaultCommitNotificationProducer commitNotificationProducer;
- private final SessionMonitoringService monitoringService;
+ private final CommitNotifier commitNotificationProducer;
+ private final NetconfMonitoringService monitoringService;
private static final Logger LOG = LoggerFactory.getLogger(NetconfServerSessionNegotiatorFactory.class);
private final Set<String> baseCapabilities;
// TODO too many params, refactor
- public NetconfServerSessionNegotiatorFactory(Timer timer, NetconfOperationProvider netconfOperationProvider,
- SessionIdProvider idProvider, long connectionTimeoutMillis,
- DefaultCommitNotificationProducer commitNot,
- SessionMonitoringService monitoringService) {
+ public NetconfServerSessionNegotiatorFactory(final Timer timer, final NetconfOperationServiceFactory netconfOperationProvider,
+ final SessionIdProvider idProvider, final long connectionTimeoutMillis,
+ final CommitNotifier commitNot,
+ final NetconfMonitoringService monitoringService) {
this(timer, netconfOperationProvider, idProvider, connectionTimeoutMillis, commitNot, monitoringService, DEFAULT_BASE_CAPABILITIES);
}
// TODO too many params, refactor
- public NetconfServerSessionNegotiatorFactory(Timer timer, NetconfOperationProvider netconfOperationProvider,
- SessionIdProvider idProvider, long connectionTimeoutMillis,
- DefaultCommitNotificationProducer commitNot,
- SessionMonitoringService monitoringService, Set<String> baseCapabilities) {
+ public NetconfServerSessionNegotiatorFactory(final Timer timer, final NetconfOperationServiceFactory netconfOperationProvider,
+ final SessionIdProvider idProvider, final long connectionTimeoutMillis,
+ final CommitNotifier commitNot,
+ final NetconfMonitoringService monitoringService, final Set<String> baseCapabilities) {
this.timer = timer;
- this.netconfOperationProvider = netconfOperationProvider;
+ this.aggregatedOpService = netconfOperationProvider;
this.idProvider = idProvider;
this.connectionTimeoutMillis = connectionTimeoutMillis;
this.commitNotificationProducer = commitNot;
private ImmutableSet<String> validateBaseCapabilities(final Set<String> baseCapabilities) {
// Check base capabilities to be supported by the server
- Sets.SetView<String> unknownBaseCaps = Sets.difference(baseCapabilities, DEFAULT_BASE_CAPABILITIES);
+ final Sets.SetView<String> unknownBaseCaps = Sets.difference(baseCapabilities, DEFAULT_BASE_CAPABILITIES);
Preconditions.checkArgument(unknownBaseCaps.isEmpty(),
"Base capabilities that will be supported by netconf server have to be subset of %s, unknown base capabilities: %s",
DEFAULT_BASE_CAPABILITIES, unknownBaseCaps);
- ImmutableSet.Builder<String> b = ImmutableSet.builder();
+ final ImmutableSet.Builder<String> b = ImmutableSet.builder();
b.addAll(baseCapabilities);
// Base 1.0 capability is supported by default
b.add(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0);
* @return session negotiator
*/
@Override
- public SessionNegotiator<NetconfServerSession> getSessionNegotiator(SessionListenerFactory<NetconfServerSessionListener> defunctSessionListenerFactory,
- Channel channel, Promise<NetconfServerSession> promise) {
- long sessionId = idProvider.getNextSessionId();
- NetconfOperationServiceSnapshot netconfOperationServiceSnapshot = netconfOperationProvider.openSnapshot(
- getNetconfSessionIdForReporting(sessionId));
- CapabilityProvider capabilityProvider = new CapabilityProviderImpl(netconfOperationServiceSnapshot);
-
- NetconfServerSessionPreferences proposal = null;
+ public SessionNegotiator<NetconfServerSession> getSessionNegotiator(final SessionListenerFactory<NetconfServerSessionListener> defunctSessionListenerFactory,
+ final Channel channel, final Promise<NetconfServerSession> promise) {
+ final long sessionId = idProvider.getNextSessionId();
+
+ NetconfServerSessionPreferences proposal;
try {
- proposal = new NetconfServerSessionPreferences(
- createHelloMessage(sessionId, capabilityProvider), sessionId);
- } catch (NetconfDocumentedException e) {
- LOG.error("Unable to create hello mesage for session {} with capability provider {}", sessionId,capabilityProvider);
+ proposal = new NetconfServerSessionPreferences(createHelloMessage(sessionId, monitoringService), sessionId);
+ } catch (final NetconfDocumentedException e) {
+ LOG.error("Unable to create hello message for session {} with {}", sessionId, monitoringService);
throw new IllegalStateException(e);
}
- NetconfServerSessionListenerFactory sessionListenerFactory = new NetconfServerSessionListenerFactory(
- commitNotificationProducer, monitoringService,
- netconfOperationServiceSnapshot, capabilityProvider);
-
return new NetconfServerSessionNegotiator(proposal, promise, channel, timer,
- sessionListenerFactory.getSessionListener(), connectionTimeoutMillis);
+ getListener(Long.toString(sessionId)), connectionTimeoutMillis);
+ }
+
+ private NetconfServerSessionListener getListener(final String netconfSessionIdForReporting) {
+ final NetconfOperationService service =
+ this.aggregatedOpService.createService(netconfSessionIdForReporting);
+ final NetconfOperationRouter operationRouter =
+ new NetconfOperationRouterImpl(service, commitNotificationProducer, monitoringService, netconfSessionIdForReporting);
+ return new NetconfServerSessionListener(operationRouter, monitoringService, service);
+
}
- private NetconfHelloMessage createHelloMessage(long sessionId, CapabilityProvider capabilityProvider) throws NetconfDocumentedException {
- return NetconfHelloMessage.createServerHello(Sets.union(capabilityProvider.getCapabilities(), baseCapabilities), sessionId);
+ private NetconfHelloMessage createHelloMessage(final long sessionId, final NetconfMonitoringService capabilityProvider) throws NetconfDocumentedException {
+ return NetconfHelloMessage.createServerHello(Sets.union(DefaultCommit.transformCapabilities(capabilityProvider.getCapabilities()), baseCapabilities), sessionId);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.impl.mapping;
-
-import com.google.common.base.Optional;
-import java.util.Set;
-
-public interface CapabilityProvider {
-
- String getSchemaForCapability(String moduleName, Optional<String> revision);
-
- Set<String> getCapabilities();
-
-}
package org.opendaylight.controller.netconf.impl.mapping.operations;
+import com.google.common.base.Function;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Sets;
import java.io.InputStream;
+import java.util.Set;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
+import org.opendaylight.controller.netconf.impl.CommitNotifier;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.util.mapping.AbstractNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Capabilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
private static final String NOTIFY_ATTR = "notify";
- private final DefaultCommitNotificationProducer notificationProducer;
- private final CapabilityProvider cap;
+ private final CommitNotifier notificationProducer;
+ private final NetconfMonitoringService cap;
private final NetconfOperationRouter operationRouter;
- public DefaultCommit(DefaultCommitNotificationProducer notifier, CapabilityProvider cap,
+ public DefaultCommit(CommitNotifier notifier, NetconfMonitoringService cap,
String netconfSessionIdForReporting, NetconfOperationRouter netconfOperationRouter) {
super(netconfSessionIdForReporting);
this.notificationProducer = notifier;
removePersisterAttributes(requestMessage);
Element cfgSnapshot = getConfigSnapshot(operationRouter);
LOG.debug("Config snapshot retrieved successfully {}", cfgSnapshot);
- notificationProducer.sendCommitNotification("ok", cfgSnapshot, cap.getCapabilities());
+ notificationProducer.sendCommitNotification("ok", cfgSnapshot, transformCapabilities(cap.getCapabilities()));
}
return subsequentOperation.execute(requestMessage);
}
+ // FIXME move somewhere to util since this is required also by negotiatorFactory
+ public static Set<String> transformCapabilities(final Capabilities capabilities) {
+ return Sets.newHashSet(Collections2.transform(capabilities.getCapability(), new Function<Uri, String>() {
+ @Override
+ public String apply(final Uri uri) {
+ return uri.getValue();
+ }
+ }));
+ }
+
@Override
protected Element handle(Document document, XmlElement message, NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
throw new UnsupportedOperationException("Never gets called");
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.osgi;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSet.Builder;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactoryListener;
+import org.opendaylight.controller.netconf.util.CloseableUtil;
+
+/**
+ * NetconfOperationService aggregator. Makes a collection of operation services accessible as one.
+ */
+public class AggregatedNetconfOperationServiceFactory implements NetconfOperationServiceFactory, NetconfOperationServiceFactoryListener, AutoCloseable {
+
+ private final Set<NetconfOperationServiceFactory> factories = new HashSet<>();
+ private final Multimap<NetconfOperationServiceFactory, AutoCloseable> registrations = HashMultimap.create();
+ private final Set<CapabilityListener> listeners = Sets.newHashSet();
+
+ @Override
+ public synchronized void onAddNetconfOperationServiceFactory(NetconfOperationServiceFactory service) {
+ factories.add(service);
+
+ for (final CapabilityListener listener : listeners) {
+ AutoCloseable reg = service.registerCapabilityListener(listener);
+ registrations.put(service, reg);
+ }
+ }
+
+ @Override
+ public synchronized void onRemoveNetconfOperationServiceFactory(NetconfOperationServiceFactory service) {
+ factories.remove(service);
+
+ for (final AutoCloseable autoCloseable : registrations.get(service)) {
+ try {
+ autoCloseable.close();
+ } catch (Exception e) {
+ // FIXME Issue warning
+ }
+ }
+
+ registrations.removeAll(service);
+ }
+
+ @Override
+ public synchronized Set<Capability> getCapabilities() {
+ final HashSet<Capability> capabilities = Sets.newHashSet();
+ for (final NetconfOperationServiceFactory factory : factories) {
+ capabilities.addAll(factory.getCapabilities());
+ }
+ return capabilities;
+ }
+
+ @Override
+ public synchronized AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ final Map<NetconfOperationServiceFactory, AutoCloseable> regs = Maps.newHashMap();
+
+ for (final NetconfOperationServiceFactory factory : factories) {
+ final AutoCloseable reg = factory.registerCapabilityListener(listener);
+ regs.put(factory, reg);
+ }
+ listeners.add(listener);
+
+ return new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ synchronized (AggregatedNetconfOperationServiceFactory.this) {
+ listeners.remove(listener);
+ CloseableUtil.closeAll(regs.values());
+ for (final Map.Entry<NetconfOperationServiceFactory, AutoCloseable> reg : regs.entrySet()) {
+ registrations.remove(reg.getKey(), reg.getValue());
+ }
+ }
+ }
+ };
+ }
+
+ @Override
+ public synchronized NetconfOperationService createService(final String netconfSessionIdForReporting) {
+ return new AggregatedNetconfOperation(factories, netconfSessionIdForReporting);
+ }
+
+ @Override
+ public synchronized void close() throws Exception {
+ factories.clear();
+ for (AutoCloseable reg : registrations.values()) {
+ reg.close();
+ }
+ registrations.clear();
+ listeners.clear();
+ }
+
+ private static final class AggregatedNetconfOperation implements NetconfOperationService {
+
+ private final Set<NetconfOperationService> services;
+
+ public AggregatedNetconfOperation(final Set<NetconfOperationServiceFactory> factories, final String netconfSessionIdForReporting) {
+ final Builder<NetconfOperationService> b = ImmutableSet.builder();
+ for (final NetconfOperationServiceFactory factory : factories) {
+ b.add(factory.createService(netconfSessionIdForReporting));
+ }
+ this.services = b.build();
+ }
+
+ @Override
+ public Set<NetconfOperation> getNetconfOperations() {
+ final HashSet<NetconfOperation> operations = Sets.newHashSet();
+ for (final NetconfOperationService service : services) {
+ operations.addAll(service.getNetconfOperations());
+ }
+ return operations;
+ }
+
+ @Override
+ public void close() {
+ try {
+ CloseableUtil.closeAll(services);
+ } catch (final Exception e) {
+ throw new IllegalStateException("Unable to properly close all aggregated services", e);
+ }
+ }
+ }
+}
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
-import org.opendaylight.controller.netconf.impl.NetconfServerDispatcher;
+import org.opendaylight.controller.netconf.impl.NetconfServerDispatcherImpl;
import org.opendaylight.controller.netconf.impl.NetconfServerSessionNegotiatorFactory;
import org.opendaylight.controller.netconf.impl.SessionIdProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactoryListener;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
@Override
public void start(final BundleContext context) {
- NetconfOperationServiceFactoryListenerImpl factoriesListener = new NetconfOperationServiceFactoryListenerImpl();
+ AggregatedNetconfOperationServiceFactory factoriesListener = new AggregatedNetconfOperationServiceFactory();
startOperationServiceFactoryTracker(context, factoriesListener);
SessionIdProvider idProvider = new SessionIdProvider();
commitNot = new DefaultCommitNotificationProducer(ManagementFactory.getPlatformMBeanServer());
- SessionMonitoringService monitoringService = startMonitoringService(context, factoriesListener);
+ NetconfMonitoringService monitoringService = startMonitoringService(context, factoriesListener);
NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
timer, factoriesListener, idProvider, connectionTimeoutMillis, commitNot, monitoringService);
eventLoopGroup = new NioEventLoopGroup();
- NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
+ NetconfServerDispatcherImpl.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcherImpl.ServerChannelInitializer(
serverNegotiatorFactory);
- NetconfServerDispatcher dispatch = new NetconfServerDispatcher(serverChannelInitializer, eventLoopGroup, eventLoopGroup);
+ NetconfServerDispatcherImpl dispatch = new NetconfServerDispatcherImpl(serverChannelInitializer, eventLoopGroup, eventLoopGroup);
LocalAddress address = NetconfConfigUtil.getNetconfLocalAddress();
LOG.trace("Starting local netconf server at {}", address);
dispatch.createLocalServer(address);
-
- context.registerService(NetconfOperationProvider.class, factoriesListener, null);
-
}
- private void startOperationServiceFactoryTracker(BundleContext context, NetconfOperationServiceFactoryListenerImpl factoriesListener) {
+ private void startOperationServiceFactoryTracker(BundleContext context, NetconfOperationServiceFactoryListener factoriesListener) {
factoriesTracker = new NetconfOperationServiceFactoryTracker(context, factoriesListener);
factoriesTracker.open();
}
- private NetconfMonitoringServiceImpl startMonitoringService(BundleContext context, NetconfOperationServiceFactoryListenerImpl factoriesListener) {
+ private NetconfMonitoringServiceImpl startMonitoringService(BundleContext context, AggregatedNetconfOperationServiceFactory factoriesListener) {
NetconfMonitoringServiceImpl netconfMonitoringServiceImpl = new NetconfMonitoringServiceImpl(factoriesListener);
Dictionary<String, ?> dic = new Hashtable<>();
regMonitoring = context.registerService(NetconfMonitoringService.class, netconfMonitoringServiceImpl, dic);
package org.opendaylight.controller.netconf.impl.osgi;
import com.google.common.base.Function;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
import io.netty.util.internal.ConcurrentSet;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import javax.annotation.Nonnull;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfState;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfStateBuilder;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Capabilities;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.CapabilitiesBuilder;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Schemas;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.SchemasBuilder;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Sessions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, SessionMonitoringService {
+public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, AutoCloseable {
+
private static final Schema.Location NETCONF_LOCATION = new Schema.Location(Schema.Location.Enumeration.NETCONF);
private static final List<Schema.Location> NETCONF_LOCATIONS = ImmutableList.of(NETCONF_LOCATION);
private static final Logger LOG = LoggerFactory.getLogger(NetconfMonitoringServiceImpl.class);
return input.toManagementSession();
}
};
+ private static final Function<Capability, Uri> CAPABILITY_TO_URI = new Function<Capability, Uri>() {
+ @Override
+ public Uri apply(final Capability input) {
+ return new Uri(input.getCapabilityUri());
+ }
+ };
private final Set<NetconfManagementSession> sessions = new ConcurrentSet<>();
- private final NetconfOperationProvider netconfOperationProvider;
+ private final NetconfOperationServiceFactory netconfOperationProvider;
+ private final Map<Uri, Capability> capabilities = new ConcurrentHashMap<>();
+
+ private final Set<MonitoringListener> listeners = Sets.newHashSet();
- public NetconfMonitoringServiceImpl(final NetconfOperationProvider netconfOperationProvider) {
+ public NetconfMonitoringServiceImpl(final NetconfOperationServiceFactory netconfOperationProvider) {
this.netconfOperationProvider = netconfOperationProvider;
+ netconfOperationProvider.registerCapabilityListener(this);
}
@Override
- public void onSessionUp(final NetconfManagementSession session) {
+ public synchronized void onSessionUp(final NetconfManagementSession session) {
LOG.debug("Session {} up", session);
Preconditions.checkState(!sessions.contains(session), "Session %s was already added", session);
sessions.add(session);
+ notifyListeners();
}
@Override
- public void onSessionDown(final NetconfManagementSession session) {
+ public synchronized void onSessionDown(final NetconfManagementSession session) {
LOG.debug("Session {} down", session);
Preconditions.checkState(sessions.contains(session), "Session %s not present", session);
sessions.remove(session);
+ notifyListeners();
}
@Override
- public Sessions getSessions() {
+ public synchronized Sessions getSessions() {
return new SessionsBuilder().setSession(ImmutableList.copyOf(Collections2.transform(sessions, SESSION_FUNCTION))).build();
}
@Override
- public Schemas getSchemas() {
- // capabilities should be split from operations (it will allow to move getSchema operation to monitoring module)
- try (NetconfOperationServiceSnapshot snapshot = netconfOperationProvider.openSnapshot("netconf-monitoring")) {
- return transformSchemas(snapshot.getServices());
- } catch (RuntimeException e) {
+ public synchronized Schemas getSchemas() {
+ try {
+ return transformSchemas(netconfOperationProvider.getCapabilities());
+ } catch (final RuntimeException e) {
throw e;
- } catch (Exception e) {
+ } catch (final Exception e) {
throw new IllegalStateException("Exception while closing", e);
}
}
- private static Schemas transformSchemas(final Set<NetconfOperationService> services) {
- // FIXME: Capability implementations do not have hashcode/equals!
- final Set<Capability> caps = new HashSet<>();
- for (NetconfOperationService netconfOperationService : services) {
- // TODO check for duplicates ? move capability merging to snapshot
- // Split capabilities from operations first and delete this duplicate code
- caps.addAll(netconfOperationService.getCapabilities());
+ @Override
+ public synchronized String getSchemaForCapability(final String moduleName, final Optional<String> revision) {
+
+ // FIXME not effective at all
+
+ Map<String, Map<String, String>> mappedModulesToRevisionToSchema = Maps.newHashMap();
+
+ final Collection<Capability> caps = capabilities.values();
+
+ for (Capability cap : caps) {
+ if (!cap.getModuleName().isPresent()
+ || !cap.getRevision().isPresent()
+ || !cap.getCapabilitySchema().isPresent()){
+ continue;
+ }
+
+ final String currentModuleName = cap.getModuleName().get();
+ Map<String, String> revisionMap = mappedModulesToRevisionToSchema.get(currentModuleName);
+ if (revisionMap == null) {
+ revisionMap = Maps.newHashMap();
+ mappedModulesToRevisionToSchema.put(currentModuleName, revisionMap);
+ }
+
+ String currentRevision = cap.getRevision().get();
+ revisionMap.put(currentRevision, cap.getCapabilitySchema().get());
}
+ Map<String, String> revisionMapRequest = mappedModulesToRevisionToSchema.get(moduleName);
+ Preconditions.checkState(revisionMapRequest != null, "Capability for module %s not present, " + ""
+ + "available modules : %s", moduleName, Collections2.transform(caps, CAPABILITY_TO_URI));
+
+ if (revision.isPresent()) {
+ String schema = revisionMapRequest.get(revision.get());
+
+ Preconditions.checkState(schema != null,
+ "Capability for module %s:%s not present, available revisions for module: %s", moduleName,
+ revision.get(), revisionMapRequest.keySet());
+
+ return schema;
+ } else {
+ Preconditions.checkState(revisionMapRequest.size() == 1,
+ "Expected 1 capability for module %s, available revisions : %s", moduleName,
+ revisionMapRequest.keySet());
+ return revisionMapRequest.values().iterator().next();
+ }
+ }
+
+ @Override
+ public synchronized Capabilities getCapabilities() {
+ return new CapabilitiesBuilder().setCapability(Lists.newArrayList(capabilities.keySet())).build();
+ }
+
+ @Override
+ public synchronized AutoCloseable registerListener(final MonitoringListener listener) {
+ listeners.add(listener);
+ listener.onStateChanged(getCurrentNetconfState());
+ return new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ listeners.remove(listener);
+ }
+ };
+ }
+
+ private NetconfState getCurrentNetconfState() {
+ return new NetconfStateBuilder()
+ .setCapabilities(getCapabilities())
+ .setSchemas(getSchemas())
+ .setSessions(getSessions())
+ .build();
+ }
+
+ private static Schemas transformSchemas(final Set<Capability> caps) {
final List<Schema> schemas = new ArrayList<>(caps.size());
- for (Capability cap : caps) {
+ for (final Capability cap : caps) {
if (cap.getCapabilitySchema().isPresent()) {
- SchemaBuilder builder = new SchemaBuilder();
+ final SchemaBuilder builder = new SchemaBuilder();
Preconditions.checkState(cap.getModuleNamespace().isPresent());
builder.setNamespace(new Uri(cap.getModuleNamespace().get()));
Preconditions.checkState(cap.getRevision().isPresent());
- String version = cap.getRevision().get();
+ final String version = cap.getRevision().get();
builder.setVersion(version);
Preconditions.checkState(cap.getModuleName().isPresent());
- String identifier = cap.getModuleName().get();
+ final String identifier = cap.getModuleName().get();
builder.setIdentifier(identifier);
builder.setFormat(Yang.class);
final Builder<Schema.Location> b = ImmutableList.builder();
b.add(NETCONF_LOCATION);
- for (String location : locations) {
+ for (final String location : locations) {
b.add(new Schema.Location(new Uri(location)));
}
return b.build();
}
+
+ @Override
+ public synchronized void onCapabilitiesAdded(final Set<Capability> addedCaps) {
+ // FIXME howto check for duplicates
+ this.capabilities.putAll(Maps.uniqueIndex(addedCaps, CAPABILITY_TO_URI));
+ notifyListeners();
+ }
+
+ private void notifyListeners() {
+ for (final MonitoringListener listener : listeners) {
+ listener.onStateChanged(getCurrentNetconfState());
+ }
+ }
+
+    @Override
+    public synchronized void onCapabilitiesRemoved(final Set<Capability> removedCaps) {
+        // The capabilities map is keyed by Uri (see CAPABILITY_TO_URI), so the
+        // lookup key must be wrapped in a Uri. Calling remove() with the raw
+        // String URI compiles (Map.remove takes Object) but never matches,
+        // leaving stale capabilities in the map.
+        for (final Capability removedCap : removedCaps) {
+            capabilities.remove(new Uri(removedCap.getCapabilityUri()));
+        }
+        notifyListeners();
+    }
+
+ @Override
+ public synchronized void close() throws Exception {
+ listeners.clear();
+ sessions.clear();
+ capabilities.clear();
+ }
}
import java.util.Set;
import java.util.TreeMap;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.impl.CommitNotifier;
import org.opendaylight.controller.netconf.impl.NetconfServerSession;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCloseSession;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultCommit;
-import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultGetSchema;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultNetconfOperation;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultStartExi;
import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultStopExi;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
+import org.opendaylight.controller.netconf.mapping.api.SessionAwareNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class NetconfOperationRouterImpl implements NetconfOperationRouter {
private static final Logger LOG = LoggerFactory.getLogger(NetconfOperationRouterImpl.class);
- private final NetconfOperationServiceSnapshot netconfOperationServiceSnapshot;
+ private final NetconfOperationService netconfOperationServiceSnapshot;
private final Collection<NetconfOperation> allNetconfOperations;
- public NetconfOperationRouterImpl(final NetconfOperationServiceSnapshot netconfOperationServiceSnapshot, final CapabilityProvider capabilityProvider,
- final DefaultCommitNotificationProducer commitNotifier) {
+ public NetconfOperationRouterImpl(final NetconfOperationService netconfOperationServiceSnapshot,
+ final CommitNotifier commitNotifier, final NetconfMonitoringService netconfMonitoringService, final String sessionId) {
this.netconfOperationServiceSnapshot = Preconditions.checkNotNull(netconfOperationServiceSnapshot);
- final String sessionId = netconfOperationServiceSnapshot.getNetconfSessionIdForReporting();
-
final Set<NetconfOperation> ops = new HashSet<>();
- ops.add(new DefaultGetSchema(capabilityProvider, sessionId));
ops.add(new DefaultCloseSession(sessionId, this));
ops.add(new DefaultStartExi(sessionId));
ops.add(new DefaultStopExi(sessionId));
- ops.add(new DefaultCommit(commitNotifier, capabilityProvider, sessionId, this));
+ ops.add(new DefaultCommit(commitNotifier, netconfMonitoringService, sessionId, this));
- for (NetconfOperationService netconfOperationService : netconfOperationServiceSnapshot.getServices()) {
- for (NetconfOperation netconfOperation : netconfOperationService.getNetconfOperations()) {
- Preconditions.checkState(!ops.contains(netconfOperation),
- "Netconf operation %s already present", netconfOperation);
- ops.add(netconfOperation);
- }
- }
+ ops.addAll(netconfOperationServiceSnapshot.getNetconfOperations());
allNetconfOperations = ImmutableSet.copyOf(ops);
}
if (netconfOperation instanceof DefaultNetconfOperation) {
((DefaultNetconfOperation) netconfOperation).setNetconfSession(session);
}
+ if(netconfOperation instanceof SessionAwareNetconfOperation) {
+ ((SessionAwareNetconfOperation) netconfOperation).setSession(session);
+ }
if (!handlingPriority.equals(HandlingPriority.CANNOT_HANDLE)) {
Preconditions.checkState(!sortedPriority.containsKey(handlingPriority),
- "Multiple %s available to handle message %s with priority %s",
- NetconfOperation.class.getName(), message, handlingPriority);
+ "Multiple %s available to handle message %s with priority %s, %s and %s",
+ NetconfOperation.class.getName(), message, handlingPriority, netconfOperation, sortedPriority.get(handlingPriority));
sortedPriority.put(handlingPriority, netconfOperation);
}
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.impl.osgi;
-
-import java.util.HashSet;
-import java.util.Set;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
-
-public class NetconfOperationServiceFactoryListenerImpl implements NetconfOperationServiceFactoryListener,
- NetconfOperationProvider {
- private final Set<NetconfOperationServiceFactory> allFactories = new HashSet<>();
-
- @Override
- public synchronized void onAddNetconfOperationServiceFactory(NetconfOperationServiceFactory service) {
- allFactories.add(service);
- }
-
- @Override
- public synchronized void onRemoveNetconfOperationServiceFactory(NetconfOperationServiceFactory service) {
- allFactories.remove(service);
- }
-
- @Override
- public synchronized NetconfOperationServiceSnapshotImpl openSnapshot(String sessionIdForReporting) {
- return new NetconfOperationServiceSnapshotImpl(allFactories, sessionIdForReporting);
- }
-
-}
*/
package org.opendaylight.controller.netconf.impl.osgi;
+import org.opendaylight.controller.netconf.api.util.NetconfConstants;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactoryListener;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
import org.osgi.util.tracker.ServiceTracker;
@Override
public NetconfOperationServiceFactory addingService(ServiceReference<NetconfOperationServiceFactory> reference) {
- NetconfOperationServiceFactory netconfOperationServiceFactory = super.addingService(reference);
- factoriesListener.onAddNetconfOperationServiceFactory(netconfOperationServiceFactory);
- return netconfOperationServiceFactory;
+ Object property = reference.getProperty(NetconfConstants.SERVICE_NAME);
+ if (property != null
+ && (property.equals(NetconfConstants.CONFIG_NETCONF_CONNECTOR)
+ || property.equals(NetconfConstants.NETCONF_MONITORING))) {
+ NetconfOperationServiceFactory netconfOperationServiceFactory = super.addingService(reference);
+ factoriesListener.onAddNetconfOperationServiceFactory(netconfOperationServiceFactory);
+ return netconfOperationServiceFactory;
+ }
+
+ return null;
}
@Override
public void removedService(ServiceReference<NetconfOperationServiceFactory> reference,
NetconfOperationServiceFactory netconfOperationServiceFactory) {
- factoriesListener.onRemoveNetconfOperationServiceFactory(netconfOperationServiceFactory);
+ if (netconfOperationServiceFactory != null) {
+ factoriesListener.onRemoveNetconfOperationServiceFactory(netconfOperationServiceFactory);
+ }
}
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.impl.osgi;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSet.Builder;
-import java.util.Set;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
-import org.opendaylight.controller.netconf.util.CloseableUtil;
-
-public class NetconfOperationServiceSnapshotImpl implements NetconfOperationServiceSnapshot {
-
- private final Set<NetconfOperationService> services;
- private final String netconfSessionIdForReporting;
-
- public NetconfOperationServiceSnapshotImpl(final Set<NetconfOperationServiceFactory> factories, final String sessionIdForReporting) {
- final Builder<NetconfOperationService> b = ImmutableSet.builder();
- netconfSessionIdForReporting = sessionIdForReporting;
- for (NetconfOperationServiceFactory factory : factories) {
- b.add(factory.createService(netconfSessionIdForReporting));
- }
- this.services = b.build();
- }
-
- @Override
- public String getNetconfSessionIdForReporting() {
- return netconfSessionIdForReporting;
- }
-
- @Override
- public Set<NetconfOperationService> getServices() {
- return services;
- }
-
- @Override
- public void close() throws Exception {
- CloseableUtil.closeAll(services);
- }
-
- @Override
- public String toString() {
- return "NetconfOperationServiceSnapshotImpl{" + netconfSessionIdForReporting + '}';
- }
-}
--- /dev/null
+// vi: set smarttab et sw=4 tabstop=4:
+module netconf-northbound-impl {
+
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl";
+ prefix "cfg-net-s-i";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import netconf-northbound-mapper { prefix nnm; revision-date 2015-01-14; }
+ import netconf-northbound { prefix nn; revision-date 2015-01-14; }
+ import netty {prefix netty; }
+
+ description
+ "This module contains the base YANG definitions for
+ netconf-server-dispatcher implementation.
+
+      Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2015-01-12" {
+ description
+ "Initial revision.";
+ }
+
+ identity netconf-server-dispatcher-impl {
+ base config:module-type;
+ config:provided-service nn:netconf-server-dispatcher;
+ config:java-name-prefix NetconfServerDispatcher;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-server-dispatcher-impl {
+ when "/config:modules/config:module/config:type = 'netconf-server-dispatcher-impl'";
+
+ leaf connection-timeout-millis {
+ description "Specifies timeout in milliseconds after which connection must be established.";
+ type uint32;
+ default 20000;
+ }
+
+ container boss-thread-group {
+ uses config:service-ref {
+ refine type {
+ config:required-identity netty:netty-threadgroup;
+ }
+ }
+ }
+
+ container worker-thread-group {
+ uses config:service-ref {
+ refine type {
+ config:required-identity netty:netty-threadgroup;
+ }
+ }
+ }
+
+ list mappers {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity nnm:netconf-northbound-mapper;
+ }
+ }
+ }
+
+ container server-monitor {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity nn:netconf-server-monitoring;
+ }
+ }
+ }
+
+ container timer {
+ uses config:service-ref {
+ refine type {
+ config:required-identity netty:netty-timer;
+ }
+ }
+ }
+ }
+ }
+
+
+ identity netconf-server-monitoring-impl {
+ base config:module-type;
+ config:provided-service nn:netconf-server-monitoring;
+ config:java-name-prefix NetconfServerMonitoring;
+ }
+
+ // TODO Monitoring could expose the monitoring data over JMX...
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-server-monitoring-impl {
+ when "/config:modules/config:module/config:type = 'netconf-server-monitoring-impl'";
+
+ container aggregator {
+ uses config:service-ref {
+ refine type {
+ config:required-identity nnm:netconf-northbound-mapper;
+ }
+ }
+ }
+
+ }
+ }
+
+ identity netconf-mapper-aggregator {
+ base config:module-type;
+ config:provided-service nnm:netconf-northbound-mapper;
+ config:provided-service nnm:netconf-mapper-registry;
+ config:java-name-prefix NetconfMapperAggregator;
+ description "Aggregated operation provider for netconf servers. Joins all the operations and capabilities of all the mappers it aggregates and exposes them as a single service. The dependency orientation is reversed in order to prevent cyclic dependencies when monitoring service is considered";
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-mapper-aggregator {
+ when "/config:modules/config:module/config:type = 'netconf-mapper-aggregator'";
+
+ }
+ }
+
+}
\ No newline at end of file
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anySetOf;
import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import com.google.common.base.Preconditions;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.CapabilitiesBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
HashedWheelTimer hashedWheelTimer;
private TestingNetconfOperation testingNetconfOperation;
- public static SessionMonitoringService createMockedMonitoringService() {
- SessionMonitoringService monitoring = mock(SessionMonitoringService.class);
+ public static NetconfMonitoringService createMockedMonitoringService() {
+ NetconfMonitoringService monitoring = mock(NetconfMonitoringService.class);
doNothing().when(monitoring).onSessionUp(any(NetconfServerSession.class));
doNothing().when(monitoring).onSessionDown(any(NetconfServerSession.class));
+ doReturn(new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+
+ }
+ }).when(monitoring).registerListener(any(NetconfMonitoringService.MonitoringListener.class));
+ doNothing().when(monitoring).onCapabilitiesAdded(anySetOf(Capability.class));
+ doNothing().when(monitoring).onCapabilitiesRemoved(anySetOf(Capability.class));
+ doReturn(new CapabilitiesBuilder().setCapability(Collections.<Uri>emptyList()).build()).when(monitoring).getCapabilities();
return monitoring;
}
nettyGroup = new NioEventLoopGroup(nettyThreads);
netconfClientDispatcher = new NetconfClientDispatcherImpl(nettyGroup, nettyGroup, hashedWheelTimer);
- NetconfOperationServiceFactoryListenerImpl factoriesListener = new NetconfOperationServiceFactoryListenerImpl();
+ AggregatedNetconfOperationServiceFactory factoriesListener = new AggregatedNetconfOperationServiceFactory();
testingNetconfOperation = new TestingNetconfOperation();
factoriesListener.onAddNetconfOperationServiceFactory(new TestingOperationServiceFactory(testingNetconfOperation));
commitNot = new DefaultCommitNotificationProducer(ManagementFactory.getPlatformMBeanServer());
- NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(serverNegotiatorFactory);
- final NetconfServerDispatcher dispatch = new NetconfServerDispatcher(serverChannelInitializer, nettyGroup, nettyGroup);
+ NetconfServerDispatcherImpl.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcherImpl.ServerChannelInitializer(serverNegotiatorFactory);
+ final NetconfServerDispatcherImpl dispatch = new NetconfServerDispatcherImpl(serverChannelInitializer, nettyGroup, nettyGroup);
ChannelFuture s = dispatch.createServer(netconfAddress);
s.await();
this.operations = operations;
}
+ @Override
+ public Set<Capability> getCapabilities() {
+ return Collections.emptySet();
+ }
+
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ return new AutoCloseable(){
+ @Override
+ public void close() throws Exception {}
+ };
+ }
+
@Override
public NetconfOperationService createService(String netconfSessionIdForReporting) {
return new NetconfOperationService() {
- @Override
- public Set<Capability> getCapabilities() {
- return Collections.emptySet();
- }
@Override
public Set<NetconfOperation> getNetconfOperations() {
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
public class NetconfDispatcherImplTest {
private EventLoopGroup nettyGroup;
- private NetconfServerDispatcher dispatch;
+ private NetconfServerDispatcherImpl dispatch;
private DefaultCommitNotificationProducer commitNot;
private HashedWheelTimer hashedWheelTimer;
commitNot = new DefaultCommitNotificationProducer(
ManagementFactory.getPlatformMBeanServer());
- NetconfOperationServiceFactoryListenerImpl factoriesListener = new NetconfOperationServiceFactoryListenerImpl();
+ AggregatedNetconfOperationServiceFactory factoriesListener = new AggregatedNetconfOperationServiceFactory();
SessionIdProvider idProvider = new SessionIdProvider();
hashedWheelTimer = new HashedWheelTimer();
NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
hashedWheelTimer, factoriesListener, idProvider, 5000, commitNot, ConcurrentClientsTest.createMockedMonitoringService());
- NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(serverNegotiatorFactory);
+ NetconfServerDispatcherImpl.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcherImpl.ServerChannelInitializer(serverNegotiatorFactory);
- dispatch = new NetconfServerDispatcher(
+ dispatch = new NetconfServerDispatcherImpl(
serverChannelInitializer, nettyGroup, nettyGroup);
}
package org.opendaylight.controller.netconf.impl;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import io.netty.channel.Channel;
-import java.util.Set;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
-import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
-
public class NetconfMonitoringServiceImplTest {
- private NetconfMonitoringServiceImpl service;
-
- @Mock
- private NetconfOperationProvider operationProvider;
- @Mock
- private NetconfManagementSession managementSession;
- @Mock
- private NetconfOperationServiceSnapshot snapshot;
- @Mock
- private NetconfOperationService operationService;
-
- @Before
- public void setUp() throws Exception {
- MockitoAnnotations.initMocks(this);
- service = new NetconfMonitoringServiceImpl(operationProvider);
- }
-
- @Test
- public void testSessions() throws Exception {
- doReturn("sessToStr").when(managementSession).toString();
- service.onSessionUp(managementSession);
- }
-
- @Test(expected = RuntimeException.class)
- public void testGetSchemas() throws Exception {
- doThrow(RuntimeException.class).when(operationProvider).openSnapshot(anyString());
- service.getSchemas();
- }
-
- @Test(expected = IllegalStateException.class)
- public void testGetSchemas2() throws Exception {
- doThrow(Exception.class).when(operationProvider).openSnapshot(anyString());
- service.getSchemas();
- }
-
- @Test
- public void testGetSchemas3() throws Exception {
- doReturn("").when(managementSession).toString();
- Capability cap = mock(Capability.class);
- Set<Capability> caps = Sets.newHashSet(cap);
- Set<NetconfOperationService> services = Sets.newHashSet(operationService);
- doReturn(snapshot).when(operationProvider).openSnapshot(anyString());
- doReturn(services).when(snapshot).getServices();
- doReturn(caps).when(operationService).getCapabilities();
- Optional<String> opt = mock(Optional.class);
- doReturn(opt).when(cap).getCapabilitySchema();
- doReturn(true).when(opt).isPresent();
- doReturn(opt).when(cap).getModuleNamespace();
- doReturn("namespace").when(opt).get();
- Optional<String> optRev = Optional.of("rev");
- doReturn(optRev).when(cap).getRevision();
- doReturn(Optional.of("modName")).when(cap).getModuleName();
- doReturn(Lists.newArrayList("loc")).when(cap).getLocation();
- doNothing().when(snapshot).close();
-
- assertNotNull(service.getSchemas());
- verify(snapshot, times(1)).close();
-
- NetconfServerSessionListener sessionListener = mock(NetconfServerSessionListener.class);
- Channel channel = mock(Channel.class);
- doReturn("mockChannel").when(channel).toString();
- NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("name", "addr", "2", "tcp", "id");
- NetconfServerSession sm = new NetconfServerSession(sessionListener, channel, 10, header);
- doNothing().when(sessionListener).onSessionUp(any(NetconfServerSession.class));
- sm.sessionUp();
- service.onSessionUp(sm);
- assertEquals(1, service.getSessions().getSession().size());
-
- assertEquals(Long.valueOf(10), service.getSessions().getSession().get(0).getSessionId());
-
- service.onSessionDown(sm);
- assertEquals(0, service.getSessions().getSession().size());
- }
+ // TODO redo test
}
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import com.google.common.collect.Sets;
+import java.util.Collections;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.impl.NetconfServerSession;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.CapabilitiesBuilder;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
private Document requestMessage;
private NetconfOperationRouter router;
private DefaultCommitNotificationProducer notifier;
- private CapabilityProvider cap;
+ private NetconfMonitoringService cap;
private DefaultCommit commit;
@Before
doReturn(false).when(operation).isExecutionTermination();
notifier = mock(DefaultCommitNotificationProducer.class);
doNothing().when(notifier).sendCommitNotification(anyString(), any(Element.class), anySetOf(String.class));
- cap = mock(CapabilityProvider.class);
- doReturn(Sets.newHashSet()).when(cap).getCapabilities();
+ cap = mock(NetconfMonitoringService.class);
+ doReturn(new CapabilitiesBuilder().setCapability(Collections.<Uri>emptyList()).build()).when(cap).getCapabilities();
Document rpcData = XmlFileLoader.xmlFileToDocument("netconfMessages/editConfig_expectedResult.xml");
doReturn(rpcData).when(router).onNetconfMessage(any(Document.class), any(NetconfServerSession.class));
commit = new DefaultCommit(notifier, cap, "", router);
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.util.Arrays;
doReturn(filter).when(bundle).createFilter(anyString());
doNothing().when(bundle).addServiceListener(any(ServiceListener.class), anyString());
- ServiceReference<?>[] refs = new ServiceReference[0];
+ ServiceReference<?>[] refs = {};
doReturn(refs).when(bundle).getServiceReferences(anyString(), anyString());
doReturn(Arrays.asList(refs)).when(bundle).getServiceReferences(any(Class.class), anyString());
doReturn("").when(bundle).getProperty(anyString());
- doReturn(registration).when(bundle).registerService(any(Class.class), any(NetconfOperationServiceFactoryListenerImpl.class), any(Dictionary.class));
+ doReturn(registration).when(bundle).registerService(any(Class.class), any(AggregatedNetconfOperationServiceFactory.class), any(Dictionary.class));
doNothing().when(registration).unregister();
doNothing().when(bundle).removeServiceListener(any(ServiceListener.class));
}
public void testStart() throws Exception {
NetconfImplActivator activator = new NetconfImplActivator();
activator.start(bundle);
- verify(bundle, times(2)).registerService(any(Class.class), any(NetconfOperationServiceFactoryListenerImpl.class), any(Dictionary.class));
+ verify(bundle).registerService(any(Class.class), any(AggregatedNetconfOperationServiceFactory.class), any(Dictionary.class));
activator.stop(bundle);
}
}
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.util.NetconfConstants;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactoryListener;
import org.osgi.framework.BundleContext;
import org.osgi.framework.Filter;
import org.osgi.framework.ServiceReference;
doNothing().when(listener).onRemoveNetconfOperationServiceFactory(any(NetconfOperationServiceFactory.class));
doReturn(filter).when(context).createFilter(anyString());
doReturn("").when(reference).toString();
+ doReturn(NetconfConstants.CONFIG_NETCONF_CONNECTOR).when(reference).getProperty(NetconfConstants.SERVICE_NAME);
doReturn(factory).when(context).getService(any(ServiceReference.class));
doReturn("").when(factory).toString();
doNothing().when(listener).onAddNetconfOperationServiceFactory(any(NetconfOperationServiceFactory.class));
import static org.mockito.Matchers.anySetOf;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import io.netty.channel.Channel;
import org.opendaylight.controller.config.yang.test.impl.NetconfTestImplModuleFactory;
import org.opendaylight.controller.config.yang.test.impl.TestImplModuleFactory;
import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.NetconfOperationServiceFactoryImpl;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreException;
import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreService;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreServiceImpl;
-import org.opendaylight.controller.netconf.confignetconfconnector.osgi.YangStoreSnapshot;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
-import org.opendaylight.controller.netconf.impl.NetconfServerDispatcher;
+import org.opendaylight.controller.netconf.impl.NetconfServerDispatcherImpl;
import org.opendaylight.controller.netconf.impl.NetconfServerSessionNegotiatorFactory;
import org.opendaylight.controller.netconf.impl.SessionIdProvider;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringActivator;
+import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringOperationService;
+import org.opendaylight.controller.netconf.notifications.BaseNetconfNotificationListener;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
setUpTestInitial();
- final NetconfOperationServiceFactoryListenerImpl factoriesListener = new NetconfOperationServiceFactoryListenerImpl();
+ final AggregatedNetconfOperationServiceFactory factoriesListener = new AggregatedNetconfOperationServiceFactory();
+ final NetconfMonitoringService netconfMonitoringService = getNetconfMonitoringService(factoriesListener);
factoriesListener.onAddNetconfOperationServiceFactory(new NetconfOperationServiceFactoryImpl(getYangStore()));
+ factoriesListener.onAddNetconfOperationServiceFactory(new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(new NetconfMonitoringOperationService(netconfMonitoringService)));
- for (final NetconfOperationServiceFactory netconfOperationServiceFactory : getAdditionalServiceFactories()) {
+ for (final NetconfOperationServiceFactory netconfOperationServiceFactory : getAdditionalServiceFactories(factoriesListener)) {
factoriesListener.onAddNetconfOperationServiceFactory(netconfOperationServiceFactory);
}
- serverTcpChannel = startNetconfTcpServer(factoriesListener);
+ serverTcpChannel = startNetconfTcpServer(factoriesListener, netconfMonitoringService);
clientDispatcher = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
}
return get;
}
- private Channel startNetconfTcpServer(final NetconfOperationServiceFactoryListenerImpl factoriesListener) throws Exception {
- final NetconfServerDispatcher dispatch = createDispatcher(factoriesListener, getNetconfMonitoringService(), getNotificationProducer());
+ private Channel startNetconfTcpServer(final AggregatedNetconfOperationServiceFactory listener, final NetconfMonitoringService monitoring) throws Exception {
+ final NetconfServerDispatcherImpl dispatch = createDispatcher(listener, monitoring, getNotificationProducer());
final ChannelFuture s;
if(getTcpServerAddress() instanceof LocalAddress) {
return notificationProducer;
}
- protected Iterable<NetconfOperationServiceFactory> getAdditionalServiceFactories() {
+ protected Iterable<NetconfOperationServiceFactory> getAdditionalServiceFactories(final AggregatedNetconfOperationServiceFactory factoriesListener) throws Exception {
return Collections.emptySet();
}
- protected SessionMonitoringService getNetconfMonitoringService() throws Exception {
- final NetconfOperationProvider netconfOperationProvider = mock(NetconfOperationProvider.class);
- final NetconfOperationServiceSnapshotImpl snap = mock(NetconfOperationServiceSnapshotImpl.class);
- doReturn(Collections.<NetconfOperationService>emptySet()).when(snap).getServices();
- doReturn(snap).when(netconfOperationProvider).openSnapshot(anyString());
- return new NetconfMonitoringServiceImpl(netconfOperationProvider);
+ protected NetconfMonitoringService getNetconfMonitoringService(final AggregatedNetconfOperationServiceFactory factoriesListener) throws Exception {
+ return new NetconfMonitoringServiceImpl(factoriesListener);
}
protected abstract SocketAddress getTcpServerAddress();
return clientDispatcher;
}
- private HardcodedYangStoreService getYangStore() throws YangStoreException, IOException {
+ private HardcodedYangStoreService getYangStore() throws IOException {
final Collection<InputStream> yangDependencies = getBasicYangs();
return new HardcodedYangStoreService(yangDependencies);
}
return yangDependencies;
}
- protected NetconfServerDispatcher createDispatcher(
- final NetconfOperationServiceFactoryListenerImpl factoriesListener, final SessionMonitoringService sessionMonitoringService,
+ protected NetconfServerDispatcherImpl createDispatcher(
+ final AggregatedNetconfOperationServiceFactory factoriesListener, final NetconfMonitoringService sessionMonitoringService,
final DefaultCommitNotificationProducer commitNotifier) {
final SessionIdProvider idProvider = new SessionIdProvider();
final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
hashedWheelTimer, factoriesListener, idProvider, SERVER_CONNECTION_TIMEOUT_MILLIS, commitNotifier, sessionMonitoringService);
- final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
+ final NetconfServerDispatcherImpl.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcherImpl.ServerChannelInitializer(
serverNegotiatorFactory);
- return new NetconfServerDispatcher(serverChannelInitializer, nettyThreadgroup, nettyThreadgroup);
+ return new NetconfServerDispatcherImpl(serverChannelInitializer, nettyThreadgroup, nettyThreadgroup);
}
protected HashedWheelTimer getHashedWheelTimer() {
return b.build();
}
- public static final class HardcodedYangStoreService implements YangStoreService {
-
- private final List<InputStream> byteArrayInputStreams;
+ public static final class HardcodedYangStoreService extends YangStoreService {
+ public HardcodedYangStoreService(final Collection<? extends InputStream> inputStreams) throws IOException {
+ super(new SchemaContextProvider() {
+ @Override
+ public SchemaContext getSchemaContext() {
+ return getSchema(inputStreams);
+ }
+ }, new BaseNetconfNotificationListener() {
+ @Override
+ public void onCapabilityChanged(final NetconfCapabilityChange capabilityChange) {
+ // NOOP
+ }
+ });
+ }
- public HardcodedYangStoreService(final Collection<? extends InputStream> inputStreams) throws YangStoreException, IOException {
- byteArrayInputStreams = new ArrayList<>();
+ private static SchemaContext getSchema(final Collection<? extends InputStream> inputStreams) {
+ final ArrayList<InputStream> byteArrayInputStreams = new ArrayList<>();
for (final InputStream inputStream : inputStreams) {
assertNotNull(inputStream);
- final byte[] content = IOUtils.toByteArray(inputStream);
+ final byte[] content;
+ try {
+ content = IOUtils.toByteArray(inputStream);
+ } catch (IOException e) {
+ throw new IllegalStateException("Cannot read " + inputStream, e);
+ }
final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(content);
byteArrayInputStreams.add(byteArrayInputStream);
}
- }
- @Override
- public YangStoreSnapshot getYangStoreSnapshot() throws YangStoreException {
for (final InputStream inputStream : byteArrayInputStreams) {
try {
inputStream.reset();
}
final YangParserImpl yangParser = new YangParserImpl();
- final SchemaContext schemaContext = yangParser.resolveSchemaContext(new HashSet<>(yangParser.parseYangModelsFromStreamsMapped(byteArrayInputStreams).values()));
- final YangStoreServiceImpl yangStoreService = new YangStoreServiceImpl(new SchemaContextProvider() {
- @Override
- public SchemaContext getSchemaContext() {
- return schemaContext ;
- }
- });
- return yangStoreService.getYangStoreSnapshot();
+ return yangParser.resolveSchemaContext(new HashSet<>(yangParser.parseYangModelsFromStreamsMapped(byteArrayInputStreams).values()));
}
}
}
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.opendaylight.controller.netconf.util.test.XmlUnitUtil.assertContainsElementWithName;
import static org.opendaylight.controller.netconf.util.test.XmlUnitUtil.assertElementsCount;
import static org.opendaylight.controller.netconf.util.xml.XmlUtil.readXmlToDocument;
import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
-import java.util.Collections;
import java.util.List;
-import java.util.Set;
import javax.management.InstanceNotFoundException;
import javax.management.Notification;
import javax.management.NotificationListener;
import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
-import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringActivator;
-import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringOperationService;
import org.opendaylight.controller.netconf.persist.impl.ConfigPersisterNotificationHandler;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.w3c.dom.Document;
public static final int PORT = 12026;
private static final InetSocketAddress TCP_ADDRESS = new InetSocketAddress(LOOPBACK_ADDRESS, PORT);
- private NetconfMonitoringServiceImpl netconfMonitoringService;
-
- @Override
- protected void setUpTestInitial() {
- netconfMonitoringService = new NetconfMonitoringServiceImpl(getNetconfOperationProvider());
- }
-
- @Override
- protected SessionMonitoringService getNetconfMonitoringService() throws Exception {
- return netconfMonitoringService;
- }
@Override
protected SocketAddress getTcpServerAddress() {
return TCP_ADDRESS;
}
- @Override
- protected Iterable<NetconfOperationServiceFactory> getAdditionalServiceFactories() {
- return Collections.<NetconfOperationServiceFactory>singletonList(new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(
- new NetconfMonitoringOperationService(netconfMonitoringService)));
- }
-
@Override
protected DefaultCommitNotificationProducer getNotificationProducer() {
return new DefaultCommitNotificationProducer(ManagementFactory.getPlatformMBeanServer());
try (ConfigPersisterNotificationHandler configPersisterNotificationHandler = new ConfigPersisterNotificationHandler(
platformMBeanServer, mockedAggregator)) {
-
try (TestingNetconfClient netconfClient = new TestingNetconfClient("client", getClientDispatcher(), getClientConfiguration(TCP_ADDRESS, 4000))) {
NetconfMessage response = netconfClient.sendMessage(loadGetConfigMessage());
assertContainsElementWithName(response.getDocument(), "modules");
}
notificationVerifier.assertNotificationCount(2);
- notificationVerifier.assertNotificationContent(0, 0, 0, 9);
- notificationVerifier.assertNotificationContent(1, 4, 3, 9);
+ notificationVerifier.assertNotificationContent(0, 0, 0, 8);
+ notificationVerifier.assertNotificationContent(1, 4, 3, 8);
mockedAggregator.assertSnapshotCount(2);
// Capabilities are stripped for persister
return XmlFileLoader.xmlFileToNetconfMessage("netconfMessages/commit.xml");
}
-
- public NetconfOperationProvider getNetconfOperationProvider() {
- final NetconfOperationProvider factoriesListener = mock(NetconfOperationProvider.class);
- final NetconfOperationServiceSnapshotImpl snap = mock(NetconfOperationServiceSnapshotImpl.class);
- final NetconfOperationService service = mock(NetconfOperationService.class);
- final Set<Capability> caps = Sets.newHashSet();
- doReturn(caps).when(service).getCapabilities();
- final Set<NetconfOperationService> services = Sets.newHashSet(service);
- doReturn(services).when(snap).getServices();
- doReturn(snap).when(factoriesListener).openSnapshot(anyString());
-
- return factoriesListener;
- }
-
private static class VerifyingNotificationListener implements NotificationListener {
public List<Notification> notifications = Lists.newArrayList();
package org.opendaylight.controller.netconf.it;
import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import java.util.List;
import java.util.Set;
import org.junit.Test;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
import org.opendaylight.controller.netconf.client.TestingNetconfClient;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
-import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
-import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringActivator;
-import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringOperationService;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import org.slf4j.Logger;
import org.w3c.dom.Document;
public class NetconfITMonitoringTest extends AbstractNetconfConfigTest {
public static final InetSocketAddress TCP_ADDRESS = new InetSocketAddress(LOOPBACK_ADDRESS, PORT);
public static final TestingCapability TESTING_CAPABILITY = new TestingCapability();
- private NetconfMonitoringServiceImpl netconfMonitoringService;
-
- @Override
- protected void setUpTestInitial() {
- netconfMonitoringService = new NetconfMonitoringServiceImpl(getNetconfOperationProvider());
- }
-
- @Override
- protected SessionMonitoringService getNetconfMonitoringService() throws Exception {
- return netconfMonitoringService;
- }
-
- @Override
- protected Iterable<NetconfOperationServiceFactory> getAdditionalServiceFactories() {
- return Collections.<NetconfOperationServiceFactory>singletonList(new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(
- new NetconfMonitoringOperationService(netconfMonitoringService)));
- }
-
@Override
protected InetSocketAddress getTcpServerAddress() {
return TCP_ADDRESS;
}
- static SessionMonitoringService getNetconfMonitoringListenerService(final Logger LOG, final NetconfMonitoringServiceImpl monitor) {
- return new SessionMonitoringService() {
- @Override
- public void onSessionUp(final NetconfManagementSession session) {
- LOG.debug("Management session up {}", session);
- monitor.onSessionUp(session);
- }
-
- @Override
- public void onSessionDown(final NetconfManagementSession session) {
- LOG.debug("Management session down {}", session);
- monitor.onSessionDown(session);
- }
- };
- }
-
@Test
public void testGetResponseFromMonitoring() throws Exception {
try (TestingNetconfClient netconfClient = new TestingNetconfClient("client-monitoring", getClientDispatcher(), getClientConfiguration(TCP_ADDRESS, 10000))) {
assertEquals("Incorrect number of session-id tags in " + XmlUtil.toString(document), i, elementSize);
}
- public static NetconfOperationProvider getNetconfOperationProvider() {
- final NetconfOperationProvider factoriesListener = mock(NetconfOperationProvider.class);
- final NetconfOperationServiceSnapshotImpl snap = mock(NetconfOperationServiceSnapshotImpl.class);
+ public static AggregatedNetconfOperationServiceFactory getNetconfOperationProvider() throws Exception {
+ final AggregatedNetconfOperationServiceFactory factoriesListener = mock(AggregatedNetconfOperationServiceFactory.class);
+ final NetconfOperationService snap = mock(NetconfOperationService.class);
try {
doNothing().when(snap).close();
} catch (final Exception e) {
// not happening
throw new IllegalStateException(e);
}
- final NetconfOperationService service = mock(NetconfOperationService.class);
final Set<Capability> caps = Sets.newHashSet();
caps.add(TESTING_CAPABILITY);
- doReturn(caps).when(service).getCapabilities();
- final Set<NetconfOperationService> services = Sets.newHashSet(service);
- doReturn(services).when(snap).getServices();
- doReturn(snap).when(factoriesListener).openSnapshot(anyString());
+ doReturn(caps).when(factoriesListener).getCapabilities();
+ doReturn(snap).when(factoriesListener).createService(anyString());
+
+ AutoCloseable mock = mock(AutoCloseable.class);
+ doNothing().when(mock).close();
+ doReturn(mock).when(factoriesListener).registerCapabilityListener(any(CapabilityListener.class));
return factoriesListener;
}
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
-import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
-import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPreferences;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
import org.opendaylight.yangtools.yang.common.QName;
}
static NetconfDeviceCommunicator getSessionListener() {
- RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice = mock(RemoteDevice.class);
- doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+ RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> mockedRemoteDevice = mock(RemoteDevice.class);
+ doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionPreferences.class), any(NetconfDeviceCommunicator.class));
doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
}
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <configuration>
- <instructions>
- <Export-Package>org.opendaylight.controller.netconf.mapping.api,</Export-Package>
- </instructions>
- </configuration>
</plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
</plugins>
</build>
</project>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.mapping.api;
-
-public interface NetconfOperationProvider {
-
- NetconfOperationServiceSnapshot openSnapshot(String sessionIdForReporting);
-
- public static class NetconfOperationProviderUtil {
-
- public static String getNetconfSessionIdForReporting(long sessionId) {
- return "netconf session id " + sessionId;
- }
-
- }
-
-}
*/
public interface NetconfOperationService extends AutoCloseable {
- /**
- * Get capabilities announced by server hello message.
- */
- Set<Capability> getCapabilities();
-
/**
* Get set of netconf operations that are handled by this service.
*/
package org.opendaylight.controller.netconf.mapping.api;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+
/**
* Factory that must be registered in OSGi service registry in order to be used
* by netconf-impl. Responsible for creating per-session instances of
*/
public interface NetconfOperationServiceFactory {
+ /**
+ * Get capabilities supported by current operation service.
+ */
+ Set<Capability> getCapabilities();
+
+ /**
+ * Supported capabilities may change over time, registering a listener allows for push based information retrieval about current notifications
+ */
+ AutoCloseable registerCapabilityListener(CapabilityListener listener);
+
NetconfOperationService createService(String netconfSessionIdForReporting);
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.impl.osgi;
-
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+package org.opendaylight.controller.netconf.mapping.api;
public interface NetconfOperationServiceFactoryListener {
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.controller.netconf.mapping.api;
-import java.util.Set;
+import org.opendaylight.controller.netconf.api.NetconfSession;
-public interface NetconfOperationServiceSnapshot extends AutoCloseable {
- String getNetconfSessionIdForReporting();
-
- Set<NetconfOperationService> getServices();
+public interface SessionAwareNetconfOperation extends NetconfOperation {
+ void setSession(NetconfSession session);
}
--- /dev/null
+module netconf-northbound-mapper {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper";
+ prefix "nnm";
+
+ import config { prefix config; revision-date 2013-04-05; }
+
+ description
+ "This module contains the base YANG definitions for
+ mapping services plugged into a netconf northbound server";
+
+ revision "2015-01-14" {
+ description
+ "Initial revision.";
+ }
+
+ identity netconf-northbound-mapper {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory";
+ }
+
+ identity netconf-mapper-registry {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactoryListener";
+ }
+
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-subsystem</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>netconf-mdsal-config</artifactId>
+ <description>Configuration files for netconf for mdsal</description>
+ <packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/08-netconf-mdsal.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">prefix:netconf-mdsal-mapper</type>
+ <name>netconf-mdsal-mapper</name>
+ <root-schema-service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
+ <name>yang-schema-service</name>
+ </root-schema-service>
+ <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
+ <name>inmemory-data-broker</name>
+ </dom-broker>
+ <mapper-aggregator xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-mapper-registry</type>
+ <name>mapper-aggregator-registry</name>
+ </mapper-aggregator>
+ </module>
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">prefix:netconf-server-dispatcher-impl</type>
+ <name>netconf-mdsal-server-dispatcher</name>
+ <mappers xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">dom:netconf-northbound-mapper</type>
+ <name>mapper-aggregator</name>
+ </mappers>
+ <server-monitor xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound">prefix:netconf-server-monitoring</type>
+ <name>server-monitor</name>
+ </server-monitor>
+ <boss-thread-group xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-threadgroup</type>
+ <name>global-boss-group</name>
+ </boss-thread-group>
+ <worker-thread-group xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-threadgroup</type>
+ <name>global-worker-group</name>
+ </worker-thread-group>
+ <timer xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-timer</type>
+ <name>global-timer</name>
+ </timer>
+ </module>
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring">prefix:netconf-mdsal-monitoring-mapper</type>
+ <name>netconf-mdsal-monitoring-mapper</name>
+ <server-monitoring xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound">prefix:netconf-server-monitoring</type>
+ <name>server-monitor</name>
+ </server-monitoring>
+ <binding-aware-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">prefix:binding-broker-osgi-registry</type>
+ <name>binding-osgi-broker</name>
+ </binding-aware-broker>
+ <aggregator xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-mapper-registry</type>
+ <name>mapper-aggregator-registry</name>
+ </aggregator>
+ </module>
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">prefix:netconf-mapper-aggregator</type>
+ <name>mapper-aggregator</name>
+ </module>
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">prefix:netconf-server-monitoring-impl</type>
+ <name>server-monitor</name>
+ <aggregator xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">dom:netconf-northbound-mapper</type>
+ <name>mapper-aggregator</name>
+ </aggregator>
+ </module>
+
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh">prefix:netconf-northbound-ssh</type>
+ <name>netconf-mdsal-ssh-server</name>
+
+ <event-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-event-executor</type>
+ <name>global-event-executor</name>
+ </event-executor>
+ <worker-thread-group xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-threadgroup</type>
+ <name>global-worker-group</name>
+ </worker-thread-group>
+ <processing-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool">prefix:threadpool</type>
+ <name>global-netconf-ssh-scheduled-executor</name>
+ </processing-executor>
+ <dispatcher xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound">prefix:netconf-server-dispatcher</type>
+ <name>netconf-mdsal-server-dispatcher</name>
+ </dispatcher>
+
+ <username>admin</username>
+ <password>admin</password>
+ </module>
+
+ </modules>
+
+ <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <service>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound">prefix:netconf-server-monitoring</type>
+ <instance>
+ <name>server-monitor</name>
+ <provider>/modules/module[type='netconf-server-monitoring-impl'][name='server-monitor']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-northbound-mapper</type>
+ <instance>
+ <name>netconf-mdsal-mapper</name>
+ <provider>/modules/module[type='netconf-mdsal-mapper'][name='netconf-mdsal-mapper']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-northbound-mapper</type>
+ <instance>
+ <name>mapper-aggregator</name>
+ <provider>/modules/module[type='netconf-mapper-aggregator'][name='mapper-aggregator']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-mapper-registry</type>
+ <instance>
+ <name>mapper-aggregator-registry</name>
+ <provider>/modules/module[type='netconf-mapper-aggregator'][name='mapper-aggregator']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound">prefix:netconf-server-dispatcher</type>
+ <instance>
+ <name>netconf-mdsal-server-dispatcher</name>
+ <provider>/modules/module[type='netconf-server-dispatcher-impl'][name='netconf-mdsal-server-dispatcher']</provider>
+ </instance>
+ </service>
+ </services>
+
+ </data>
+ </configuration>
+ <required-capabilities>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper?module=netconf-mdsal-mapper&amp;revision=2015-01-14</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:monitoring?module=netconf-mdsal-monitoring&amp;revision=2015-02-18</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh?module=netconf-northbound-ssh&amp;revision=2015-01-14</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:config:netconf:northbound:impl?module=netconf-northbound-impl&amp;revision=2015-01-12</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl:scheduled?module=threadpool-impl-scheduled&amp;revision=2013-12-01</capability>
+ </required-capabilities>
+</snapshot>
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.impl.mapping.operations;
+package org.opendaylight.controller.netconf.monitoring;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import java.util.Map;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-public final class DefaultGetSchema extends AbstractLastNetconfOperation {
+public class GetSchema extends AbstractLastNetconfOperation {
public static final String GET_SCHEMA = "get-schema";
public static final String IDENTIFIER = "identifier";
public static final String VERSION = "version";
- private static final Logger LOG = LoggerFactory.getLogger(DefaultGetSchema.class);
- private final CapabilityProvider cap;
+ private static final Logger LOG = LoggerFactory.getLogger(GetSchema.class);
+ private final NetconfMonitoringService cap;
- public DefaultGetSchema(CapabilityProvider cap, String netconfSessionIdForReporting) {
- super(netconfSessionIdForReporting);
+ public GetSchema(final NetconfMonitoringService cap) {
+ super(MonitoringConstants.MODULE_NAME);
this.cap = cap;
}
}
@Override
- protected Element handleWithNoSubsequentOperations(Document document, XmlElement xml) throws NetconfDocumentedException {
- GetSchemaEntry entry;
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement xml) throws NetconfDocumentedException {
+ final GetSchemaEntry entry;
entry = new GetSchemaEntry(xml);
- String schema;
+ final String schema;
try {
schema = cap.getSchemaForCapability(entry.identifier, entry.version);
- } catch (IllegalStateException e) {
- Map<String, String> errorInfo = Maps.newHashMap();
+ } catch (final IllegalStateException e) {
+ final Map<String, String> errorInfo = Maps.newHashMap();
errorInfo.put(entry.identifier, e.getMessage());
LOG.warn("Rpc error: {}", NetconfDocumentedException.ErrorTag.operation_failed, e);
throw new NetconfDocumentedException(e.getMessage(), NetconfDocumentedException.ErrorType.application,
NetconfDocumentedException.ErrorSeverity.error, errorInfo);
}
- Element getSchemaResult;
+ final Element getSchemaResult;
getSchemaResult = XmlUtil.createTextElement(document, XmlNetconfConstants.DATA_KEY, schema,
Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_YANG_IETF_NETCONF_MONITORING));
LOG.trace("{} operation successful", GET_SCHEMA);
private final String identifier;
private final Optional<String> version;
- GetSchemaEntry(XmlElement getSchemaElement) throws NetconfDocumentedException {
+ GetSchemaEntry(final XmlElement getSchemaElement) throws NetconfDocumentedException {
getSchemaElement.checkName(GET_SCHEMA);
getSchemaElement.checkNamespace(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_YANG_IETF_NETCONF_MONITORING);
XmlElement identifierElement = null;
try {
identifierElement = getSchemaElement.getOnlyChildElementWithSameNamespace(IDENTIFIER);
- } catch (MissingNameSpaceException e) {
+ } catch (final MissingNameSpaceException e) {
LOG.trace("Can't get identifier element as only child element with same namespace due to ",e);
throw NetconfDocumentedException.wrap(e);
}
identifier = identifierElement.getTextContent();
- Optional<XmlElement> versionElement = getSchemaElement
+ final Optional<XmlElement> versionElement = getSchemaElement
.getOnlyChildElementWithSameNamespaceOptionally(VERSION);
if (versionElement.isPresent()) {
version = Optional.of(versionElement.get().getTextContent());
*/
package org.opendaylight.controller.netconf.monitoring.osgi;
+import java.util.Collections;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.osgi.framework.BundleActivator;
if(monitor!=null) {
try {
monitor.close();
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.warn("Ignoring exception while closing {}", monitor, e);
}
}
}
- public static class NetconfMonitoringOperationServiceFactory implements NetconfOperationServiceFactory {
+ public static class NetconfMonitoringOperationServiceFactory implements NetconfOperationServiceFactory, AutoCloseable {
+
private final NetconfMonitoringOperationService operationService;
- public NetconfMonitoringOperationServiceFactory(NetconfMonitoringOperationService operationService) {
+ private static final AutoCloseable AUTO_CLOSEABLE = new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ // NOOP
+ }
+ };
+
+ public NetconfMonitoringOperationServiceFactory(final NetconfMonitoringOperationService operationService) {
this.operationService = operationService;
}
@Override
- public NetconfOperationService createService(String netconfSessionIdForReporting) {
+ public NetconfOperationService createService(final String netconfSessionIdForReporting) {
return operationService;
}
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return Collections.emptySet();
+ }
+
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ return AUTO_CLOSEABLE;
+ }
+
+ @Override
+ public void close() {}
}
}
*/
package org.opendaylight.controller.netconf.monitoring.osgi;
-import com.google.common.base.Optional;
import com.google.common.collect.Sets;
-import java.util.Collection;
-import java.util.Collections;
import java.util.Set;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.monitoring.Get;
-import org.opendaylight.controller.netconf.monitoring.MonitoringConstants;
+import org.opendaylight.controller.netconf.monitoring.GetSchema;
public class NetconfMonitoringOperationService implements NetconfOperationService {
- private static final Set<Capability> CAPABILITIES = Sets.<Capability>newHashSet(new Capability() {
-
- @Override
- public String getCapabilityUri() {
- return MonitoringConstants.URI;
- }
-
- @Override
- public Optional<String> getModuleNamespace() {
- return Optional.of(MonitoringConstants.NAMESPACE);
- }
-
- @Override
- public Optional<String> getModuleName() {
- return Optional.of(MonitoringConstants.MODULE_NAME);
- }
-
- @Override
- public Optional<String> getRevision() {
- return Optional.of(MonitoringConstants.MODULE_REVISION);
- }
-
- @Override
- public Optional<String> getCapabilitySchema() {
- return Optional.absent();
- }
-
- @Override
- public Collection<String> getLocation() {
- return Collections.emptyList();
- }
- });
-
private final NetconfMonitoringService monitor;
public NetconfMonitoringOperationService(final NetconfMonitoringService monitor) {
this.monitor = monitor;
}
- @Override
- public Set<Capability> getCapabilities() {
- return CAPABILITIES;
- }
-
@Override
public Set<NetconfOperation> getNetconfOperations() {
- return Sets.<NetconfOperation>newHashSet(new Get(monitor));
+ return Sets.<NetconfOperation>newHashSet(new Get(monitor), new GetSchema(monitor));
}
@Override
package org.opendaylight.controller.netconf.monitoring.osgi;
import com.google.common.base.Preconditions;
+import java.util.Dictionary;
import java.util.Hashtable;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.api.util.NetconfConstants;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
private static final Logger LOG = LoggerFactory.getLogger(NetconfMonitoringServiceTracker.class);
private ServiceRegistration<NetconfOperationServiceFactory> reg;
+ private NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory factory;
NetconfMonitoringServiceTracker(final BundleContext context) {
super(context, NetconfMonitoringService.class, null);
final NetconfMonitoringOperationService operationService = new NetconfMonitoringOperationService(
netconfMonitoringService);
- final NetconfOperationServiceFactory factory = new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(
+ factory = new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(
operationService);
- reg = context.registerService(NetconfOperationServiceFactory.class, factory, new Hashtable<String, Object>());
+ Dictionary<String, String> properties = new Hashtable<>();
+ properties.put(NetconfConstants.SERVICE_NAME, NetconfConstants.NETCONF_MONITORING);
+ reg = context.registerService(NetconfOperationServiceFactory.class, factory, properties);
return netconfMonitoringService;
}
LOG.warn("Ignoring exception while unregistering {}", reg, e);
}
}
+ if(factory!=null) {
+ factory.close();
+ }
}
}
/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.impl.mapping.operations;
+package org.opendaylight.controller.netconf.monitoring;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.any;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
-import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
-public class DefaultGetSchemaTest {
+public class GetSchemaTest {
- private CapabilityProvider cap;
+
+ private NetconfMonitoringService cap;
private Document doc;
private String getSchema;
@Before
public void setUp() throws Exception {
- cap = mock(CapabilityProvider.class);
+ cap = mock(NetconfMonitoringService.class);
doc = XmlUtil.newDocument();
getSchema = "<get-schema xmlns=\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\">\n" +
" <identifier>threadpool-api</identifier>\n" +
@Test(expected = NetconfDocumentedException.class)
public void testDefaultGetSchema() throws Exception {
- DefaultGetSchema schema = new DefaultGetSchema(cap, "");
+ GetSchema schema = new GetSchema(cap);
doThrow(IllegalStateException.class).when(cap).getSchemaForCapability(anyString(), any(Optional.class));
schema.handleWithNoSubsequentOperations(doc, XmlElement.fromDomElement(XmlUtil.readXmlToElement(getSchema)));
}
@Test
public void handleWithNoSubsequentOperations() throws Exception {
- DefaultGetSchema schema = new DefaultGetSchema(cap, "");
+ GetSchema schema = new GetSchema(cap);
doReturn("").when(cap).getSchemaForCapability(anyString(), any(Optional.class));
assertNotNull(schema.handleWithNoSubsequentOperations(doc, XmlElement.fromDomElement(XmlUtil.readXmlToElement(getSchema))));
}
-}
+
+}
\ No newline at end of file
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
-import com.google.common.base.Optional;
-import java.util.Collections;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
-import org.opendaylight.controller.netconf.monitoring.MonitoringConstants;
public class NetconfMonitoringOperationServiceTest {
@Test
public void testGetters() throws Exception {
NetconfMonitoringService monitor = mock(NetconfMonitoringService.class);
NetconfMonitoringOperationService service = new NetconfMonitoringOperationService(monitor);
+ NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory serviceFactory = new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(service);
- assertEquals(1, service.getNetconfOperations().size());
+ assertEquals(2, service.getNetconfOperations().size());
- assertEquals(Optional.<String>absent(), service.getCapabilities().iterator().next().getCapabilitySchema());
- assertEquals(Collections.<String>emptyList(), service.getCapabilities().iterator().next().getLocation());
- assertEquals(Optional.of(MonitoringConstants.MODULE_REVISION), service.getCapabilities().iterator().next().getRevision());
- assertEquals(Optional.of(MonitoringConstants.MODULE_NAME), service.getCapabilities().iterator().next().getModuleName());
- assertEquals(Optional.of(MonitoringConstants.NAMESPACE), service.getCapabilities().iterator().next().getModuleNamespace());
- assertEquals(MonitoringConstants.URI, service.getCapabilities().iterator().next().getCapabilityUri());
}
}
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
+import com.google.common.base.Optional;
import com.google.common.collect.Lists;
+import java.util.Set;
import org.hamcrest.CoreMatchers;
import org.junit.Test;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.monitoring.xml.model.NetconfState;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.NetconfSsh;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Transport;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Capabilities;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Schemas;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.SchemasBuilder;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.Sessions;
final NetconfMonitoringService service = new NetconfMonitoringService() {
+ @Override
+ public void onSessionUp(final NetconfManagementSession session) {
+
+ }
+
+ @Override
+ public void onSessionDown(final NetconfManagementSession session) {
+
+ }
+
+ @Override
+ public void onCapabilitiesAdded(final Set<Capability> addedCaps) {
+
+ }
+
+ @Override
+ public void onCapabilitiesRemoved(final Set<Capability> addedCaps) {
+
+ }
+
@Override
public Sessions getSessions() {
return new SessionsBuilder().setSession(Lists.newArrayList(getMockSession(NetconfTcp.class), getMockSession(NetconfSsh.class))).build();
public Schemas getSchemas() {
return new SchemasBuilder().setSchema(Lists.newArrayList(getMockSchema("id", "v1", Yang.class), getMockSchema("id2", "", Yang.class))).build();
}
+
+ @Override
+ public String getSchemaForCapability(final String moduleName, final Optional<String> revision) {
+ return null;
+ }
+
+ @Override
+ public Capabilities getCapabilities() {
+ return null;
+ }
+
+ @Override
+ public AutoCloseable registerListener(final MonitoringListener listener) {
+ return new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ // NOOP
+ }
+ };
+ }
};
final NetconfState model = new NetconfState(service);
final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)).replaceAll("\\s", "");
private static final Logger LOG = LoggerFactory.getLogger(NetconfMessageToEXIEncoder.class);
/**
* This class is not marked as shared, so it can be attached to only a single channel,
- * which means that {@link #encode(ChannelHandlerContext, NetconfMessage, ByteBuf)}
+ * which means that {@link #encode(io.netty.channel.ChannelHandlerContext, org.opendaylight.controller.netconf.api.NetconfMessage, io.netty.buffer.ByteBuf)}
* cannot be invoked concurrently. Hence we can reuse the transmogrifier.
*/
- private final Transmogrifier transmogrifier;
+ private final NetconfEXICodec codec;
- private NetconfMessageToEXIEncoder(final Transmogrifier transmogrifier) {
- this.transmogrifier = Preconditions.checkNotNull(transmogrifier);
+ private NetconfMessageToEXIEncoder(final NetconfEXICodec codec) {
+ this.codec = Preconditions.checkNotNull(codec);
}
public static NetconfMessageToEXIEncoder create(final NetconfEXICodec codec) throws EXIOptionsException, TransmogrifierException {
- return new NetconfMessageToEXIEncoder(codec.getTransmogrifier());
+ return new NetconfMessageToEXIEncoder(codec);
}
@Override
protected void encode(final ChannelHandlerContext ctx, final NetconfMessage msg, final ByteBuf out) throws EXIOptionsException, IOException, TransformerException, TransmogrifierException {
LOG.trace("Sent to encode : {}", msg);
+ // TODO Workaround for bug 2679, recreate transmogrifier every time
+ // If the transmogrifier is reused, encoded xml can become non valid according to EXI decoder
+ // Seems like a bug in the nagasena library (try newer version of the library or fix the bug inside of it)
+ // Related bugs 2459: reuse nagasena resources, 2458: upgrade nagasena to newest version
+ final Transmogrifier transmogrifier = codec.getTransmogrifier();
+
try (final OutputStream os = new ByteBufOutputStream(out)) {
transmogrifier.setOutputStream(os);
final ContentHandler handler = transmogrifier.getSAXTransmogrifier();
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>netconf-subsystem</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <packaging>bundle</packaging>
+ <artifactId>netconf-notifications-api</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-notifications</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.controller.netconf.notifications.*</Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+
+
+/**
+ * Listener for base netconf notifications defined in https://tools.ietf.org/html/rfc6470.
+ * This listener uses generated classes from yang model defined in RFC6470.
+ * It simplifies the publishing of base netconf notifications from code.
+ */
+public interface BaseNetconfNotificationListener {
+
+ /**
+ * Callback used to notify about a change in used capabilities
+ */
+ void onCapabilityChanged(NetconfCapabilityChange capabilityChange);
+
+ // TODO add other base notifications
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Registration for base notification publisher. This registration allows for publishing of base netconf notifications using generated classes
+ */
+public interface BaseNotificationPublisherRegistration extends NotificationRegistration, BaseNetconfNotificationListener {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import com.google.common.base.Preconditions;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Special kind of netconf message that contains a timestamp.
+ */
+public final class NetconfNotification extends NetconfMessage {
+
+ public static final String NOTIFICATION = "notification";
+ public static final String NOTIFICATION_NAMESPACE = "urn:ietf:params:netconf:capability:notification:1.0";
+ public static final String RFC3339_DATE_FORMAT_BLUEPRINT = "yyyy-MM-dd'T'HH:mm:ssXXX";
+ public static final String EVENT_TIME = "eventTime";
+
+ /**
+ * Create new notification and capture the timestamp in the constructor
+ */
+ public NetconfNotification(final Document notificationContent) {
+ this(notificationContent, new Date());
+ }
+
+ /**
+ * Create new notification with provided timestamp
+ */
+ public NetconfNotification(final Document notificationContent, final Date eventTime) {
+ super(wrapNotification(notificationContent, eventTime));
+ }
+
+ private static Document wrapNotification(final Document notificationContent, final Date eventTime) {
+ Preconditions.checkNotNull(notificationContent);
+ Preconditions.checkNotNull(eventTime);
+
+ final Element baseNotification = notificationContent.getDocumentElement();
+ final Element entireNotification = notificationContent.createElementNS(NOTIFICATION_NAMESPACE, NOTIFICATION);
+ entireNotification.appendChild(baseNotification);
+
+ final Element eventTimeElement = notificationContent.createElementNS(NOTIFICATION_NAMESPACE, EVENT_TIME);
+ eventTimeElement.setTextContent(getSerializedEventTime(eventTime));
+ entireNotification.appendChild(eventTimeElement);
+
+ notificationContent.appendChild(entireNotification);
+ return notificationContent;
+ }
+
+ private static String getSerializedEventTime(final Date eventTime) {
+ // SimpleDateFormat is not threadsafe, cannot be in a constant
+ return new SimpleDateFormat(RFC3339_DATE_FORMAT_BLUEPRINT).format(eventTime);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.Stream;
+
+/**
+ * Collector of all notifications. Base or generic
+ */
+public interface NetconfNotificationCollector {
+
+ /**
+ * Add notification publisher for a particular stream
+ *
+ * Implementations should allow for multiple publishers of a single stream
+ * and it is up to implementations to decide how to merge metadata (e.g. description)
+ * for the same stream when providing information about available stream
+ *
+ */
+ NotificationPublisherRegistration registerNotificationPublisher(Stream stream);
+
+ /**
+ * Register base notification publisher
+ */
+ BaseNotificationPublisherRegistration registerBaseNotificationPublisher();
+
+ /**
+ * Users of the registry have an option to get notification each time new notification stream gets registered
+ * This allows for a push model in addition to pull model for retrieving information about available streams.
+ *
+ * The listener should receive callbacks for each stream available prior to the registration when it is registered
+ */
+ NotificationRegistration registerStreamListener(NetconfNotificationStreamListener listener);
+
+ /**
+ * Simple listener that receives notifications about changes in stream availability
+ */
+ public interface NetconfNotificationStreamListener {
+
+ /**
+ * Stream becomes available in the collector (first publisher is registered)
+ */
+ void onStreamRegistered(Stream stream);
+
+ /**
+ * Stream is not available anymore in the collector (last publisher is unregistered)
+ */
+ void onStreamUnregistered(StreamNameType stream);
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+
+/**
+ * Generic listener for netconf notifications
+ */
+public interface NetconfNotificationListener {
+
+ /**
+ * Callback used to notify the listener about any new notification
+ */
+ void onNotification(StreamNameType stream, NetconfNotification notification);
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+
+/**
+ * Registry allowing clients to subscribe to available netconf notification streams and query stream availability
+ */
+public interface NetconfNotificationRegistry {
+
+ /**
+ * Add listener for a certain notification type
+ */
+ NotificationListenerRegistration registerNotificationListener(StreamNameType stream, NetconfNotificationListener listener);
+
+ /**
+ * Check stream availability
+ */
+ boolean isStreamAvailable(StreamNameType streamNameType);
+
+ /**
+ * Get all the streams available
+ */
+ Streams getNotificationPublishers();
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Manages the registration of a single listener
+ */
+public interface NotificationListenerRegistration extends NotificationRegistration {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Registration for notification publisher. This registration allows for publishing any netconf notifications
+ */
+public interface NotificationPublisherRegistration extends NetconfNotificationListener, NotificationRegistration {
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications;
+
+/**
+ * Generic registration, used as a base for other registration types
+ */
+public interface NotificationRegistration extends AutoCloseable {
+
+ // Overridden close does not throw any kind of checked exception
+
+ /**
+ * Close the registration.
+ */
+ @Override
+ void close();
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ~ Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>netconf-subsystem</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <packaging>bundle</packaging>
+ <artifactId>netconf-notifications-impl</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-notifications-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>binding-generator-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>binding-data-codec</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>xmlunit</groupId>
+ <artifactId>xmlunit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Bundle-Activator>org.opendaylight.controller.netconf.notifications.impl.osgi.Activator</Bundle-Activator>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.HashMultiset;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multiset;
+import com.google.common.collect.Sets;
+import java.util.Map;
+import java.util.Set;
+import javax.annotation.concurrent.GuardedBy;
+import javax.annotation.concurrent.ThreadSafe;
+import org.opendaylight.controller.netconf.notifications.BaseNotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.controller.netconf.notifications.NotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NotificationRegistration;
+import org.opendaylight.controller.netconf.notifications.impl.ops.NotificationsTransformUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.StreamsBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.Stream;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamKey;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@ThreadSafe
+public class NetconfNotificationManager implements NetconfNotificationCollector, NetconfNotificationRegistry, NetconfNotificationListener, AutoCloseable {
+
+ public static final StreamNameType BASE_STREAM_NAME = new StreamNameType("NETCONF");
+ public static final Stream BASE_NETCONF_STREAM;
+
+ static {
+ BASE_NETCONF_STREAM = new StreamBuilder()
+ .setName(BASE_STREAM_NAME)
+ .setKey(new StreamKey(BASE_STREAM_NAME))
+ .setReplaySupport(false)
+ .setDescription("Default Event Stream")
+ .build();
+ }
+
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfNotificationManager.class);
+
+ // TODO excessive synchronization provides thread safety but is most likely not optimal (combination of concurrent collections might improve performance)
+ // And also calling callbacks from a synchronized block is dangerous since the listeners/publishers can block the whole notification processing
+
+ @GuardedBy("this")
+ private final Multimap<StreamNameType, GenericNotificationListenerReg> notificationListeners = HashMultimap.create();
+
+ @GuardedBy("this")
+ private final Set<NetconfNotificationStreamListener> streamListeners = Sets.newHashSet();
+
+ @GuardedBy("this")
+ private final Map<StreamNameType, Stream> streamMetadata = Maps.newHashMap();
+
+ @GuardedBy("this")
+ private final Multiset<StreamNameType> availableStreams = HashMultiset.create();
+
+ @GuardedBy("this")
+ private final Set<GenericNotificationPublisherReg> notificationPublishers = Sets.newHashSet();
+
+ @Override
+ public synchronized void onNotification(final StreamNameType stream, final NetconfNotification notification) {
+ LOG.debug("Notification of type {} detected", stream);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Notification of type {} detected: {}", stream, notification);
+ }
+
+ for (final GenericNotificationListenerReg listenerReg : notificationListeners.get(BASE_STREAM_NAME)) {
+ listenerReg.getListener().onNotification(BASE_STREAM_NAME, notification);
+ }
+ }
+
+ @Override
+ public synchronized NotificationListenerRegistration registerNotificationListener(final StreamNameType stream, final NetconfNotificationListener listener) {
+ Preconditions.checkNotNull(stream);
+ Preconditions.checkNotNull(listener);
+
+ LOG.trace("Notification listener registered for stream: {}", stream);
+
+ final GenericNotificationListenerReg genericNotificationListenerReg = new GenericNotificationListenerReg(listener) {
+ @Override
+ public void close() {
+ synchronized (NetconfNotificationManager.this) {
+ LOG.trace("Notification listener unregistered for stream: {}", stream);
+ super.close();
+ }
+ }
+ };
+
+ notificationListeners.put(BASE_STREAM_NAME, genericNotificationListenerReg);
+ return genericNotificationListenerReg;
+ }
+
+ @Override
+ public synchronized Streams getNotificationPublishers() {
+ return new StreamsBuilder().setStream(Lists.newArrayList(streamMetadata.values())).build();
+ }
+
+ @Override
+ public synchronized boolean isStreamAvailable(final StreamNameType streamNameType) {
+ return availableStreams.contains(streamNameType);
+ }
+
+ @Override
+ public synchronized NotificationRegistration registerStreamListener(final NetconfNotificationStreamListener listener) {
+ streamListeners.add(listener);
+
+ // Notify about all already available
+ for (final Stream availableStream : streamMetadata.values()) {
+ listener.onStreamRegistered(availableStream);
+ }
+
+ return new NotificationRegistration() {
+ @Override
+ public void close() {
+ synchronized(NetconfNotificationManager.this) {
+ streamListeners.remove(listener);
+ }
+ }
+ };
+ }
+
+ @Override
+ public synchronized void close() {
+ // Unregister all listeners
+ for (final GenericNotificationListenerReg genericNotificationListenerReg : notificationListeners.values()) {
+ genericNotificationListenerReg.close();
+ }
+ notificationListeners.clear();
+
+ // Unregister all publishers
+ for (final GenericNotificationPublisherReg notificationPublisher : notificationPublishers) {
+ notificationPublisher.close();
+ }
+ notificationPublishers.clear();
+
+ // Clear stream Listeners
+ streamListeners.clear();
+ }
+
+ @Override
+ public synchronized NotificationPublisherRegistration registerNotificationPublisher(final Stream stream) {
+ Preconditions.checkNotNull(stream);
+ final StreamNameType streamName = stream.getName();
+
+ LOG.debug("Notification publisher registered for stream: {}", streamName);
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Notification publisher registered for stream: {}", stream);
+ }
+
+ if(streamMetadata.containsKey(streamName)) {
+ LOG.warn("Notification stream {} already registered as: {}. Will be reused", streamName, streamMetadata.get(streamName));
+ } else {
+ streamMetadata.put(streamName, stream);
+ }
+
+ availableStreams.add(streamName);
+
+ final GenericNotificationPublisherReg genericNotificationPublisherReg = new GenericNotificationPublisherReg(this, streamName) {
+ @Override
+ public void close() {
+ synchronized (NetconfNotificationManager.this) {
+ super.close();
+ }
+ }
+ };
+
+ notificationPublishers.add(genericNotificationPublisherReg);
+
+ notifyStreamAdded(stream);
+ return genericNotificationPublisherReg;
+ }
+
+ private void unregisterNotificationPublisher(final StreamNameType streamName, final GenericNotificationPublisherReg genericNotificationPublisherReg) {
+ availableStreams.remove(streamName);
+ notificationPublishers.remove(genericNotificationPublisherReg);
+
+ LOG.debug("Notification publisher unregistered for stream: {}", streamName);
+
+ // Notify stream listeners if all publishers are gone and also clear metadata for stream
+ if (!isStreamAvailable(streamName)) {
+ LOG.debug("Notification stream: {} became unavailable", streamName);
+ streamMetadata.remove(streamName);
+ notifyStreamRemoved(streamName);
+ }
+ }
+
+ private synchronized void notifyStreamAdded(final Stream stream) {
+ for (final NetconfNotificationStreamListener streamListener : streamListeners) {
+ streamListener.onStreamRegistered(stream);
+ }
+ }
+ private synchronized void notifyStreamRemoved(final StreamNameType stream) {
+ for (final NetconfNotificationStreamListener streamListener : streamListeners) {
+ streamListener.onStreamUnregistered(stream);
+ }
+ }
+
+ @Override
+ public BaseNotificationPublisherRegistration registerBaseNotificationPublisher() {
+ final NotificationPublisherRegistration notificationPublisherRegistration = registerNotificationPublisher(BASE_NETCONF_STREAM);
+ return new BaseNotificationPublisherReg(notificationPublisherRegistration);
+ }
+
+ private static class GenericNotificationPublisherReg implements NotificationPublisherRegistration {
+ private NetconfNotificationManager baseListener;
+ private final StreamNameType registeredStream;
+
+ public GenericNotificationPublisherReg(final NetconfNotificationManager baseListener, final StreamNameType registeredStream) {
+ this.baseListener = baseListener;
+ this.registeredStream = registeredStream;
+ }
+
+ @Override
+ public void close() {
+ baseListener.unregisterNotificationPublisher(registeredStream, this);
+ baseListener = null;
+ }
+
+ @Override
+ public void onNotification(final StreamNameType stream, final NetconfNotification notification) {
+ Preconditions.checkState(baseListener != null, "Already closed");
+ Preconditions.checkArgument(stream.equals(registeredStream));
+ baseListener.onNotification(stream, notification);
+ }
+ }
+
+ private static class BaseNotificationPublisherReg implements BaseNotificationPublisherRegistration {
+
+ private final NotificationPublisherRegistration baseRegistration;
+
+ public BaseNotificationPublisherReg(final NotificationPublisherRegistration baseRegistration) {
+ this.baseRegistration = baseRegistration;
+ }
+
+ @Override
+ public void close() {
+ baseRegistration.close();
+ }
+
+ @Override
+ public void onCapabilityChanged(final NetconfCapabilityChange capabilityChange) {
+ baseRegistration.onNotification(BASE_STREAM_NAME, serializeNotification(capabilityChange));
+ }
+
+ private static NetconfNotification serializeNotification(final NetconfCapabilityChange capabilityChange) {
+ return NotificationsTransformUtil.transform(capabilityChange);
+ }
+ }
+
+ private class GenericNotificationListenerReg implements NotificationListenerRegistration {
+ private final NetconfNotificationListener listener;
+
+ public GenericNotificationListenerReg(final NetconfNotificationListener listener) {
+ this.listener = listener;
+ }
+
+ public NetconfNotificationListener getListener() {
+ return listener;
+ }
+
+ @Override
+ public void close() {
+ notificationListeners.remove(BASE_STREAM_NAME, this);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import java.util.List;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.SessionAwareNetconfOperation;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.controller.netconf.notifications.impl.NetconfNotificationManager;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.CreateSubscriptionInput;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Create subscription listens for create subscription requests and registers notification listeners into notification registry.
+ * Received notifications are sent to the client right away
+ */
+public class CreateSubscription extends AbstractLastNetconfOperation implements SessionAwareNetconfOperation, AutoCloseable {
+
+ private static final Logger LOG = LoggerFactory.getLogger(CreateSubscription.class);
+
+ static final String CREATE_SUBSCRIPTION = "create-subscription";
+
+ private final NetconfNotificationRegistry notifications;
+ private final List<NotificationListenerRegistration> subscriptions = Lists.newArrayList();
+ private NetconfSession netconfSession;
+
+ public CreateSubscription(final String netconfSessionIdForReporting, final NetconfNotificationRegistry notifications) {
+ super(netconfSessionIdForReporting);
+ this.notifications = notifications;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ operationElement.checkName(CREATE_SUBSCRIPTION);
+ operationElement.checkNamespace(CreateSubscriptionInput.QNAME.getNamespace().toString());
+ // FIXME reimplement using CODEC_REGISTRY and parse everything into generated class instance
+ // Waiting for https://git.opendaylight.org/gerrit/#/c/13763/
+
+ // FIXME filter could be supported same way as netconf server filters get and get-config results
+ final Optional<XmlElement> filter = operationElement.getOnlyChildElementWithSameNamespaceOptionally("filter");
+ Preconditions.checkArgument(filter.isPresent() == false, "Filter element not yet supported");
+
+ // Replay not supported
+ final Optional<XmlElement> startTime = operationElement.getOnlyChildElementWithSameNamespaceOptionally("startTime");
+ Preconditions.checkArgument(startTime.isPresent() == false, "StartTime element not yet supported");
+
+ // Stop time not supported
+ final Optional<XmlElement> stopTime = operationElement.getOnlyChildElementWithSameNamespaceOptionally("stopTime");
+ Preconditions.checkArgument(stopTime.isPresent() == false, "StopTime element not yet supported");
+
+ final StreamNameType streamNameType = parseStreamIfPresent(operationElement);
+
+ Preconditions.checkNotNull(netconfSession);
+ // Premature streams are allowed (meaning listener can register even if no provider is available yet)
+ if(notifications.isStreamAvailable(streamNameType) == false) {
+ LOG.warn("Registering premature stream {}. No publisher available yet for session {}", streamNameType, getNetconfSessionIdForReporting());
+ }
+
+ final NotificationListenerRegistration notificationListenerRegistration =
+ notifications.registerNotificationListener(streamNameType, new NotificationSubscription(netconfSession));
+ subscriptions.add(notificationListenerRegistration);
+
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ private StreamNameType parseStreamIfPresent(final XmlElement operationElement) throws NetconfDocumentedException {
+ final Optional<XmlElement> stream = operationElement.getOnlyChildElementWithSameNamespaceOptionally("stream");
+ return stream.isPresent() ? new StreamNameType(stream.get().getTextContent()) : NetconfNotificationManager.BASE_STREAM_NAME;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return CREATE_SUBSCRIPTION;
+ }
+
+ @Override
+ protected String getOperationNamespace() {
+ return CreateSubscriptionInput.QNAME.getNamespace().toString();
+ }
+
+ @Override
+ public void setSession(final NetconfSession session) {
+ this.netconfSession = session;
+ }
+
+ @Override
+ public void close() {
+ netconfSession = null;
+ // Unregister from notification streams
+ for (final NotificationListenerRegistration subscription : subscriptions) {
+ subscription.close();
+ }
+ }
+
+ /**
+ * Listener bound to a single NETCONF session; forwards every received
+ * notification to that session as an outgoing message.
+ */
+ private static class NotificationSubscription implements NetconfNotificationListener {
+ private final NetconfSession currentSession;
+
+ public NotificationSubscription(final NetconfSession currentSession) {
+ this.currentSession = currentSession;
+ }
+
+ @Override
+ public void onNotification(final StreamNameType stream, final NetconfNotification notification) {
+ currentSession.sendMessage(notification);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.util.mapping.AbstractNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.Netconf;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.NetconfBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Serializes the netconf notification streams subtree into the response of the
+ * get RPC. Runs after the rest of the operation chain (see getHandlingPriority)
+ * and merges its subtree into the already-built reply.
+ */
+public class Get extends AbstractNetconfOperation implements AutoCloseable {
+
+ private static final String GET = "get";
+ private static final InstanceIdentifier<Netconf> NETCONF_SUBTREE_INSTANCE_IDENTIFIER = InstanceIdentifier.builder(Netconf.class).build();
+
+ private final NetconfNotificationRegistry notificationRegistry;
+
+ public Get(final String netconfSessionIdForReporting, final NetconfNotificationRegistry notificationRegistry) {
+ super(netconfSessionIdForReporting);
+ Preconditions.checkNotNull(notificationRegistry);
+ this.notificationRegistry = notificationRegistry;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return GET;
+ }
+
+ @Override
+ public Document handle(final Document requestMessage, final NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+ // Let the rest of the chain build the base response first, then append
+ // the streams subtree only when at least one stream publisher exists.
+ final Document partialResponse = subsequentOperation.execute(requestMessage);
+ final Streams availableStreams = notificationRegistry.getNotificationPublishers();
+ if (!availableStreams.getStream().isEmpty()) {
+ serializeStreamsSubtree(partialResponse, availableStreams);
+ }
+ return partialResponse;
+ }
+
+ // Converts the streams container to normalized nodes and writes them into
+ // the <data> element of the partially-built reply.
+ static void serializeStreamsSubtree(final Document partialResponse, final Streams availableStreams) throws NetconfDocumentedException {
+ final Netconf netconfSubtree = new NetconfBuilder().setStreams(availableStreams).build();
+ final NormalizedNode<?, ?> normalized = toNormalized(netconfSubtree);
+
+ final DOMResult result = new DOMResult(getPlaceholder(partialResponse));
+
+ try {
+ NotificationsTransformUtil.writeNormalizedNode(normalized, result, SchemaPath.ROOT);
+ } catch (final XMLStreamException | IOException e) {
+ throw new IllegalStateException("Unable to serialize " + netconfSubtree, e);
+ }
+ }
+
+ // Locates the <data> element inside the rpc-reply where the subtree is appended.
+ private static Element getPlaceholder(final Document innerResult)
+ throws NetconfDocumentedException {
+ final XmlElement rootElement = XmlElement.fromDomElementWithExpected(
+ innerResult.getDocumentElement(), XmlNetconfConstants.RPC_REPLY_KEY, XmlNetconfConstants.RFC4741_TARGET_NAMESPACE);
+ return rootElement.getOnlyChildElement(XmlNetconfConstants.DATA_KEY).getDomElement();
+ }
+
+ private static NormalizedNode<?, ?> toNormalized(final Netconf netconfSubtree) {
+ return NotificationsTransformUtil.CODEC_REGISTRY.toNormalizedNode(NETCONF_SUBTREE_INSTANCE_IDENTIFIER, netconfSubtree).getValue();
+ }
+
+ @Override
+ protected Element handle(final Document document, final XmlElement message, final NetconfOperationChainedExecution subsequentOperation)
+ throws NetconfDocumentedException {
+ // The Document-level handle(Document, ...) override above is always used instead.
+ throw new UnsupportedOperationException("Never gets called");
+ }
+
+ @Override
+ protected HandlingPriority getHandlingPriority() {
+ // Run after the default get handler so its response can be augmented.
+ return HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY.increasePriority(2);
+ }
+
+ @Override
+ public void close() throws Exception {
+ // No resources to release.
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Date;
+import javassist.ClassPool;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.$YangModuleInfoImpl;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
+import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
+import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.sal.binding.generator.util.BindingRuntimeContext;
+import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+
+/**
+ * Utilities for transforming binding notification objects into NETCONF
+ * notification documents, using a dedicated schema context built from the two
+ * notification YANG models.
+ */
+public final class NotificationsTransformUtil {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NotificationsTransformUtil.class);
+
+ private NotificationsTransformUtil() {}
+
+ static final SchemaContext NOTIFICATIONS_SCHEMA_CTX;
+ static final BindingNormalizedNodeCodecRegistry CODEC_REGISTRY;
+ static final XMLOutputFactory XML_FACTORY;
+ static final RpcDefinition CREATE_SUBSCRIPTION_RPC;
+
+ static final SchemaPath CAPABILITY_CHANGE_SCHEMA_PATH = SchemaPath.create(true, NetconfCapabilityChange.QNAME);
+
+ static {
+ XML_FACTORY = XMLOutputFactory.newFactory();
+ XML_FACTORY.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
+
+ // Build a schema context containing only the netmod-notification and
+ // ietf-netconf-notifications models.
+ final ModuleInfoBackedContext moduleInfoBackedContext = ModuleInfoBackedContext.create();
+ moduleInfoBackedContext.addModuleInfos(Collections.singletonList($YangModuleInfoImpl.getInstance()));
+ moduleInfoBackedContext.addModuleInfos(Collections.singletonList(org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.$YangModuleInfoImpl.getInstance()));
+ final Optional<SchemaContext> schemaContextOptional = moduleInfoBackedContext.tryToCreateSchemaContext();
+ Preconditions.checkState(schemaContextOptional.isPresent());
+ NOTIFICATIONS_SCHEMA_CTX = schemaContextOptional.get();
+
+ // checkNotNull validates and assigns in one step; the previous duplicate
+ // null check of the just-assigned constant was redundant and is removed.
+ CREATE_SUBSCRIPTION_RPC = Preconditions.checkNotNull(findCreateSubscriptionRpc());
+
+ final JavassistUtils javassist = JavassistUtils.forClassPool(ClassPool.getDefault());
+ CODEC_REGISTRY = new BindingNormalizedNodeCodecRegistry(StreamWriterGenerator.create(javassist));
+ CODEC_REGISTRY.onBindingRuntimeContextUpdated(BindingRuntimeContext.create(moduleInfoBackedContext, NOTIFICATIONS_SCHEMA_CTX));
+ }
+
+ // Finds the create-subscription RPC definition in the schema context, or null.
+ private static RpcDefinition findCreateSubscriptionRpc() {
+ return Iterables.getFirst(Collections2.filter(NOTIFICATIONS_SCHEMA_CTX.getOperations(), new Predicate<RpcDefinition>() {
+ @Override
+ public boolean apply(final RpcDefinition input) {
+ return input.getQName().getLocalName().equals(CreateSubscription.CREATE_SUBSCRIPTION);
+ }
+ }), null);
+ }
+
+ /**
+ * Transform base notification for capabilities into NetconfNotification,
+ * using the notification's own event time.
+ */
+ public static NetconfNotification transform(final NetconfCapabilityChange capabilityChange) {
+ return transform(capabilityChange, Optional.<Date>absent());
+ }
+
+ /**
+ * Transform base notification for capabilities into NetconfNotification
+ * stamped with the supplied event time (may be null).
+ */
+ public static NetconfNotification transform(final NetconfCapabilityChange capabilityChange, final Date eventTime) {
+ return transform(capabilityChange, Optional.fromNullable(eventTime));
+ }
+
+ private static NetconfNotification transform(final NetconfCapabilityChange capabilityChange, final Optional<Date> eventTime) {
+ final ContainerNode containerNode = CODEC_REGISTRY.toNormalizedNodeNotification(capabilityChange);
+ final DOMResult result = new DOMResult(XmlUtil.newDocument());
+ try {
+ writeNormalizedNode(containerNode, result, CAPABILITY_CHANGE_SCHEMA_PATH);
+ } catch (final XMLStreamException | IOException e) {
+ throw new IllegalStateException("Unable to serialize " + capabilityChange, e);
+ }
+ final Document node = (Document) result.getNode();
+ return eventTime.isPresent() ?
+ new NetconfNotification(node, eventTime.get()) :
+ new NetconfNotification(node);
+ }
+
+ // Serializes a normalized node into the DOM result, closing all writers
+ // even when serialization fails.
+ static void writeNormalizedNode(final NormalizedNode<?, ?> normalized, final DOMResult result, final SchemaPath schemaPath) throws IOException, XMLStreamException {
+ NormalizedNodeWriter normalizedNodeWriter = null;
+ NormalizedNodeStreamWriter normalizedNodeStreamWriter = null;
+ XMLStreamWriter writer = null;
+ try {
+ writer = XML_FACTORY.createXMLStreamWriter(result);
+ normalizedNodeStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(writer, NOTIFICATIONS_SCHEMA_CTX, schemaPath);
+ normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(normalizedNodeStreamWriter);
+
+ normalizedNodeWriter.write(normalized);
+
+ normalizedNodeWriter.flush();
+ } finally {
+ try {
+ if (normalizedNodeWriter != null) {
+ normalizedNodeWriter.close();
+ }
+ if (normalizedNodeStreamWriter != null) {
+ normalizedNodeStreamWriter.close();
+ }
+ if (writer != null) {
+ writer.close();
+ }
+ } catch (final Exception e) {
+ // Best-effort cleanup: do not mask the original exception.
+ LOG.warn("Unable to close resource properly", e);
+ }
+ }
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.osgi;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Dictionary;
+import java.util.Hashtable;
+import java.util.Set;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.api.util.NetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.controller.netconf.notifications.impl.NetconfNotificationManager;
+import org.opendaylight.controller.netconf.notifications.impl.ops.CreateSubscription;
+import org.opendaylight.controller.netconf.notifications.impl.ops.Get;
+import org.osgi.framework.BundleActivator;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceRegistration;
+
+/**
+ * OSGi activator: publishes the NetconfNotificationManager as a
+ * NetconfNotificationCollector service and registers a NetconfOperationServiceFactory
+ * providing the get and create-subscription notification operations.
+ */
+public class Activator implements BundleActivator {
+
+ private ServiceRegistration<NetconfNotificationCollector> netconfNotificationCollectorServiceRegistration;
+ // Renamed from "operationaServiceRegistration" (typo).
+ private ServiceRegistration<NetconfOperationServiceFactory> operationalServiceRegistration;
+ private NetconfNotificationManager netconfNotificationManager;
+
+ @Override
+ public void start(final BundleContext context) throws Exception {
+ netconfNotificationManager = new NetconfNotificationManager();
+ netconfNotificationCollectorServiceRegistration = context.registerService(NetconfNotificationCollector.class, netconfNotificationManager, new Hashtable<String, Object>());
+
+ final NetconfOperationServiceFactory netconfOperationServiceFactory = new NetconfOperationServiceFactory() {
+
+ private final Set<Capability> capabilities = Collections.<Capability>singleton(new NotificationsCapability());
+
+ @Override
+ public Set<Capability> getCapabilities() {
+ return capabilities;
+ }
+
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ listener.onCapabilitiesAdded(capabilities);
+ return new AutoCloseable() {
+ @Override
+ public void close() {
+ listener.onCapabilitiesRemoved(capabilities);
+ }
+ };
+ }
+
+ @Override
+ public NetconfOperationService createService(final String netconfSessionIdForReporting) {
+ return new NetconfOperationService() {
+
+ // Kept as a field so close() can unregister its subscriptions.
+ private final CreateSubscription createSubscription = new CreateSubscription(netconfSessionIdForReporting, netconfNotificationManager);
+
+ @Override
+ public Set<NetconfOperation> getNetconfOperations() {
+ return Sets.<NetconfOperation>newHashSet(
+ new Get(netconfSessionIdForReporting, netconfNotificationManager),
+ createSubscription);
+ }
+
+ @Override
+ public void close() {
+ createSubscription.close();
+ }
+ };
+ }
+ };
+
+ final Dictionary<String, String> properties = new Hashtable<>();
+ properties.put(NetconfConstants.SERVICE_NAME, NetconfConstants.NETCONF_MONITORING);
+ operationalServiceRegistration = context.registerService(NetconfOperationServiceFactory.class, netconfOperationServiceFactory, properties);
+ }
+
+ @Override
+ public void stop(final BundleContext context) throws Exception {
+ if (netconfNotificationCollectorServiceRegistration != null) {
+ netconfNotificationCollectorServiceRegistration.unregister();
+ netconfNotificationCollectorServiceRegistration = null;
+ }
+ if (netconfNotificationManager != null) {
+ netconfNotificationManager.close();
+ }
+ if (operationalServiceRegistration != null) {
+ operationalServiceRegistration.unregister();
+ operationalServiceRegistration = null;
+ }
+ }
+
+ // Static: uses no state of the enclosing Activator instance.
+ private static class NotificationsCapability implements Capability {
+ @Override
+ public String getCapabilityUri() {
+ return NetconfNotification.NOTIFICATION_NAMESPACE;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public Collection<String> getLocation() {
+ return Collections.emptyList();
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.notifications.BaseNotificationPublisherRegistration;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationCollector;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.Stream;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChangeBuilder;
+
+public class NetconfNotificationManagerTest {
+
+ // NOTE(review): this mock is initialized but never used by the tests below —
+ // candidate for removal.
+ @Mock
+ private NetconfNotificationRegistry notificationRegistry;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ // A listener registered on the base stream receives published notifications,
+ // and stops receiving them after its registration is closed.
+ @Test
+ public void testNotificationListeners() throws Exception {
+ final NetconfNotificationManager netconfNotificationManager = new NetconfNotificationManager();
+ final BaseNotificationPublisherRegistration baseNotificationPublisherRegistration =
+ netconfNotificationManager.registerBaseNotificationPublisher();
+
+ final NetconfCapabilityChangeBuilder capabilityChangedBuilder = new NetconfCapabilityChangeBuilder();
+
+ final NetconfNotificationListener listener = mock(NetconfNotificationListener.class);
+ doNothing().when(listener).onNotification(any(StreamNameType.class), any(NetconfNotification.class));
+ final NotificationListenerRegistration notificationListenerRegistration = netconfNotificationManager.registerNotificationListener(NetconfNotificationManager.BASE_NETCONF_STREAM.getName(), listener);
+ final NetconfCapabilityChange notification = capabilityChangedBuilder.build();
+ baseNotificationPublisherRegistration.onCapabilityChanged(notification);
+
+ verify(listener).onNotification(any(StreamNameType.class), any(NetconfNotification.class));
+
+ notificationListenerRegistration.close();
+
+ // Listener was unregistered; this publish must not reach it.
+ baseNotificationPublisherRegistration.onCapabilityChanged(notification);
+ verifyNoMoreInteractions(listener);
+ }
+
+ // Closing the manager notifies stream listeners of unregistration and makes
+ // further publishing fail with IllegalStateException.
+ @Test
+ public void testClose() throws Exception {
+ final NetconfNotificationManager netconfNotificationManager = new NetconfNotificationManager();
+
+ final BaseNotificationPublisherRegistration baseNotificationPublisherRegistration = netconfNotificationManager.registerBaseNotificationPublisher();
+
+ final NetconfNotificationListener listener = mock(NetconfNotificationListener.class);
+ doNothing().when(listener).onNotification(any(StreamNameType.class), any(NetconfNotification.class));
+
+ netconfNotificationManager.registerNotificationListener(NetconfNotificationManager.BASE_NETCONF_STREAM.getName(), listener);
+
+ final NetconfNotificationCollector.NetconfNotificationStreamListener streamListener =
+ mock(NetconfNotificationCollector.NetconfNotificationStreamListener.class);
+ doNothing().when(streamListener).onStreamUnregistered(any(StreamNameType.class));
+ doNothing().when(streamListener).onStreamRegistered(any(Stream.class));
+ netconfNotificationManager.registerStreamListener(streamListener);
+
+ verify(streamListener).onStreamRegistered(NetconfNotificationManager.BASE_NETCONF_STREAM);
+
+ netconfNotificationManager.close();
+
+ verify(streamListener).onStreamUnregistered(NetconfNotificationManager.BASE_NETCONF_STREAM.getName());
+
+ try {
+ baseNotificationPublisherRegistration.onCapabilityChanged(new NetconfCapabilityChangeBuilder().build());
+ } catch (final IllegalStateException e) {
+ // Exception should be thrown after manager is closed
+ return;
+ }
+
+ fail("Publishing into a closed manager should fail");
+ }
+
+ // A stream listener registered up front is told about the base stream when a
+ // publisher registers, and about its removal when the publisher closes.
+ @Test
+ public void testStreamListeners() throws Exception {
+ final NetconfNotificationManager netconfNotificationManager = new NetconfNotificationManager();
+
+ final NetconfNotificationCollector.NetconfNotificationStreamListener streamListener = mock(NetconfNotificationCollector.NetconfNotificationStreamListener.class);
+ doNothing().when(streamListener).onStreamRegistered(any(Stream.class));
+ doNothing().when(streamListener).onStreamUnregistered(any(StreamNameType.class));
+
+ netconfNotificationManager.registerStreamListener(streamListener);
+
+ final BaseNotificationPublisherRegistration baseNotificationPublisherRegistration =
+ netconfNotificationManager.registerBaseNotificationPublisher();
+
+ verify(streamListener).onStreamRegistered(NetconfNotificationManager.BASE_NETCONF_STREAM);
+
+
+ baseNotificationPublisherRegistration.close();
+
+ verify(streamListener).onStreamUnregistered(NetconfNotificationManager.BASE_STREAM_NAME);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationListener;
+import org.opendaylight.controller.netconf.notifications.NetconfNotificationRegistry;
+import org.opendaylight.controller.netconf.notifications.NotificationListenerRegistration;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.w3c.dom.Element;
+
+public class CreateSubscriptionTest {
+
+ private static final String CREATE_SUBSCRIPTION_XML = "<create-subscription\n" +
+ "xmlns=\"urn:ietf:params:xml:ns:netconf:notification:1.0\" xmlns:netconf=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<stream>TESTSTREAM</stream>" +
+ "</create-subscription>";
+
+ @Mock
+ private NetconfNotificationRegistry notificationRegistry;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ // Registry mock reports every stream as available and accepts any listener.
+ doReturn(true).when(notificationRegistry).isStreamAvailable(any(StreamNameType.class));
+ doReturn(mock(NotificationListenerRegistration.class)).when(notificationRegistry).registerNotificationListener(any(StreamNameType.class), any(NetconfNotificationListener.class));
+ }
+
+ // A well-formed create-subscription request should yield an <ok/> reply.
+ @Test
+ public void testHandleWithNoSubsequentOperations() throws Exception {
+ final CreateSubscription createSubscription = new CreateSubscription("id", notificationRegistry);
+ createSubscription.setSession(mock(NetconfSession.class));
+
+ final Element e = XmlUtil.readXmlToElement(CREATE_SUBSCRIPTION_XML);
+
+ final XmlElement operationElement = XmlElement.fromDomElement(e);
+ final Element element = createSubscription.handleWithNoSubsequentOperations(XmlUtil.newDocument(), operationElement);
+
+ Assert.assertThat(XmlUtil.toString(element), CoreMatchers.containsString("ok"));
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import java.io.IOException;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.notifications.impl.ops.Get;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netconf.notification._1._0.rev080714.StreamNameType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.Streams;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.StreamsBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamBuilder;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.netmod.notification.rev080714.netconf.streams.StreamKey;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+public class GetTest {
+
+ // Serializing a single stream into a blank rpc-reply must produce the
+ // expected netconf/streams subtree inside <data>.
+ @Test
+ public void testSerializeStreamsSubtree() throws Exception {
+ final StreamsBuilder streamsBuilder = new StreamsBuilder();
+ final StreamBuilder streamBuilder = new StreamBuilder();
+ final StreamNameType base = new StreamNameType("base");
+ streamBuilder.setName(base);
+ streamBuilder.setKey(new StreamKey(base));
+ streamBuilder.setDescription("description");
+ streamBuilder.setReplaySupport(false);
+ streamsBuilder.setStream(Lists.newArrayList(streamBuilder.build()));
+ final Streams streams = streamsBuilder.build();
+
+ final Document response = getBlankResponse();
+ Get.serializeStreamsSubtree(response, streams);
+ final Diff diff = XMLUnit.compareXML(XmlUtil.toString(response),
+ "<rpc-reply message-id=\"101\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<data>\n" +
+ "<netconf xmlns=\"urn:ietf:params:xml:ns:netmod:notification\">\n" +
+ "<streams>\n" +
+ "<stream>\n" +
+ "<name>base</name>\n" +
+ "<description>description</description>\n" +
+ "<replaySupport>false</replaySupport>\n" +
+ "</stream>\n" +
+ "</streams>\n" +
+ "</netconf>\n" +
+ "</data>\n" +
+ "</rpc-reply>\n");
+
+ assertTrue(diff.toString(), diff.identical());
+ }
+
+ // Returns an rpc-reply document with an empty <data> element to serialize into.
+ private Document getBlankResponse() throws IOException, SAXException {
+
+ return XmlUtil.readXmlToDocument("<rpc-reply message-id=\"101\"\n" +
+ "xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<data>\n" +
+ "</data>\n" +
+ "</rpc-reply>");
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.notifications.impl.ops;
+
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.notifications.NetconfNotification;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChange;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.notifications.rev120206.NetconfCapabilityChangeBuilder;
+
+public class NotificationsTransformUtilTest {
+
+ private static final Date DATE = new Date();
+ // NOTE(review): the builder below adds "uri1" twice but this expected XML
+ // contains it once — presumably the leaf-list deduplicates; confirm.
+ private static final String innerNotification = "<netconf-capability-change xmlns=\"urn:ietf:params:xml:ns:yang:ietf-netconf-notifications\">" +
+ "<deleted-capability>uri3</deleted-capability>" +
+ "<deleted-capability>uri4</deleted-capability>" +
+ "<added-capability>uri1</added-capability>" +
+ "</netconf-capability-change>";
+
+ private static final String expectedNotification = "<notification xmlns=\"urn:ietf:params:netconf:capability:notification:1.0\">" +
+ innerNotification +
+ "<eventTime>" + new SimpleDateFormat(NetconfNotification.RFC3339_DATE_FORMAT_BLUEPRINT).format(DATE) + "</eventTime>" +
+ "</notification>";
+
+ // Transforming a binding NetconfCapabilityChange must yield the expected
+ // notification document with the supplied event time.
+ @Test
+ public void testTransform() throws Exception {
+ final NetconfCapabilityChangeBuilder netconfCapabilityChangeBuilder = new NetconfCapabilityChangeBuilder();
+
+ netconfCapabilityChangeBuilder.setAddedCapability(Lists.newArrayList(new Uri("uri1"), new Uri("uri1")));
+ netconfCapabilityChangeBuilder.setDeletedCapability(Lists.newArrayList(new Uri("uri3"), new Uri("uri4")));
+
+ final NetconfCapabilityChange capabilityChange = netconfCapabilityChangeBuilder.build();
+ final NetconfNotification transform = NotificationsTransformUtil.transform(capabilityChange, DATE);
+
+ final String serialized = XmlUtil.toString(transform.getDocument());
+
+ XMLUnit.setIgnoreWhitespace(true);
+ final Diff diff = XMLUnit.compareXML(expectedNotification, serialized);
+ assertTrue(diff.toString(), diff.similar());
+ }
+
+ // Wrapping an already-built DOM notification must produce the same document.
+ @Test
+ public void testTransformFromDOM() throws Exception {
+ final NetconfNotification netconfNotification = new NetconfNotification(XmlUtil.readXmlToDocument(innerNotification), DATE);
+
+ XMLUnit.setIgnoreWhitespace(true);
+ final Diff diff = XMLUnit.compareXML(expectedNotification, netconfNotification.toString());
+ assertTrue(diff.toString(), diff.similar());
+ }
+
+}
\ No newline at end of file
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-impl</artifactId>
+ </dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-config-api</artifactId>
+ <!-- TODO: manage this version via dependencyManagement like the sibling dependencies instead of hardcoding the SNAPSHOT here -->
+ <version>0.3.0-SNAPSHOT</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
</plugins>
</build>
--- /dev/null
+package org.opendaylight.controller.config.yang.netconf.northbound.ssh;
+
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.local.LocalAddress;
+import io.netty.util.concurrent.GenericFutureListener;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.concurrent.Executors;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
+import org.opendaylight.controller.netconf.api.NetconfServerDispatcher;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Config subsystem module that starts the NETCONF northbound endpoint over SSH.
+ * A local (in-VM) server channel is opened via the dispatcher dependency and an
+ * SshProxyServer is bound in front of it, forwarding authenticated SSH traffic
+ * to that local channel.
+ */
+public class NetconfNorthboundSshModule extends org.opendaylight.controller.config.yang.netconf.northbound.ssh.AbstractNetconfNorthboundSshModule {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfNorthboundSshModule.class);
+
+ public NetconfNorthboundSshModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfNorthboundSshModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, final org.opendaylight.controller.config.yang.netconf.northbound.ssh.NetconfNorthboundSshModule oldModule, final java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation for module attributes here.
+ }
+
+ /**
+ * Creates the running instance: opens a local server channel, then binds an
+ * SSH proxy at the configured address which forwards sessions to it.
+ * NOTE(review): the scheduled thread pool created below is only reachable
+ * through SshProxyServer — confirm close() shuts that executor down.
+ */
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ final NetconfServerDispatcher dispatch = getDispatcherDependency();
+
+ // The local server is addressed by the port number string; the SSH proxy targets it by this name.
+ final LocalAddress localAddress = new LocalAddress(getPort().toString());
+ final ChannelFuture localServer = dispatch.createLocalServer(localAddress);
+
+ final SshProxyServer sshProxyServer = new SshProxyServer(Executors.newScheduledThreadPool(1), getWorkerThreadGroupDependency(), getEventExecutorDependency());
+
+ final InetSocketAddress bindingAddress = getInetAddress();
+ final SshProxyServerConfigurationBuilder sshProxyServerConfigurationBuilder = new SshProxyServerConfigurationBuilder();
+ sshProxyServerConfigurationBuilder.setBindingAddress(bindingAddress);
+ sshProxyServerConfigurationBuilder.setLocalAddress(localAddress);
+ sshProxyServerConfigurationBuilder.setAuthenticator(new UserAuthenticator(getUsername(), getPassword()));
+ // MAX_VALUE effectively disables the SSH idle timeout.
+ sshProxyServerConfigurationBuilder.setIdleTimeout(Integer.MAX_VALUE);
+ sshProxyServerConfigurationBuilder.setKeyPairProvider(new PEMGeneratorHostKeyProvider());
+
+ // Bind the SSH endpoint only after the local server channel is up.
+ localServer.addListener(new GenericFutureListener<ChannelFuture>() {
+
+ @Override
+ public void operationComplete(final ChannelFuture future) {
+ if(future.isDone() && !future.isCancelled()) {
+ try {
+ sshProxyServer.bind(sshProxyServerConfigurationBuilder.createSshProxyServerConfiguration());
+ LOG.info("Netconf SSH endpoint started successfully at {}", bindingAddress);
+ } catch (final IOException e) {
+ throw new RuntimeException("Unable to start SSH netconf server", e);
+ }
+ } else {
+ LOG.warn("Unable to start SSH netconf server at {}", bindingAddress, future.cause());
+ throw new RuntimeException("Unable to start SSH netconf server", future.cause());
+ }
+ }
+ });
+
+ return new NetconfServerCloseable(localServer, sshProxyServer);
+ }
+
+ /**
+ * Resolves the configured binding address (IPv4 when set, IPv6 otherwise)
+ * into an InetSocketAddress carrying the configured port.
+ *
+ * @throws IllegalArgumentException if the address cannot be resolved
+ */
+ private InetSocketAddress getInetAddress() {
+ try {
+ final InetAddress inetAd = InetAddress.getByName(getBindingAddress().getIpv4Address() == null ? getBindingAddress().getIpv6Address().getValue() : getBindingAddress().getIpv4Address().getValue());
+ return new InetSocketAddress(inetAd, getPort().getValue());
+ } catch (final UnknownHostException e) {
+ throw new IllegalArgumentException("Unable to bind netconf endpoint to address " + getBindingAddress(), e);
+ }
+ }
+
+ // Closes the SSH proxy first, then the local server channel (or cancels the pending bind).
+ private static final class NetconfServerCloseable implements AutoCloseable {
+ private final ChannelFuture localServer;
+ private final SshProxyServer sshProxyServer;
+
+ public NetconfServerCloseable(final ChannelFuture localServer, final SshProxyServer sshProxyServer) {
+ this.localServer = localServer;
+ this.sshProxyServer = sshProxyServer;
+ }
+
+ @Override
+ public void close() throws Exception {
+ sshProxyServer.close();
+
+ if(localServer.isDone()) {
+ localServer.channel().close();
+ } else {
+ localServer.cancel(true);
+ }
+ }
+ }
+
+
+ // Plain username/password check against the statically configured credentials.
+ private static final class UserAuthenticator implements PasswordAuthenticator {
+
+ private final String username;
+ private final String password;
+
+ public UserAuthenticator(final String username, final String password) {
+ this.username = username;
+ this.password = password;
+ }
+
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ // FIXME use aaa stuff here instead
+ return this.username.equals(username) && this.password.equals(password);
+ }
+ }
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netconf-northbound-ssh yang module local name: netconf-northbound-ssh
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Mon Feb 09 14:09:07 CET 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.netconf.northbound.ssh;
+// Default factory: all behaviour is inherited from the generated abstract factory.
+public class NetconfNorthboundSshModuleFactory extends org.opendaylight.controller.config.yang.netconf.northbound.ssh.AbstractNetconfNorthboundSshModuleFactory {
+
+}
--- /dev/null
+// Config subsystem wiring for a NETCONF northbound endpoint exposed over SSH.
+module netconf-northbound-ssh {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netconf:northbound:ssh";
+ prefix "nni";
+
+ import netconf-northbound-mapper { prefix nnm; revision-date 2015-01-14; }
+ import netconf-northbound { prefix nn; revision-date 2015-01-14; }
+ import config { prefix config; revision-date 2013-04-05; }
+ import threadpool {prefix th;}
+ import netty {prefix netty;}
+ import ietf-inet-types { prefix inet; revision-date 2010-09-24; }
+
+ organization "Cisco Systems, Inc.";
+
+ description
+ "This module contains the base YANG definitions for
+ a default implementation of netconf northbound server";
+
+ revision "2015-01-14" {
+ description
+ "Initial revision.";
+ }
+
+ // Module type registered with the config subsystem.
+ identity netconf-northbound-ssh {
+ base config:module-type;
+ config:java-name-prefix NetconfNorthboundSsh;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netconf-northbound-ssh {
+ when "/config:modules/config:module/config:type = 'netconf-northbound-ssh'";
+
+ // TCP port the SSH endpoint listens on.
+ leaf port {
+ type inet:port-number;
+ default 2830;
+ }
+
+ leaf binding-address {
+ type inet:ip-address;
+ default "0.0.0.0";
+ }
+
+ container processing-executor {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity th:scheduled-threadpool;
+ }
+ }
+
+ description "Required by the mina-ssh library used in SSH endpoint";
+ }
+
+ container event-executor {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity netty:netty-event-executor;
+ }
+ }
+ }
+
+ container worker-thread-group {
+ uses config:service-ref {
+ refine type {
+ config:required-identity netty:netty-threadgroup;
+ }
+ }
+ }
+
+ // Dispatcher serving the NETCONF sessions forwarded from the SSH proxy.
+ container dispatcher {
+ uses config:service-ref {
+ refine type {
+ config:required-identity nn:netconf-server-dispatcher;
+ }
+ }
+ }
+
+ // FIXME use auth provider from aaa instead
+ leaf username {
+ description "Specifies username credential";
+ type string;
+ }
+
+ leaf password {
+ description "Specifies password credential";
+ type string;
+ }
+
+
+ }
+ }
+
+}
\ No newline at end of file
final EchoClientHandler echoClientHandler = connectClient(addr);
- Stopwatch stopwatch = new Stopwatch().start();
+ Stopwatch stopwatch = Stopwatch.createStarted();
while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 30) {
Thread.sleep(500);
}
public void testClientWithoutServer() throws Exception {
final InetSocketAddress address = new InetSocketAddress(12345);
final EchoClientHandler echoClientHandler = connectClient(address);
- final Stopwatch stopwatch = new Stopwatch().start();
+ final Stopwatch stopwatch = Stopwatch.createStarted();
while(echoClientHandler.getState() == State.CONNECTING && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
Thread.sleep(100);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool;
+
+import com.google.common.base.Optional;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.confignetconfconnector.util.Util;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleBuilder;
+
+/**
+ * Can be passed instead of ModuleBuilderCapability when building capabilities
+ * in NetconfDeviceSimulator when testing various schema resolution related exceptions.
+ */
+public class FakeModuleBuilderCapability implements Capability{
+ // Sentinel meaning "module has no revision" (epoch date).
+ private static final Date NO_REVISION = new Date(0);
+ private final ModuleBuilder input;
+ // NOTE(review): stored but never exposed — getCapabilitySchema() deliberately
+ // returns absent to trigger the schema resolution failure; confirm intent.
+ private final Optional<String> content;
+
+ public FakeModuleBuilderCapability(final ModuleBuilder input, final String inputStream) {
+ this.input = input;
+ this.content = Optional.of(inputStream);
+ }
+
+ // URI of form namespace?module=name[&revision=rev].
+ @Override
+ public String getCapabilityUri() {
+ // FIXME capabilities in Netconf-impl need to check for NO REVISION
+ final String withoutRevision = getModuleNamespace().get() + "?module=" + getModuleName().get();
+ return hasRevision() ? withoutRevision + "&revision=" + Util.writeDate(input.getRevision()) : withoutRevision;
+ }
+
+ @Override
+ public Optional<String> getModuleNamespace() {
+ return Optional.of(input.getNamespace().toString());
+ }
+
+ @Override
+ public Optional<String> getModuleName() {
+ return Optional.of(input.getName());
+ }
+
+ // Empty string (never absent) when the module carries no revision.
+ @Override
+ public Optional<String> getRevision() {
+ return Optional.of(hasRevision() ? QName.formattedRevision(input.getRevision()) : "");
+ }
+
+ private boolean hasRevision() {
+ return !input.getRevision().equals(NO_REVISION);
+ }
+
+ /**
+ *
+ * @return empty schema source to trigger schema resolution exception.
+ */
+ @Override
+ public Optional<String> getCapabilitySchema() {
+ return Optional.absent();
+ }
+
+ @Override
+ public List<String> getLocation() {
+ return Collections.emptyList();
+ }
+}
@Arg(dest = "debug")
public boolean debug;
+ @Arg(dest = "notification-file")
+ public File notificationFile;
+
static ArgumentParser getParser() {
final ArgumentParser parser = ArgumentParsers.newArgumentParser("netconf testool");
.help("Directory containing yang schemas to describe simulated devices. Some schemas e.g. netconf monitoring and inet types are included by default")
.dest("schemas-dir");
+ parser.addArgument("--notification-file")
+ .type(File.class)
+ .help("Xml file containing notifications that should be sent to clients after create subscription is called")
+ .dest("notification-file");
+
parser.addArgument("--starting-port")
.type(Integer.class)
.setDefault(17830)
import java.util.Collections;
import java.util.Date;
import java.util.List;
+import org.opendaylight.controller.netconf.api.Capability;
import org.opendaylight.controller.netconf.confignetconfconnector.util.Util;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleBuilder;
import com.google.common.base.Charsets;
import com.google.common.base.Function;
-import com.google.common.base.Objects;
+import com.google.common.base.MoreObjects;
import com.google.common.base.Optional;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.HashedWheelTimer;
import java.io.Closeable;
+import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import org.apache.sshd.server.PasswordAuthenticator;
import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
import org.apache.sshd.server.session.ServerSession;
-import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
+import org.opendaylight.controller.netconf.api.Capability;
+import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
-import org.opendaylight.controller.netconf.impl.NetconfServerDispatcher;
+import org.opendaylight.controller.netconf.impl.NetconfServerDispatcherImpl;
import org.opendaylight.controller.netconf.impl.NetconfServerSessionNegotiatorFactory;
import org.opendaylight.controller.netconf.impl.SessionIdProvider;
+import org.opendaylight.controller.netconf.impl.osgi.AggregatedNetconfOperationServiceFactory;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
-import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringActivator;
import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringOperationService;
import org.opendaylight.controller.netconf.ssh.SshProxyServer;
import org.opendaylight.controller.netconf.ssh.SshProxyServerConfiguration;
import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
+import org.opendaylight.controller.netconf.test.tool.rpc.DataList;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedCommit;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedCreateSubscription;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedEditConfig;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedGet;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedGetConfig;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedLock;
+import org.opendaylight.controller.netconf.test.tool.rpc.SimulatedUnLock;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceRepresentation;
private final ScheduledExecutorService minaTimerExecutor;
private final ExecutorService nioExecutor;
+ private boolean sendFakeSchema = false;
+
public NetconfDeviceSimulator() {
// TODO make pool size configurable
this(new NioEventLoopGroup(), new HashedWheelTimer(),
this.nioExecutor = nioExecutor;
}
- private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
+ private NetconfServerDispatcherImpl createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout, final Optional<File> notificationsFile) {
final Set<Capability> capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function<ModuleBuilder, Capability>() {
@Override
public Capability apply(final ModuleBuilder input) {
- return new ModuleBuilderCapability(input, moduleBuilders.get(input));
+ if (sendFakeSchema) {
+ sendFakeSchema = false;
+ return new FakeModuleBuilderCapability(input, moduleBuilders.get(input));
+ } else {
+ return new ModuleBuilderCapability(input, moduleBuilders.get(input));
+ }
}
}));
final SessionIdProvider idProvider = new SessionIdProvider();
- final SimulatedOperationProvider simulatedOperationProvider = new SimulatedOperationProvider(idProvider, capabilities);
- final NetconfMonitoringOperationService monitoringService = new NetconfMonitoringOperationService(new NetconfMonitoringServiceImpl(simulatedOperationProvider));
- simulatedOperationProvider.addService(monitoringService);
+
+ final AggregatedNetconfOperationServiceFactory aggregatedNetconfOperationServiceFactory = new AggregatedNetconfOperationServiceFactory();
+ final SimulatedOperationProvider simulatedOperationProvider = new SimulatedOperationProvider(idProvider, capabilities, notificationsFile);
+
+ final NetconfMonitoringService monitoringService1 = new NetconfMonitoringServiceImpl(aggregatedNetconfOperationServiceFactory);
+ final NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory monitoringService =
+ new NetconfMonitoringActivator.NetconfMonitoringOperationServiceFactory(new NetconfMonitoringOperationService(monitoringService1));
+ aggregatedNetconfOperationServiceFactory.onAddNetconfOperationServiceFactory(simulatedOperationProvider);
+ aggregatedNetconfOperationServiceFactory.onAddNetconfOperationServiceFactory(monitoringService);
final DefaultCommitNotificationProducer commitNotifier = new DefaultCommitNotificationProducer(ManagementFactory.getPlatformMBeanServer());
: Sets.newHashSet(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0, XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
- hashedWheelTimer, simulatedOperationProvider, idProvider, generateConfigsTimeout, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
+ hashedWheelTimer, aggregatedNetconfOperationServiceFactory, idProvider, generateConfigsTimeout, commitNotifier, monitoringService1, serverCapabilities);
- final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
+ final NetconfServerDispatcherImpl.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcherImpl.ServerChannelInitializer(
serverNegotiatorFactory);
- return new NetconfServerDispatcher(serverChannelInitializer, nettyThreadgroup, nettyThreadgroup);
+ return new NetconfServerDispatcherImpl(serverChannelInitializer, nettyThreadgroup, nettyThreadgroup);
}
private Map<ModuleBuilder, String> toModuleBuilders(final Map<SourceIdentifier, Map.Entry<ASTSchemaSource, YangTextSchemaSource>> sources) {
final Map<ModuleBuilder, String> moduleBuilders = parseSchemasToModuleBuilders(params);
- final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout);
+ final NetconfServerDispatcherImpl dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout, Optional.fromNullable(params.notificationFile));
int currentPort = params.startingPort;
public CheckedFuture<? extends SchemaSourceRepresentation, SchemaSourceException> getSource(final SourceIdentifier sourceIdentifier) {
return Futures.immediateCheckedFuture(new YangTextSchemaSource(sourceId) {
@Override
- protected Objects.ToStringHelper addToStringAttributes(final Objects.ToStringHelper toStringHelper) {
+ protected MoreObjects.ToStringHelper addToStringAttributes(final MoreObjects.ToStringHelper toStringHelper) {
return toStringHelper;
}
// close Everything
}
- private static class SimulatedOperationProvider implements NetconfOperationProvider {
- private final SessionIdProvider idProvider;
- private final Set<NetconfOperationService> netconfOperationServices;
+ private static class SimulatedOperationProvider implements NetconfOperationServiceFactory {
+ private final Set<Capability> caps;
+ private final SimulatedOperationService simulatedOperationService;
- public SimulatedOperationProvider(final SessionIdProvider idProvider, final Set<Capability> caps) {
- this.idProvider = idProvider;
- final SimulatedOperationService simulatedOperationService = new SimulatedOperationService(caps, idProvider.getCurrentSessionId());
- this.netconfOperationServices = Sets.<NetconfOperationService>newHashSet(simulatedOperationService);
+ public SimulatedOperationProvider(final SessionIdProvider idProvider, final Set<Capability> caps, final Optional<File> notificationsFile) {
+ this.caps = caps;
+ simulatedOperationService = new SimulatedOperationService(idProvider.getCurrentSessionId(), notificationsFile);
}
@Override
- public NetconfOperationServiceSnapshot openSnapshot(final String sessionIdForReporting) {
- return new SimulatedServiceSnapshot(idProvider, netconfOperationServices);
+ public Set<Capability> getCapabilities() {
+ return caps;
}
- public void addService(final NetconfOperationService monitoringService) {
- netconfOperationServices.add(monitoringService);
+ @Override
+ public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
+ return new AutoCloseable() {
+ @Override
+ public void close() throws Exception {}
+ };
}
- private static class SimulatedServiceSnapshot implements NetconfOperationServiceSnapshot {
- private final SessionIdProvider idProvider;
- private final Set<NetconfOperationService> netconfOperationServices;
-
- public SimulatedServiceSnapshot(final SessionIdProvider idProvider, final Set<NetconfOperationService> netconfOperationServices) {
- this.idProvider = idProvider;
- this.netconfOperationServices = netconfOperationServices;
- }
-
- @Override
- public String getNetconfSessionIdForReporting() {
- return String.valueOf(idProvider.getCurrentSessionId());
- }
-
- @Override
- public Set<NetconfOperationService> getServices() {
- return netconfOperationServices;
- }
-
- @Override
- public void close() throws Exception {}
+ @Override
+ public NetconfOperationService createService(final String netconfSessionIdForReporting) {
+ return simulatedOperationService;
}
static class SimulatedOperationService implements NetconfOperationService {
- private final Set<Capability> capabilities;
private final long currentSessionId;
+ private final Optional<File> notificationsFile;
- public SimulatedOperationService(final Set<Capability> capabilities, final long currentSessionId) {
- this.capabilities = capabilities;
+ public SimulatedOperationService(final long currentSessionId, final Optional<File> notificationsFile) {
this.currentSessionId = currentSessionId;
- }
-
- @Override
- public Set<Capability> getCapabilities() {
- return capabilities;
+ this.notificationsFile = notificationsFile;
}
@Override
final SimulatedEditConfig sEditConfig = new SimulatedEditConfig(String.valueOf(currentSessionId), storage);
final SimulatedGetConfig sGetConfig = new SimulatedGetConfig(String.valueOf(currentSessionId), storage);
final SimulatedCommit sCommit = new SimulatedCommit(String.valueOf(currentSessionId));
- return Sets.<NetconfOperation>newHashSet(sGet, sGetConfig, sEditConfig, sCommit);
+ final SimulatedLock sLock = new SimulatedLock(String.valueOf(currentSessionId));
+ final SimulatedUnLock sUnlock = new SimulatedUnLock(String.valueOf(currentSessionId));
+ final SimulatedCreateSubscription sCreateSubs = new SimulatedCreateSubscription(String.valueOf(currentSessionId), notificationsFile);
+ return Sets.<NetconfOperation>newHashSet(sGet, sGetConfig, sEditConfig, sCommit, sLock, sUnlock, sCreateSubs);
}
@Override
}
}
- private class LoggingMonitoringService implements SessionMonitoringService {
- @Override
- public void onSessionUp(final NetconfManagementSession session) {
- LOG.debug("Session {} established", session);
- }
-
- @Override
- public void onSessionDown(final NetconfManagementSession session) {
- LOG.debug("Session {} down", session);
- }
- }
-
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.test.tool;
+package org.opendaylight.controller.netconf.test.tool.rpc;
import java.util.Collections;
import java.util.List;
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.test.tool;
+package org.opendaylight.controller.netconf.test.tool.rpc;
import com.google.common.base.Optional;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-class SimulatedCommit extends AbstractConfigNetconfOperation {
+public class SimulatedCommit extends AbstractConfigNetconfOperation {
- SimulatedCommit(final String netconfSessionIdForReporting) {
+ public SimulatedCommit(final String netconfSessionIdForReporting) {
super(null, netconfSessionIdForReporting);
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.rpc;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+import javax.xml.bind.annotation.XmlRootElement;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.mapping.operations.DefaultNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+
+/**
+ * Simulated create-subscription RPC for the test tool: replies with ok and,
+ * when a notifications file was supplied, schedules its notifications to be
+ * sent to the session at the configured (cumulative) delays.
+ */
+public class SimulatedCreateSubscription extends AbstractLastNetconfOperation implements DefaultNetconfOperation {
+
+ private NetconfServerSession session;
+ private final Optional<Notifications> notifications;
+ // Only initialized when a notifications file is present.
+ // NOTE(review): this executor is never shut down — acceptable in a test tool, but confirm.
+ private ScheduledExecutorService scheduledExecutorService;
+
+ public SimulatedCreateSubscription(final String id, final Optional<File> notificationsFile) {
+ super(id);
+ if(notificationsFile.isPresent()) {
+ notifications = Optional.of(loadNotifications(notificationsFile.get()));
+ scheduledExecutorService = Executors.newScheduledThreadPool(1);
+ } else {
+ notifications = Optional.absent();
+ }
+ }
+
+ /**
+ * Unmarshals the XML notifications file via JAXB.
+ *
+ * @throws IllegalArgumentException if the file cannot be parsed
+ */
+ private Notifications loadNotifications(final File file) {
+ try {
+ final JAXBContext jaxbContext = JAXBContext.newInstance(Notifications.class);
+ final Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
+ return (Notifications) jaxbUnmarshaller.unmarshal(file);
+ } catch (final JAXBException e) {
+ throw new IllegalArgumentException("Cannot parse file " + file + " as a notifications file", e);
+ }
+ }
+
+ @Override
+ protected String getOperationName() {
+ return "create-subscription";
+ }
+
+ @Override
+ protected String getOperationNamespace() {
+ return "urn:ietf:params:xml:ns:netconf:notification:1.0";
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+
+
+ if(notifications.isPresent()) {
+ // Delays accumulate so notifications go out sequentially, each spaced by its own delay.
+ long delayAggregator = 0;
+ System.console().writer().println("Scheduling notifications " + notifications.get());
+
+ for (final Notification notification : notifications.get().getNotificationList()) {
+ // NOTE(review): <= sends each notification times+1 times; this also makes an
+ // unset <times> (0) send it once — confirm which semantics are intended.
+ for (int i = 0; i <= notification.getTimes(); i++) {
+
+ delayAggregator += notification.getDelayInSeconds();
+
+ System.console().writer().println("Times " + notification.getTimes());
+ scheduledExecutorService.schedule(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ System.console().writer().println("Sending actual notification " + notification);
+ Preconditions.checkState(session != null, "Session is not set, cannot process notifications");
+ session.sendMessage(parseNetconfNotification(notification.getContent()));
+ } catch (IOException | SAXException e) {
+ throw new IllegalStateException("Unable to process notification " + notification, e);
+ }
+ }
+ }, delayAggregator, TimeUnit.SECONDS);
+ }
+ }
+ }
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ /**
+ * Parses a notification from its XML string; a placeholder eventTime of
+ * "XXXX" is replaced with the current timestamp before parsing.
+ */
+ private static NetconfMessage parseNetconfNotification(String content) throws IOException, SAXException {
+ final int startEventTime = content.indexOf("<eventTime>") + "<eventTime>".length();
+ final int endEventTime = content.indexOf("</eventTime>");
+ final String eventTime = content.substring(startEventTime, endEventTime);
+ if(eventTime.equals("XXXX")) {
+ content = content.replace(eventTime, new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXXX").format(new Date()));
+ }
+
+ return new NetconfMessage(XmlUtil.readXmlToDocument(content));
+ }
+
+ @Override
+ public void setNetconfSession(final NetconfServerSession s) {
+ this.session = s;
+ }
+
+ // JAXB mapping for the root <notifications> element of the input file.
+ @XmlRootElement(name = "notifications")
+ public static final class Notifications {
+
+ @javax.xml.bind.annotation.XmlElement(nillable = false, name = "notification", required = true)
+ private List<Notification> notificationList;
+
+ public List<Notification> getNotificationList() {
+ return notificationList;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuffer sb = new StringBuffer("Notifications{");
+ sb.append("notificationList=").append(notificationList);
+ sb.append('}');
+ return sb.toString();
+ }
+ }
+
+ // JAXB mapping for one <notification>: delay, repeat count and XML payload.
+ public static final class Notification {
+
+ @javax.xml.bind.annotation.XmlElement(nillable = false, name = "delay")
+ private long delayInSeconds;
+
+ @javax.xml.bind.annotation.XmlElement(nillable = false, name = "times")
+ private long times;
+
+ @javax.xml.bind.annotation.XmlElement(nillable = false, name = "content", required = true)
+ private String content;
+
+ public long getDelayInSeconds() {
+ return delayInSeconds;
+ }
+
+ public long getTimes() {
+ return times;
+ }
+
+ public String getContent() {
+ return content;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuffer sb = new StringBuffer("Notification{");
+ sb.append("delayInSeconds=").append(delayInSeconds);
+ sb.append(", times=").append(times);
+ sb.append(", content='").append(content).append('\'');
+ sb.append('}');
+ return sb.toString();
+ }
+ }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.test.tool;
+package org.opendaylight.controller.netconf.test.tool.rpc;
import com.google.common.base.Optional;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-class SimulatedEditConfig extends AbstractConfigNetconfOperation {
+public class SimulatedEditConfig extends AbstractConfigNetconfOperation {
private static final String DELETE_EDIT_CONFIG = "delete";
private static final String OPERATION = "operation";
private static final String REMOVE_EDIT_CONFIG = "remove";
private final DataList storage;
- SimulatedEditConfig(final String netconfSessionIdForReporting, final DataList storage) {
+ public SimulatedEditConfig(final String netconfSessionIdForReporting, final DataList storage) {
super(null, netconfSessionIdForReporting);
this.storage = storage;
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.test.tool;
+package org.opendaylight.controller.netconf.test.tool.rpc;
import com.google.common.base.Optional;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-class SimulatedGet extends AbstractConfigNetconfOperation {
+public class SimulatedGet extends AbstractConfigNetconfOperation {
private final DataList storage;
- SimulatedGet(final String netconfSessionIdForReporting, final DataList storage) {
+ public SimulatedGet(final String netconfSessionIdForReporting, final DataList storage) {
super(null, netconfSessionIdForReporting);
this.storage = storage;
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.test.tool;
+package org.opendaylight.controller.netconf.test.tool.rpc;
import com.google.common.base.Optional;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-class SimulatedGetConfig extends AbstractConfigNetconfOperation {
+public class SimulatedGetConfig extends AbstractConfigNetconfOperation {
private final DataList storage;
- SimulatedGetConfig(final String netconfSessionIdForReporting, final DataList storage) {
+ public SimulatedGetConfig(final String netconfSessionIdForReporting, final DataList storage) {
super(null, netconfSessionIdForReporting);
this.storage = storage;
}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.rpc;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Simulated NETCONF {@code <lock>} operation for the test tool: it does not
+ * lock any datastore, it unconditionally replies with an {@code <ok/>} element.
+ */
+public class SimulatedLock extends AbstractConfigNetconfOperation {
+
+    public SimulatedLock(final String netconfSessionIdForReporting) {
+        // No config subsystem backing is needed for the simulator, hence null.
+        super(null, netconfSessionIdForReporting);
+    }
+
+    @Override
+    protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+        // Always succeed: return a bare <ok/> with no namespace prefix.
+        return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+    }
+
+    @Override
+    protected String getOperationName() {
+        // RPC element name this handler is dispatched on.
+        return "lock";
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool.rpc;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Simulated NETCONF {@code <unlock>} operation for the test tool: mirrors
+ * {@code SimulatedLock} — no datastore state is touched, the reply is always
+ * a plain {@code <ok/>} element.
+ */
+public class SimulatedUnLock extends AbstractConfigNetconfOperation {
+
+    public SimulatedUnLock(final String netconfSessionIdForReporting) {
+        // No config subsystem backing is needed for the simulator, hence null.
+        super(null, netconfSessionIdForReporting);
+    }
+
+    @Override
+    protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+        // Always succeed: return a bare <ok/> with no namespace prefix.
+        return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+    }
+
+    @Override
+    protected String getOperationName() {
+        // RPC element name this handler is dispatched on.
+        return "unlock";
+    }
+}
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
-import com.google.common.io.CharStreams;
-import com.google.common.io.InputSupplier;
+import com.google.common.io.ByteSource;
import java.io.IOException;
import java.io.InputStream;
-import java.io.InputStreamReader;
import javax.xml.parsers.ParserConfigurationException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public static String fileToString(final String fileName) throws IOException {
try (InputStream resourceAsStream = XmlFileLoader.class.getClassLoader().getResourceAsStream(fileName)) {
Preconditions.checkNotNull(resourceAsStream);
-
- InputSupplier<? extends InputStream> supplier = new InputSupplier<InputStream>() {
+ return new ByteSource() {
@Override
- public InputStream getInput() throws IOException {
+ public InputStream openStream() {
return resourceAsStream;
}
- };
-
- InputSupplier<InputStreamReader> readerSupplier = CharStreams.newReaderSupplier(supplier, Charsets.UTF_8);
+ }.asCharSource(Charsets.UTF_8).read();
- return CharStreams.toString(readerSupplier);
}
}
<version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
- <prerequisites>
- <maven>3.0.4</maven>
- </prerequisites>
<modules>
<module>netconf-api</module>
<module>netconf-config</module>
<module>netconf-impl</module>
<module>config-netconf-connector</module>
+ <module>mdsal-netconf-connector</module>
+ <module>mdsal-netconf-monitoring</module>
<module>netconf-util</module>
<module>netconf-netty-util</module>
<module>config-persister-impl</module>
<module>netconf-ssh</module>
<module>netconf-tcp</module>
<module>netconf-monitoring</module>
+ <module>ietf-netconf</module>
<module>ietf-netconf-monitoring</module>
+ <module>ietf-netconf-notifications</module>
<module>ietf-netconf-monitoring-extension</module>
<module>netconf-connector-config</module>
+ <module>netconf-mdsal-config</module>
<module>netconf-auth</module>
<module>netconf-usermanager</module>
<module>netconf-testtool</module>
+ <module>netconf-notifications-impl</module>
+ <module>netconf-notifications-api</module>
<module>netconf-artifacts</module>
</modules>
<artifactId>networkconfig.neutron.implementation</artifactId>
<version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
- <properties>
- <enunciate.version>1.26.2</enunciate.version>
- </properties>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
+ <artifactId>enunciate-parent</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/enunciate-parent</relativePath>
</parent>
<artifactId>networkconfig.neutron.northbound</artifactId>
<version>0.5.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>networkconfig.neutron</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ </dependency>
</dependencies>
<build>
<manifestLocation>${project.basedir}/src/main/resources/META-INF</manifestLocation>
</configuration>
</plugin>
- <plugin>
- <groupId>org.codehaus.enunciate</groupId>
- <artifactId>maven-enunciate-plugin</artifactId>
- </plugin>
</plugins>
</build>
<scm>
import java.util.List;
+/**
+ * This interface defines the methods for Neutron Requests
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.northbound.api.INeutronRequest}
+ */
+@Deprecated
public interface INeutronRequest<T extends INeutronObject> {
public T getSingleton();
public boolean isSingleton();
firewallInterface.addNeutronFirewall(singleton);
Object[] instances = NeutronUtil.getInstances(INeutronFirewallAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallAware service = (INeutronFirewallAware) instance;
- int status = service.canCreateNeutronFirewall(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallAware service = (INeutronFirewallAware) instance;
+ int status = service.canCreateNeutronFirewall(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
firewallInterface.addNeutronFirewall(singleton);
if (instances != null) {
throw new BadRequestException("Firewall UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallAware service = (INeutronFirewallAware) instance;
- int status = service.canCreateNeutronFirewall(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallAware service = (INeutronFirewallAware) instance;
+ int status = service.canCreateNeutronFirewall(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
Object[] instances = NeutronUtil.getInstances(INeutronFirewallAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallAware service = (INeutronFirewallAware) instance;
- int status = service.canUpdateNeutronFirewall(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallAware service = (INeutronFirewallAware) instance;
+ int status = service.canUpdateNeutronFirewall(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronFirewall singleton = firewallInterface.getNeutronFirewall(firewallUUID);
Object[] instances = NeutronUtil.getInstances(INeutronFirewallAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallAware service = (INeutronFirewallAware) instance;
- int status = service.canDeleteNeutronFirewall(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallAware service = (INeutronFirewallAware) instance;
+ int status = service.canDeleteNeutronFirewall(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronFirewallPolicyAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
- int status = service.canCreateNeutronFirewallPolicy(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
+ int status = service.canCreateNeutronFirewallPolicy(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
firewallPolicyInterface.addNeutronFirewallPolicy(singleton);
if (instances != null) {
throw new BadRequestException("Firewall Policy UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
- int status = service.canCreateNeutronFirewallPolicy(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
+ int status = service.canCreateNeutronFirewallPolicy(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronFirewallPolicyAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
- int status = service.canUpdateNeutronFirewallPolicy(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
+ int status = service.canUpdateNeutronFirewallPolicy(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronFirewallPolicy singleton = firewallPolicyInterface.getNeutronFirewallPolicy(firewallPolicyUUID);
Object[] instances = NeutronUtil.getInstances(INeutronFirewallPolicyAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
- int status = service.canDeleteNeutronFirewallPolicy(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallPolicyAware service = (INeutronFirewallPolicyAware) instance;
+ int status = service.canDeleteNeutronFirewallPolicy(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
firewallPolicyInterface.removeNeutronFirewallPolicy(firewallPolicyUUID);
firewallRuleInterface.addNeutronFirewallRule(singleton);
Object[] instances = NeutronUtil.getInstances(INeutronFirewallRuleAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
- int status = service.canCreateNeutronFirewallRule(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
+ int status = service.canCreateNeutronFirewallRule(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// add rule to cache
singleton.initDefaults();
throw new BadRequestException("Firewall Rule UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
- int status = service.canCreateNeutronFirewallRule(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
+ int status = service.canCreateNeutronFirewallRule(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronFirewallRuleAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
- int status = service.canUpdateNeutronFirewallRule(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
+ int status = service.canUpdateNeutronFirewallRule(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronFirewallRule singleton = firewallRuleInterface.getNeutronFirewallRule(firewallRuleUUID);
Object[] instances = NeutronUtil.getInstances(INeutronFirewallRuleAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
- int status = service.canDeleteNeutronFirewallRule(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFirewallRuleAware service = (INeutronFirewallRuleAware) instance;
+ int status = service.canDeleteNeutronFirewallRule(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
}
Object[] instances = NeutronUtil.getInstances(INeutronFloatingIPAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFloatingIPAware service = (INeutronFloatingIPAware) instance;
- int status = service.canCreateFloatingIP(singleton);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFloatingIPAware service = (INeutronFloatingIPAware) instance;
+ int status = service.canCreateFloatingIP(singleton);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
floatingIPInterface.addFloatingIP(singleton);
if (instances != null) {
NeutronFloatingIP target = floatingIPInterface.getFloatingIP(floatingipUUID);
Object[] instances = NeutronUtil.getInstances(INeutronFloatingIPAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFloatingIPAware service = (INeutronFloatingIPAware) instance;
- int status = service.canUpdateFloatingIP(singleton, target);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFloatingIPAware service = (INeutronFloatingIPAware) instance;
+ int status = service.canUpdateFloatingIP(singleton, target);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
floatingIPInterface.updateFloatingIP(floatingipUUID, singleton);
target = floatingIPInterface.getFloatingIP(floatingipUUID);
NeutronFloatingIP singleton = floatingIPInterface.getFloatingIP(floatingipUUID);
Object[] instances = NeutronUtil.getInstances(INeutronFloatingIPAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronFloatingIPAware service = (INeutronFloatingIPAware) instance;
- int status = service.canDeleteFloatingIP(singleton);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronFloatingIPAware service = (INeutronFloatingIPAware) instance;
+ int status = service.canDeleteFloatingIP(singleton);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
floatingIPInterface.removeFloatingIP(floatingipUUID);
if (instances != null) {
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerHealthMonitorAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
- int status = service.canCreateNeutronLoadBalancerHealthMonitor(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canCreateNeutronLoadBalancerHealthMonitor(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
loadBalancerHealthMonitorInterface.addNeutronLoadBalancerHealthMonitor(singleton);
if (instances != null) {
throw new BadRequestException("LoadBalancerHealthMonitor UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
- int status = service.canCreateNeutronLoadBalancerHealthMonitor(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canCreateNeutronLoadBalancerHealthMonitor(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerHealthMonitorAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
- int status = service.canUpdateNeutronLoadBalancerHealthMonitor(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canUpdateNeutronLoadBalancerHealthMonitor(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronLoadBalancerHealthMonitor singleton = loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerHealthMonitorAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
- int status = service.canDeleteNeutronLoadBalancerHealthMonitor(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerHealthMonitor(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
loadBalancerHealthMonitorInterface.removeNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
if (instances != null) {
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerListenerAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
- int status = service.canCreateNeutronLoadBalancerListener(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canCreateNeutronLoadBalancerListener(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
loadBalancerListenerInterface.addNeutronLoadBalancerListener(singleton);
if (instances != null) {
throw new BadRequestException("LoadBalancerListener UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
- int status = service.canCreateNeutronLoadBalancerListener(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canCreateNeutronLoadBalancerListener(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerListenerAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
- int status = service.canUpdateNeutronLoadBalancerListener(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canUpdateNeutronLoadBalancerListener(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronLoadBalancerListener singleton = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerListenerAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
- int status = service.canDeleteNeutronLoadBalancerListener(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerListener(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
loadBalancerListenerInterface.removeNeutronLoadBalancerListener(loadBalancerListenerID);
}
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
- int status = service.canCreateNeutronLoadBalancer(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canCreateNeutronLoadBalancer(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
+
loadBalancerInterface.addNeutronLoadBalancer(singleton);
if (instances != null) {
for (Object instance : instances) {
throw new BadRequestException("Load Balancer Pool UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
- int status = service.canCreateNeutronLoadBalancer(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canCreateNeutronLoadBalancer(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
- int status = service.canUpdateNeutronLoadBalancer(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canUpdateNeutronLoadBalancer(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronLoadBalancer singleton = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
- int status = service.canDeleteNeutronLoadBalancer(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canDeleteNeutronLoadBalancer(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
+
loadBalancerInterface.removeNeutronLoadBalancer(loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerPoolMemberAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
- int status = service.canCreateNeutronLoadBalancerPoolMember(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPoolMember(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
+
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
throw new BadRequestException("Load Balancer PoolMember UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
- int status = service.canCreateNeutronLoadBalancerPoolMember(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPoolMember(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerPoolMemberAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
- int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
if (instances != null) {
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerPoolAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
- int status = service.canCreateNeutronLoadBalancerPool(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPool(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
loadBalancerPoolInterface.addNeutronLoadBalancerPool(singleton);
if (instances != null) {
throw new BadRequestException("Load Balancer Pool UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
- int status = service.canCreateNeutronLoadBalancerPool(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPool(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerPoolAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
- int status = service.canUpdateNeutronLoadBalancerPool(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canUpdateNeutronLoadBalancerPool(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronLoadBalancerPool singleton = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
Object[] instances = NeutronUtil.getInstances(INeutronLoadBalancerPoolAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
- int status = service.canDeleteNeutronLoadBalancerPool(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPool(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronNetworkAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronNetworkAware service = (INeutronNetworkAware) instance;
- int status = service.canCreateNetwork(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronNetworkAware service = (INeutronNetworkAware) instance;
+ int status = service.canCreateNetwork(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// add network to cache
throw new BadRequestException("network UUID already exists");
}
if (instances != null) {
- for (Object instance: instances) {
- INeutronNetworkAware service = (INeutronNetworkAware) instance;
- int status = service.canCreateNetwork(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance: instances) {
+ INeutronNetworkAware service = (INeutronNetworkAware) instance;
+ int status = service.canCreateNetwork(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
testMap.put(test.getID(),test);
}
Object[] instances = NeutronUtil.getInstances(INeutronNetworkAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronNetworkAware service = (INeutronNetworkAware) instance;
- NeutronNetwork original = networkInterface.getNetwork(netUUID);
- int status = service.canUpdateNetwork(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronNetworkAware service = (INeutronNetworkAware) instance;
+ NeutronNetwork original = networkInterface.getNetwork(netUUID);
+ int status = service.canUpdateNetwork(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// update network object and return the modified object
NeutronNetwork singleton = networkInterface.getNetwork(netUUID);
Object[] instances = NeutronUtil.getInstances(INeutronNetworkAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronNetworkAware service = (INeutronNetworkAware) instance;
- int status = service.canDeleteNetwork(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronNetworkAware service = (INeutronNetworkAware) instance;
+ int status = service.canDeleteNetwork(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
+
networkInterface.removeNetwork(netUUID);
if (instances != null) {
for (Object instance : instances) {
classes.add(NeutronLoadBalancerPoolNorthbound.class);
classes.add(NeutronLoadBalancerHealthMonitorNorthbound.class);
classes.add(NeutronLoadBalancerPoolMembersNorthbound.class);
+ classes.add(MOXyJsonProvider.class);
return classes;
}
moxyJsonProvider.setMarshalEmptyCollections(true);
moxyJsonProvider.setValueWrapper("$");
- Map<String, String> namespacePrefixMapper = new HashMap<String, String>(1);
+ Map<String, String> namespacePrefixMapper = new HashMap<String, String>(3);
namespacePrefixMapper.put("router", "router"); // FIXME: fill in with XSD
namespacePrefixMapper.put("provider", "provider"); // FIXME: fill in with XSD
+ namespacePrefixMapper.put("binding", "binding");
moxyJsonProvider.setNamespacePrefixMapper(namespacePrefixMapper);
moxyJsonProvider.setNamespaceSeparator(':');
Object[] instances = NeutronUtil.getInstances(INeutronPortAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronPortAware service = (INeutronPortAware) instance;
- int status = service.canCreatePort(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronPortAware service = (INeutronPortAware) instance;
+ int status = service.canCreatePort(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
-
// add the port to the cache
portInterface.addPort(singleton);
if (instances != null) {
}
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronPortAware service = (INeutronPortAware) instance;
- int status = service.canCreatePort(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronPortAware service = (INeutronPortAware) instance;
+ int status = service.canCreatePort(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
Object[] instances = NeutronUtil.getInstances(INeutronPortAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronPortAware service = (INeutronPortAware) instance;
- int status = service.canUpdatePort(singleton, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronPortAware service = (INeutronPortAware) instance;
+ int status = service.canUpdatePort(singleton, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// Verify the new fixed ips are valid
NeutronPort singleton = portInterface.getPort(portUUID);
Object[] instances = NeutronUtil.getInstances(INeutronPortAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronPortAware service = (INeutronPortAware) instance;
- int status = service.canDeletePort(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronPortAware service = (INeutronPortAware) instance;
+ int status = service.canDeletePort(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
portInterface.removePort(portUUID);
if (instances != null) {
}
Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronRouterAware service = (INeutronRouterAware) instance;
- int status = service.canCreateRouter(singleton);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canCreateRouter(singleton);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronRouterAware service = (INeutronRouterAware) instance;
- int status = service.canUpdateRouter(singleton, original);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canUpdateRouter(singleton, original);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
* if the external gateway info is being changed, verify that the new network
NeutronRouter singleton = routerInterface.getRouter(routerUUID);
Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronRouterAware service = (INeutronRouterAware) instance;
- int status = service.canDeleteRouter(singleton);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canDeleteRouter(singleton);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
routerInterface.removeRouter(routerUUID);
if (instances != null) {
throw new ResourceConflictException("Target Port already allocated");
Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronRouterAware service = (INeutronRouterAware) instance;
- int status = service.canAttachInterface(target, input);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canAttachInterface(target, input);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
//mark the port device id and device owner fields
Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronRouterAware service = (INeutronRouterAware) instance;
- int status = service.canDetachInterface(target, input);
- if (status < 200 || status > 299)
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canDetachInterface(target, input);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// reset the port ownership
input.setSubnetUUID(targetInterface.getSubnetUUID());
input.setID(target.getID());
input.setTenantID(target.getTenantID());
+ Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
+ if (instances != null) {
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canDetachInterface(target, input);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
+ }
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
+ }
NeutronPort port = portInterface.getPort(input.getPortUUID());
port.setDeviceID(null);
port.setDeviceOwner(null);
target.removeInterface(input.getPortUUID());
- Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
for (Object instance : instances) {
INeutronRouterAware service = (INeutronRouterAware) instance;
service.neutronRouterInterfaceDetached(target, input);
}
if (!subnet.isValidIP(port.getFixedIPs().get(0).getIpAddress()))
throw new ResourceConflictException("Target Port IP not in Target Subnet");
- input.setID(target.getID());
- input.setTenantID(target.getTenantID());
Object[] instances = NeutronUtil.getInstances(INeutronRouterAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronRouterAware service = (INeutronRouterAware) instance;
- service.canDetachInterface(target, input);
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ int status = service.canDetachInterface(target, input);
+ if (status < 200 || status > 299)
+ return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
+ input.setID(target.getID());
+ input.setTenantID(target.getTenantID());
port.setDeviceID(null);
port.setDeviceOwner(null);
target.removeInterface(input.getPortUUID());
- for (Object instance : instances) {
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronRouterAware service = (INeutronRouterAware) instance;
+ service.canDetachInterface(target, input);
+ }
+ } for (Object instance : instances) {
INeutronRouterAware service = (INeutronRouterAware) instance;
service.neutronRouterInterfaceDetached(target, input);
}
Object[] instances = NeutronUtil.getInstances(INeutronSecurityGroupAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
- int status = service.canCreateNeutronSecurityGroup(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
+ int status = service.canCreateNeutronSecurityGroup(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// Add to Neutron cache
securityGroupInterface.addNeutronSecurityGroup(singleton);
if (securityGroupInterface.neutronSecurityGroupExists(test.getSecurityGroupUUID())) {
throw new BadRequestException("Security Group UUID already is already created");
}
- if (instances != null) for (Object instance : instances) {
- INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
- int status = service.canCreateNeutronSecurityGroup(test);
- if ((status < 200) || (status > 299)) return Response.status(status).build();
+ if (instances != null) {
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
+ int status = service.canCreateNeutronSecurityGroup(test);
+ if ((status < 200) || (status > 299)) return Response.status(status).build();
+ }
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
+ }
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
Object[] instances = NeutronUtil.getInstances(INeutronSecurityGroupAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
- int status = service.canUpdateNeutronSecurityGroup(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
+ int status = service.canUpdateNeutronSecurityGroup(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronSecurityGroup singleton = securityGroupInterface.getNeutronSecurityGroup(securityGroupUUID);
Object[] instances = NeutronUtil.getInstances(INeutronSecurityGroupAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
- int status = service.canDeleteNeutronSecurityGroup(singleton);
- if ((status < 200) || (status > 299)) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityGroupAware service = (INeutronSecurityGroupAware) instance;
+ int status = service.canDeleteNeutronSecurityGroup(singleton);
+ if ((status < 200) || (status > 299)) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
}
return Response.status(204).build();
}
-}
\ No newline at end of file
+}
}
Object[] instances = NeutronUtil.getInstances(INeutronSecurityRuleAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
- int status = service.canCreateNeutronSecurityRule(singleton);
- if ((status < 200) || (status > 299)) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
+ int status = service.canCreateNeutronSecurityRule(singleton);
+ if ((status < 200) || (status > 299)) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
// add rule to cache
throw new BadRequestException("Security Rule UUID already exists");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
- int status = service.canCreateNeutronSecurityRule(test);
- if ((status < 200) || (status > 299)) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
+ int status = service.canCreateNeutronSecurityRule(test);
+ if ((status < 200) || (status > 299)) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
Object[] instances = NeutronUtil.getInstances(INeutronSecurityRuleAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
- int status = service.canUpdateNeutronSecurityRule(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
+ int status = service.canUpdateNeutronSecurityRule(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronSecurityRule singleton = securityRuleInterface.getNeutronSecurityRule(securityRuleUUID);
Object[] instances = NeutronUtil.getInstances(INeutronSecurityRuleAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
- int status = service.canDeleteNeutronSecurityRule(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSecurityRuleAware service = (INeutronSecurityRuleAware) instance;
+ int status = service.canDeleteNeutronSecurityRule(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
+
/*
* remove it and return 204 status
*/
}
return Response.status(204).build();
}
-}
\ No newline at end of file
+}
}
Object[] instances = NeutronUtil.getInstances(INeutronSubnetAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSubnetAware service = (INeutronSubnetAware) instance;
- int status = service.canCreateSubnet(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSubnetAware service = (INeutronSubnetAware) instance;
+ int status = service.canCreateSubnet(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
subnetInterface.addSubnet(singleton);
if (instances != null) {
throw new ResourceConflictException("IP pool overlaps with gateway");
}
if (instances != null) {
- for (Object instance : instances) {
- INeutronSubnetAware service = (INeutronSubnetAware) instance;
- int status = service.canCreateSubnet(test);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSubnetAware service = (INeutronSubnetAware) instance;
+ int status = service.canCreateSubnet(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
}
Object[] instances = NeutronUtil.getInstances(INeutronSubnetAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSubnetAware service = (INeutronSubnetAware) instance;
- int status = service.canUpdateSubnet(delta, original);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSubnetAware service = (INeutronSubnetAware) instance;
+ int status = service.canUpdateSubnet(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
NeutronSubnet singleton = subnetInterface.getSubnet(subnetUUID);
Object[] instances = NeutronUtil.getInstances(INeutronSubnetAware.class, this);
if (instances != null) {
- for (Object instance : instances) {
- INeutronSubnetAware service = (INeutronSubnetAware) instance;
- int status = service.canDeleteSubnet(singleton);
- if (status < 200 || status > 299) {
- return Response.status(status).build();
+ if (instances.length > 0) {
+ for (Object instance : instances) {
+ INeutronSubnetAware service = (INeutronSubnetAware) instance;
+ int status = service.canDeleteSubnet(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
}
+ } else {
+ throw new ServiceUnavailableException("No providers registered. Please try again later");
}
+ } else {
+ throw new ServiceUnavailableException("Couldn't get providers list. Please try again later");
}
/*
<artifactId>networkconfig.neutron</artifactId>
<version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
- <properties>
- <enunciate.version>1.26.2</enunciate.version>
- </properties>
<dependencies>
<dependency>
<groupId>commons-net</groupId>
/**
* This interface defines the methods a service that wishes to be aware of Firewall Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallAware}
*/
+@Deprecated
public interface INeutronFirewallAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Firewall objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallCRUD}
*/
+@Deprecated
public interface INeutronFirewallCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Firewall Policys needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallPolicyAware}
*/
+@Deprecated
public interface INeutronFirewallPolicyAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Firewall Policy objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallPolicyCRUD}
*/
+@Deprecated
public interface INeutronFirewallPolicyCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Firewall Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallRuleAware}
*/
+@Deprecated
public interface INeutronFirewallRuleAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Firewall Rule objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFirewallRuleCRUD}
*/
+@Deprecated
public interface INeutronFirewallRuleCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron FloatingIPs needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFloatingIPAware}
*/
+@Deprecated
public interface INeutronFloatingIPAware {
/**
/**
* This interface defines the methods for CRUD of NB FloatingIP objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronFloatingIPCRUD}
*/
+@Deprecated
public interface INeutronFloatingIPCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancer Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerAware}
*/
+@Deprecated
public interface INeutronLoadBalancerAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancer objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancerHealthMonitor Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerHealthMonitorAware}
*/
+@Deprecated
public interface INeutronLoadBalancerHealthMonitorAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancerHealthMonitor objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerHealthMonitorCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerHealthMonitorCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancerListener Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerListenerAware}
*/
+@Deprecated
public interface INeutronLoadBalancerListenerAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancerListener objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerListenerCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerListenerCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of LoadBalancerPool Rules needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolAware}
*/
+@Deprecated
public interface INeutronLoadBalancerPoolAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack LoadBalancerPool objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolCRUD}
*/
+@Deprecated
public interface INeutronLoadBalancerPoolCRUD {
/**
* Applications call this interface method to determine if a particular
*/
package org.opendaylight.controller.networkconfig.neutron;
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancerPoolMember objects needs to implement
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolMemberAware}
+ */
+
+@Deprecated
public interface INeutronLoadBalancerPoolMemberAware {
import java.util.List;
+/**
+ * This interface defines the methods for CRUD of NB OpenStack LoadBalancerPoolMember objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronLoadBalancerPoolMemberCRUD}
+ */
+
+@Deprecated
public interface INeutronLoadBalancerPoolMemberCRUD {
/**
/**
* This interface defines the methods a service that wishes to be aware of Neutron Networks needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronNetworkAware}
*/
+@Deprecated
public interface INeutronNetworkAware {
/**
/**
* This interface defines the methods for CRUD of NB network objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronNetworkCRUD}
*/
+@Deprecated
public interface INeutronNetworkCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This class contains behaviour common to Neutron configuration objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronObject}
*/
+@Deprecated
public interface INeutronObject {
public String getID();
public void setID(String id);
/**
* This interface defines the methods a service that wishes to be aware of Neutron Ports needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronPortAware}
*/
+@Deprecated
public interface INeutronPortAware {
/**
/**
* This interface defines the methods for CRUD of NB Port objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronPortCRUD}
*/
+@Deprecated
public interface INeutronPortCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron Routers needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronRouterAware}
*/
+@Deprecated
public interface INeutronRouterAware {
/**
/**
* This interface defines the methods for CRUD of NB Router objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronRouterCRUD}
*/
+@Deprecated
public interface INeutronRouterCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron Security Groups needs to implement
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityGroupAware}
*/
+@Deprecated
public interface INeutronSecurityGroupAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Security Group objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityGroupCRUD}
*/
+@Deprecated
public interface INeutronSecurityGroupCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods required to be aware of Neutron Security Rules
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityRuleAware}
*/
+@Deprecated
public interface INeutronSecurityRuleAware {
/**
/**
* This interface defines the methods for CRUD of NB OpenStack Security Rule objects
+ *
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSecurityRuleCRUD}
*/
+@Deprecated
public interface INeutronSecurityRuleCRUD {
/**
* Applications call this interface method to determine if a particular
/**
* This interface defines the methods a service that wishes to be aware of Neutron Subnets needs to implement
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSubnetAware}
*/
+@Deprecated
public interface INeutronSubnetAware {
/**
/**
* This interface defines the methods for CRUD of NB Subnet objects
*
+ * @deprecated Replaced by {@link org.opendaylight.neutron.neutron.spi.INeutronSubnetCRUD}
*/
+@Deprecated
public interface INeutronSubnetCRUD {
/**
* Applications call this interface method to determine if a particular
package org.opendaylight.controller.networkconfig.neutron;
+
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
@XmlElement (name="security_groups")
List<NeutronSecurityGroup> securityGroups;
+ @XmlElement (namespace= "binding", name="host_id")
+ String bindinghostID;
+
+ @XmlElement (namespace= "binding", name="vnic_type")
+ String bindingvnicType;
+
+ @XmlElement (namespace= "binding", name="vif_type")
+ String bindingvifType;
+
+
/* this attribute stores the floating IP address assigned to
* each fixed IP address
*/
this.securityGroups = securityGroups;
}
+ public String getBindinghostID() {
+ return bindinghostID;
+ }
+
+ public void setBindinghostID(String bindinghostID) {
+ this.bindinghostID = bindinghostID;
+ }
+
+ public String getBindingvnicType() {
+ return bindingvnicType;
+ }
+
+ public void setBindingvnicType(String bindingvnicType) {
+ this.bindingvnicType = bindingvnicType;
+ }
+
+ public String getBindingvifType() {
+ return bindingvifType;
+ }
+
+ public void setBindingvifType(String bindingvifType) {
+ this.bindingvifType = bindingvifType;
+ }
+
public NeutronFloatingIP getFloatingIP(String key) {
if (!floatingIPMap.containsKey(key)) {
return null;
return "NeutronPort [portUUID=" + portUUID + ", networkUUID=" + networkUUID + ", name=" + name
+ ", adminStateUp=" + adminStateUp + ", status=" + status + ", macAddress=" + macAddress
+ ", fixedIPs=" + fixedIPs + ", deviceID=" + deviceID + ", deviceOwner=" + deviceOwner + ", tenantID="
- + tenantID + ", floatingIPMap=" + floatingIPMap + ", securityGroups=" + securityGroups + "]";
+ + tenantID + ", floatingIPMap=" + floatingIPMap + ", securityGroups=" + securityGroups
+ + ", bindinghostID=" + bindinghostID + ", bindingvnicType=" + bindingvnicType
+ + ", bindingvifType=" + bindingvifType + "]";
}
}
<artifactId>releasepom</artifactId>
<version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
- <name>controller</name> <!-- Used by Sonar to set project name -->
-
- <prerequisites>
- <maven>3.0</maven>
- </prerequisites>
+ <name>controller</name>
+ <!-- Used by Sonar to set project name -->
<modules>
<module>opendaylight/networkconfiguration/neutron</module>
<module>opendaylight/networkconfiguration/neutron/implementation</module>
<module>opendaylight/networkconfiguration/neutron/northbound</module>
+ <module>opendaylight/networkconfiguration/neutron/features</module>
<!-- Parents -->
<module>opendaylight/commons/concepts</module>
<module>opendaylight/commons/logback_settings</module>
<module>opendaylight/commons/filter-valve</module>
<module>opendaylight/commons/liblldp</module>
+ <module>opendaylight/commons/enunciate-parent</module>
<!-- Karaf Distribution -->
<module>karaf</module>