<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-common</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-util</artifactId>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-api</artifactId>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
<feature name='odl-config-all' version='${project.version}' description="OpenDaylight :: Config :: All">
- <feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${project.version}'>odl-config-api</feature>
<feature version='${project.version}'>odl-config-netty-config-api</feature>
<feature version='${project.version}'>odl-config-core</feature>
<feature version='${project.version}'>odl-config-manager</feature>
</feature>
- <feature name='odl-mdsal-common' version='${mdsal.version}' description="OpenDaylight :: Config :: All">
- <feature version='${yangtools.version}'>odl-yangtools-data-binding</feature>
- <bundle>mvn:org.opendaylight.controller/sal-common/${mdsal.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-common-api/${mdsal.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-common-impl/${mdsal.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-common-util/${mdsal.version}</bundle>
- </feature>
-
<feature name='odl-config-api' version='${project.version}' description="OpenDaylight :: Config :: API">
<bundle>mvn:org.opendaylight.controller/config-api/${project.version}</bundle>
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
- <feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${project.version}'>odl-config-api</feature>
<bundle>mvn:org.opendaylight.controller/config-util/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/yang-jmx-generator/${project.version}</bundle>
<feature version='${project.version}'>odl-config-core</feature>
<bundle>mvn:org.opendaylight.controller/config-manager/${project.version}</bundle>
</feature>
-</features>
\ No newline at end of file
+</features>
<artifactId>sal-akka-raft</artifactId>
<version>${mdsal.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-util</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-core-spi</artifactId>
<feature version='${project.version}'>odl-mdsal-xsql</feature>
<feature version='${project.version}'>odl-toaster</feature>
</feature>
+ <feature name='odl-mdsal-common' version='${mdsal.version}' description="OpenDaylight :: MDSAL :: Common">
+ <feature version='${yangtools.version}'>odl-yangtools-data-binding</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-common/${mdsal.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-common-api/${mdsal.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-common-impl/${mdsal.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-common-util/${mdsal.version}</bundle>
+ </feature>
<feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
</properties>
<dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>networkconfig.neutron</artifactId>
- </dependency>
<dependency>
<groupId>org.osgi</groupId>
<artifactId>org.osgi.core</artifactId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-test</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>features-test</artifactId>
- <version>0.7.0-SNAPSHOT</version>
- </dependency>
</dependencies>
<build>
import javax.inject.Inject;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.apache.karaf.features.Feature;
import org.apache.karaf.features.FeaturesService;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>odlparent</artifactId>
+ <version>1.5.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>karaf-parent</artifactId>
+ <name>${project.artifactId}</name>
+ <packaging>pom</packaging>
+ <prerequisites>
+ <maven>3.1.1</maven>
+ </prerequisites>
+ <properties>
+ <branding.version>1.1.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
+ </properties>
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <!-- scope is compile so all features (there is only one) are installed
+ into startup.properties and the feature repo itself is not installed -->
+ <groupId>org.apache.karaf.features</groupId>
+ <artifactId>framework</artifactId>
+ <version>${karaf.version}</version>
+ <type>kar</type>
+ <exclusions>
+ <exclusion>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.sshd</groupId>
+ <artifactId>sshd-core</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+ <dependencies>
+ <!-- ODL Branding -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>karaf.branding</artifactId>
+ <version>${branding.version}</version>
+ <scope>compile</scope>
+ </dependency>
+
+ <!-- Resources needed -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>opendaylight-karaf-resources</artifactId>
+ <version>${karaf.resources.version}</version>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>cleanVersions</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>copy</goal>
+ <goal>unpack</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.karaf.tooling</groupId>
+ <artifactId>karaf-maven-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>commands-generate-help</goal>
+ <goal>features-add-to-repository</goal>
+ <goal>install-kars</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.fusesource.scalate</groupId>
+ <artifactId>maven-scalate-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>sitegen</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.apache.servicemix.tooling</groupId>
+ <artifactId>depends-maven-plugin</artifactId>
+ <versionRange>[0,)</versionRange>
+ <goals>
+ <goal>generate-depends-file</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-resources-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>copy-resources</id>
+ <!-- here the phase you need -->
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${basedir}/target/assembly</outputDirectory>
+ <resources>
+ <resource>
+ <directory>src/main/assembly</directory>
+ </resource>
+ </resources>
+ <overwrite>true</overwrite>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.karaf.tooling</groupId>
+ <artifactId>karaf-maven-plugin</artifactId>
+ <version>${karaf.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <!-- no startupFeatures -->
+ <bootFeatures>
+ <feature>standard</feature>
+ <feature>${karaf.localFeature}</feature>
+ </bootFeatures>
+ <!-- no installedFeatures -->
+ </configuration>
+ <executions>
+ <execution>
+ <id>populate-system</id>
+ <phase>generate-resources</phase>
+ <goals>
+ <goal>features-add-to-repository</goal>
+ </goals>
+ <configuration>
+ <descriptors>
+ <descriptor>mvn:org.apache.karaf.features/standard/${karaf.version}/xml/features</descriptor>
+ </descriptors>
+ <features>
+ <feature>standard</feature>
+ <feature>config</feature>
+ <feature>package</feature>
+ <feature>kar</feature>
+ <feature>ssh</feature>
+ <feature>management</feature>
+ <feature>war</feature>
+ </features>
+ <repository>target/assembly/system</repository>
+ </configuration>
+ </execution>
+ <execution>
+ <id>process-resources</id>
+ <goals>
+ <goal>install-kars</goal>
+ </goals>
+ <phase>process-resources</phase>
+ </execution>
+ <execution>
+ <id>package</id>
+ <goals>
+ <goal>instance-create-archive</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <version>${checkstyle.version}</version>
+ <configuration>
+ <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/configuration\/initial\/</excludes>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>copy</id>
+ <goals>
+ <goal>copy</goal>
+ </goals>
+ <!-- here the phase you need -->
+ <phase>generate-resources</phase>
+ <configuration>
+ <artifactItems>
+ <artifactItem>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>karaf.branding</artifactId>
+ <!-- Use the branding.version property declared in this POM's
+ <properties>; karaf.branding.version is not defined anywhere
+ in this file and would fail interpolation. destFileName below
+ already uses branding.version. -->
+ <version>${branding.version}</version>
+ <outputDirectory>target/assembly/lib</outputDirectory>
+ <destFileName>karaf.branding-${branding.version}.jar</destFileName>
+ </artifactItem>
+ </artifactItems>
+ </configuration>
+ </execution>
+ <execution>
+ <id>unpack-karaf-resources</id>
+ <goals>
+ <goal>unpack-dependencies</goal>
+ </goals>
+ <phase>prepare-package</phase>
+ <configuration>
+ <outputDirectory>${project.build.directory}/assembly</outputDirectory>
+ <groupId>org.opendaylight.controller</groupId>
+ <includeArtifactIds>opendaylight-karaf-resources</includeArtifactIds>
+ <excludes>META-INF\/**</excludes>
+ <excludeTransitive>true</excludeTransitive>
+ <ignorePermissions>false</ignorePermissions>
+ </configuration>
+ </execution>
+ <execution>
+ <id>org.ops4j.pax.url.mvn.cfg</id>
+ <goals>
+ <goal>copy</goal>
+ </goals>
+ <phase>prepare-package</phase>
+ <configuration>
+ <artifactItems>
+ <artifactItem>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>opendaylight-karaf-resources</artifactId>
+ <type>properties</type>
+ <classifier>config</classifier>
+ <overWrite>true</overWrite>
+ <outputDirectory>${project.build.directory}/assembly/etc/</outputDirectory>
+ <destFileName>org.ops4j.pax.url.mvn.cfg</destFileName>
+ </artifactItem>
+ </artifactItems>
+ <overWriteReleases>true</overWriteReleases>
+ <overWriteSnapshots>true</overWriteSnapshots>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ <configuration>
+ <tasks>
+ <chmod perm="755">
+ <fileset dir="${project.build.directory}/assembly/bin">
+ <include name="karaf"/>
+ <include name="instance"/>
+ <include name="start"/>
+ <include name="stop"/>
+ <include name="status"/>
+ <include name="client"/>
+ <include name="shell"/>
+ </fileset>
+ </chmod>
+ </tasks>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <artifactId>maven-resources-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.karaf.tooling</groupId>
+ <artifactId>karaf-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-antrun-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>opendaylight-karaf-empty</artifactId>
<packaging>pom</packaging>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>opendaylight-karaf-resources</artifactId>
<description>Resources for opendaylight-karaf</description>
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>src/main/assembly/etc/org.ops4j.pax.url.mvn.cfg</file>
+ <type>properties</type>
+ <classifier>config</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
</plugins>
</build>
</project>
--- /dev/null
+################################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+#
+# If set to true, the following property will not allow any certificate to be used
+# when accessing Maven repositories through SSL
+#
+#org.ops4j.pax.url.mvn.certificateCheck=
+
+#
+# Path to the local Maven settings file.
+# The repositories defined in this file will be automatically added to the list
+# of default repositories if the 'org.ops4j.pax.url.mvn.repositories' property
+# below is not set.
+# The following locations are checked for the existence of the settings.xml file
+# * 1. looks for the specified url
+# * 2. if not found looks for ${user.home}/.m2/settings.xml
+# * 3. if not found looks for ${maven.home}/conf/settings.xml
+# * 4. if not found looks for ${M2_HOME}/conf/settings.xml
+#
+#org.ops4j.pax.url.mvn.settings=
+
+#
+# Path to the local Maven repository which is used to avoid downloading
+# artifacts when they already exist locally.
+# The value of this property will be extracted from the settings.xml file
+# above, or defaulted to:
+# System.getProperty( "user.home" ) + "/.m2/repository"
+#
+org.ops4j.pax.url.mvn.localRepository=${karaf.home}/${karaf.default.repository}
+
+#
+# Default this to false. It's just weird to use undocumented repos
+#
+org.ops4j.pax.url.mvn.useFallbackRepositories=false
+
+#
+# Uncomment if you don't want to use the proxy settings
+# from the Maven conf/settings.xml file
+#
+# org.ops4j.pax.url.mvn.proxySupport=false
+
+#
+# Disable aether support by default. This ensure that the defaultRepositories
+# below will be used
+#
+#org.ops4j.pax.url.mvn.disableAether=true
+
+#
+# Comma separated list of repositories scanned when resolving an artifact.
+# Those repositories will be checked before iterating through the
+# below list of repositories and even before the local repository
+# A repository url can be appended with zero or more of the following flags:
+# @snapshots : the repository contains snapshots
+# @noreleases : the repository does not contain any released artifacts
+#
+# The following property value will add the system folder as a repo.
+#
+#org.ops4j.pax.url.mvn.defaultRepositories=
+
+# Use the default local repo (e.g. ~/.m2/repository) as a "remote" repo
+org.ops4j.pax.url.mvn.defaultLocalRepoAsRemote=false
+
+#
+# Comma separated list of repositories scanned when resolving an artifact.
+# The default list includes the following repositories containing releases:
+# http://repo1.maven.org/maven2
+# http://repository.apache.org/content/groups/snapshots-group
+# http://svn.apache.org/repos/asf/servicemix/m2-repo
+# http://repository.springsource.com/maven/bundles/release
+# http://repository.springsource.com/maven/bundles/external
+# To add repositories to the default ones, prepend '+' to the list of repositories
+# to add.
+# A repository url can be appended with zero or more of the following flags:
+# @snapshots : the repository contains snapshots
+# @noreleases : the repository does not contain any released artifacts
+# @id=repoid : the id for the repository, just like in the settings.xml this is optional but recommended
+#
+# The default list doesn't contain any repository containing snapshots as it can impact the artifacts resolution.
+# You may want to add the following repositories containing snapshots:
+# http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases
+# http://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@noreleases
+# http://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases
+#
+org.ops4j.pax.url.mvn.repositories= \
+ file:${karaf.home}/${karaf.default.repository}@id=system.repository, \
+ file:${karaf.data}/kar@id=kar.repository@multi, \
+ http://repo1.maven.org/maven2@id=central, \
+ http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, \
+ http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>distribution.opendaylight-karaf</artifactId>
<packaging>pom</packaging>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.parent</artifactId>
+ <version>1.1.0-SNAPSHOT</version>
+ <relativePath>../opendaylight/commons/parent</relativePath>
+ </parent>
+
+ <artifactId>karaf-aggregator</artifactId>
+ <packaging>pom</packaging>
+ <modules>
+ <module>karaf-branding</module>
+ <module>karaf-parent</module>
+ <module>opendaylight-karaf</module>
+ <module>opendaylight-karaf-empty</module>
+ <module>opendaylight-karaf-resources</module>
+ </modules>
+</project>
\r
import java.util.Hashtable;\r
\r
-import static junit.framework.Assert.assertNull;\r
+import static org.junit.Assert.assertNull;\r
import static org.junit.Assert.assertEquals;\r
\r
public class ContainerImplTest {\r
<?xml version="1.0" encoding="UTF-8"?>
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<?xml version="1.0" encoding="UTF-8"?>
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<?xml version="1.0" encoding="UTF-8"?>
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<?xml version="1.0" encoding="UTF-8"?>
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<?xml version="1.0" encoding="UTF-8"?>
-<project>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.opendaylight.controller.sal.utils.NodeCreator;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.fail;
public class StaticRouteTest {
*/
package org.opendaylight.controller.forwardingrulesmanager.internal;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.fail;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import org.junit.Test;
-import junit.framework.Assert;
+import org.junit.Assert;
public class ARPTest {
package org.opendaylight.controller.sal.packet;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import java.util.Arrays;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.match.Match;
import java.util.Arrays;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
package org.opendaylight.controller.sal.packet;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.match.Match;
import java.util.Arrays;
import java.util.Map;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.match.Match;
package org.opendaylight.controller.sal.packet;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.match.Match;
package org.opendaylight.controller.sal.packet;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.match.Match;
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
import org.opendaylight.controller.sal.topology.IListenTopoUpdates;
import org.opendaylight.controller.sal.topology.ITopologyService;
+import org.opendaylight.controller.switchmanager.IInventoryListener;
import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.controller.topologymanager.ITopologyManager;
import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
props.put("cachenames", propSet);
c.setInterface(new String[] { IListenTopoUpdates.class.getName(),
+ IInventoryListener.class.getName(),
ITopologyManager.class.getName(),
ITopologyManagerShell.class.getName(),
IConfigurationContainerAware.class.getName(),
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.opendaylight.controller.sal.utils.Status;
import org.opendaylight.controller.sal.utils.StatusCode;
+import org.opendaylight.controller.switchmanager.IInventoryListener;
import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.controller.topologymanager.ITopologyManager;
import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
IConfigurationContainerAware,
IListenTopoUpdates,
IObjectReader,
+ IInventoryListener,
CommandProvider {
protected static final String TOPOEDGESDB = "topologymanager.edgesDB";
protected static final String TOPOHOSTSDB = "topologymanager.hostsDB";
protected static final String TOPOUSERLINKSDB = "topologymanager.userLinksDB";
private static final String USER_LINKS_FILE_NAME = "userTopology.conf";
private static final Logger log = LoggerFactory.getLogger(TopologyManagerImpl.class);
+ private static final long PENDING_UPDATE_TIMEOUT = 5000L;
+
private ITopologyService topoService;
private IClusterContainerServices clusterContainerService;
private IConfigurationContainerService configurationService;
private BlockingQueue<TopoEdgeUpdate> notifyQ = new LinkedBlockingQueue<TopoEdgeUpdate>();
private volatile Boolean shuttingDown = false;
private Thread notifyThread;
+ private final Map<NodeConnector, List<PendingUpdateTask>> pendingUpdates =
+ new HashMap<NodeConnector, List<PendingUpdateTask>>();
+ private final BlockingQueue<TopoEdgeUpdate> updateQ =
+ new LinkedBlockingQueue<TopoEdgeUpdate>();
+ private Timer pendingTimer;
+ private Thread updateThread;
+
+ private class PendingEdgeUpdate extends TopoEdgeUpdate {
+ private PendingEdgeUpdate(Edge e, Set<Property> p, UpdateType t) {
+ super(e, p, t);
+ }
+ }
+
+ private class UpdateTopology implements Runnable {
+ @Override
+ public void run() {
+ log.trace("Start topology update thread");
+
+ while (!shuttingDown) {
+ try {
+ List<TopoEdgeUpdate> list = new ArrayList<TopoEdgeUpdate>();
+ TopoEdgeUpdate teu = updateQ.take();
+ for (; teu != null; teu = updateQ.poll()) {
+ list.add(teu);
+ }
+
+ if (!list.isEmpty()) {
+ log.trace("Update edges: {}", list);
+ doEdgeUpdate(list);
+ }
+ } catch (InterruptedException e) {
+ if (shuttingDown) {
+ break;
+ }
+ log.warn("Topology update thread interrupted", e);
+ } catch (Exception e) {
+ log.error("Exception on topology update thread", e);
+ }
+ }
+
+ log.trace("Exit topology update thread");
+ }
+ }
+
+ // One-shot timer task for an edge update whose head node connector has not
+ // yet been reported by the switch manager. If the connector shows up in time
+ // the task is flushed onto updateQ; otherwise run() drops it with a warning.
+ private class PendingUpdateTask extends TimerTask {
+ private final Edge edge;
+ private final Set<Property> props;
+ private final UpdateType type;
+
+ private PendingUpdateTask(Edge e, Set<Property> p, UpdateType t) {
+ edge = e;
+ props = p;
+ type = t;
+ }
+
+ private NodeConnector getHeadNodeConnector() {
+ return edge.getHeadNodeConnector();
+ }
+
+ // Re-enqueue the deferred update so it is applied by the update thread.
+ private void flush() {
+ log.info("Flush pending topology update: edge {}, type {}",
+ edge, type);
+ updateQ.add(new PendingEdgeUpdate(edge, props, type));
+ }
+
+ @Override
+ public void run() {
+ // Timeout fired: discard the stale pending update. Fixed log format
+ // ("edge {}" had a missing space, unlike the sibling messages).
+ if (removePendingEvent(this)) {
+ log.warn("Pending topology update timed out: edge {}, type {}",
+ edge, type);
+ }
+ }
+ }
void nonClusterObjectCreate() {
edgesDB = new ConcurrentHashMap<Edge, Set<Property>>();
// Restore the shuttingDown status on init of the component
shuttingDown = false;
notifyThread = new Thread(new TopologyNotify(notifyQ));
+ pendingTimer = new Timer("Topology Pending Update Timer");
+ updateThread = new Thread(new UpdateTopology(), "Topology Update");
}
@SuppressWarnings({ "unchecked" })
*
*/
void started() {
+ updateThread.start();
+
// Start the batcher thread for the cluster wide topology updates
notifyThread.start();
// SollicitRefresh MUST be called here else if called at init
void stop() {
shuttingDown = true;
+ updateThread.interrupt();
notifyThread.interrupt();
+ pendingTimer.cancel();
}
/**
*
*/
void destroy() {
+ updateQ.clear();
+ updateThread = null;
+ pendingTimer = null;
notifyQ.clear();
notifyThread = null;
}
return (switchManager.doesNodeConnectorExist(head));
}
+ private void addPendingEvent(Edge e, Set<Property> p, UpdateType t) {
+ NodeConnector head = e.getHeadNodeConnector();
+ PendingUpdateTask task = new PendingUpdateTask(e, p, t);
+ synchronized (pendingUpdates) {
+ List<PendingUpdateTask> list = pendingUpdates.get(head);
+ if (list == null) {
+ list = new LinkedList<PendingUpdateTask>();
+ pendingUpdates.put(head, list);
+ }
+ list.add(task);
+ pendingTimer.schedule(task, PENDING_UPDATE_TIMEOUT);
+ }
+ }
+
+ private boolean enqueueEventIfPending(Edge e, Set<Property> p, UpdateType t) {
+ NodeConnector head = e.getHeadNodeConnector();
+ synchronized (pendingUpdates) {
+ List<PendingUpdateTask> list = pendingUpdates.get(head);
+ if (list != null) {
+ log.warn("Enqueue edge update: edge {}, type {}", e, t);
+ PendingUpdateTask task = new PendingUpdateTask(e, p, t);
+ list.add(task);
+ pendingTimer.schedule(task, PENDING_UPDATE_TIMEOUT);
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ private boolean removePendingEvent(PendingUpdateTask t) {
+ t.cancel();
+ NodeConnector head = t.getHeadNodeConnector();
+ boolean removed = false;
+
+ synchronized (pendingUpdates) {
+ List<PendingUpdateTask> list = pendingUpdates.get(head);
+ if (list != null) {
+ removed = list.remove(t);
+ if (list.isEmpty()) {
+ pendingUpdates.remove(head);
+ }
+ }
+ }
+
+ return removed;
+ }
+
+ private void removePendingEvent(NodeConnector head, boolean doFlush) {
+ List<PendingUpdateTask> list;
+ synchronized (pendingUpdates) {
+ list = pendingUpdates.remove(head);
+ }
+
+ if (list != null) {
+ for (PendingUpdateTask task : list) {
+ if (task.cancel() && doFlush) {
+ task.flush();
+ }
+ }
+ pendingTimer.purge();
+ }
+ }
+
private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
- switch (type) {
- case ADDED:
+ return edgeUpdate(e, type, props, false);
+ }
+ private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type, Set<Property> props, boolean isPending) {
+ if (!type.equals(UpdateType.ADDED) &&
+ enqueueEventIfPending(e, props, type)) {
+ return null;
+ }
+ switch (type) {
+ case ADDED:
if (this.edgesDB.containsKey(e)) {
// Avoid redundant updates (e.g. cluster switch-over) as notifications trigger expensive tasks
log.trace("Skipping redundant edge addition: {}", e);
return null;
}
+ // Ensure that head node connector exists
+ if (!isPending) {
+ if (headNodeConnectorExist(e)) {
+ removePendingEvent(e.getHeadNodeConnector(), true);
+ } else {
+ log.warn("Ignore edge that contains invalid node connector: {}",
+ e);
+ addPendingEvent(e, props, type);
+ return null;
+ }
+ }
+
// Make sure the props are non-null or create a copy
if (props == null) {
props = new HashSet<Property>();
props = new HashSet<Property>(props);
}
-
- // Ensure that head node connector exists
- if (!headNodeConnectorExist(e)) {
- log.warn("Ignore edge that contains invalid node connector: {}", e);
- return null;
- }
-
// Check if nodeConnectors of the edge were correctly categorized
// by protocol plugin
crossCheckNodeConnectors(e);
return new TopoEdgeUpdate(e, props, type);
}
- @Override
- public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ private void doEdgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
List<TopoEdgeUpdate> teuList = new ArrayList<TopoEdgeUpdate>();
- for (int i = 0; i < topoedgeupdateList.size(); i++) {
- Edge e = topoedgeupdateList.get(i).getEdge();
- Set<Property> p = topoedgeupdateList.get(i).getProperty();
- UpdateType type = topoedgeupdateList.get(i).getUpdateType();
- TopoEdgeUpdate teu = edgeUpdate(e, type, p);
- if (teu != null) {
- teuList.add(teu);
+ for (TopoEdgeUpdate teu : topoedgeupdateList) {
+ boolean isPending = (teu instanceof PendingEdgeUpdate);
+ Edge e = teu.getEdge();
+ Set<Property> p = teu.getProperty();
+ UpdateType type = teu.getUpdateType();
+ TopoEdgeUpdate update = edgeUpdate(e, type, p, isPending);
+ if (update != null) {
+ teuList.add(update);
}
}
}
}
+ @Override
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ updateQ.addAll(topoedgeupdateList);
+ }
+
private Edge getReverseLinkTuple(TopologyUserLinkConfig link) {
TopologyUserLinkConfig rLink = new TopologyUserLinkConfig(
link.getName(), link.getDstNodeConnector(), link.getSrcNodeConnector());
notifyQ.add(upd);
}
+ @Override
+ public void notifyNode(Node node, UpdateType type, Map<String, Property> propMap) {
+ // NOP
+ }
+
+ @Override
+ public void notifyNodeConnector(NodeConnector nc, UpdateType type, Map<String, Property> propMap) {
+ // Remove pending edge updates for the given node connector.
+ // Pending events should be notified if the node connector exists.
+ boolean doFlush = !type.equals(UpdateType.REMOVED);
+ removePendingEvent(nc, doFlush);
+ }
+
@Override
public void entryCreated(final Object key, final String cacheName, final boolean originLocal) {
if (cacheName.equals(TOPOEDGESDB)) {
return result;
}
+ // Only for unit test.
+ void startTest() {
+ pendingTimer = new Timer("Topology Pending Update Timer");
+ updateThread = new Thread(new UpdateTopology(), "Topology Update");
+ updateThread.start();
+ }
+
+ void stopTest() {
+ shuttingDown = true;
+ updateThread.interrupt();
+ pendingTimer.cancel();
+ }
+
+ boolean flushUpdateQueue(long timeout) {
+ long limit = System.currentTimeMillis() + timeout;
+ long cur;
+ do {
+ if (updateQ.peek() == null) {
+ return true;
+ }
+
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ break;
+ }
+ cur = System.currentTimeMillis();
+ } while (cur < limit);
+
+ return false;
+ }
}
package org.opendaylight.controller.topologymanager.internal;
import org.junit.Assert;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.ConstructionException;
import java.util.concurrent.ConcurrentMap;
public class TopologyManagerImplTest {
+ private TopologyManagerImpl topoManagerImpl;
+
/**
* Mockup of switch manager that only maintains existence of node
* connector.
}
}
+ private void clear() {
+ nodeSet.clear();
+ nodeConnectorSet.clear();
+ }
+
@Override
public Status addSubnet(SubnetConfig configObject) {
return null;
}
}
+ @Before
+ public void setUp() {
+ topoManagerImpl = new TopologyManagerImpl();
+ topoManagerImpl.startTest();
+ }
+
+ @After
+ public void tearDown() {
+ if (topoManagerImpl != null) {
+ topoManagerImpl.stopTest();
+ topoManagerImpl = null;
+ }
+ }
+
/*
* Sets the node, edges and properties for edges here: Edge <SwitchId :
* NodeConnectorId> : <1:1>--><11:11>; <1:2>--><11:12>; <3:3>--><13:13>;
topoedgeupdateList.add(teu2);
topoManagerImpl.edgeUpdate(topoedgeupdateList);
}
+
+ Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
}
@Test
public void testGetNodeEdges() throws ConstructionException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
setNodeEdges(topoManagerImpl, swMgr);
@Test
public void testGetEdges() throws ConstructionException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
setNodeEdges(topoManagerImpl, swMgr);
TopologyUserLinkConfig link4 = new TopologyUserLinkConfig("default20",
"OF|10@OF|20", "OF|10@OF|30");
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
public void testGetUserLink() {
TopologyUserLinkConfig[] link = new TopologyUserLinkConfig[5];
TopologyUserLinkConfig[] reverseLink = new TopologyUserLinkConfig[5];
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
@Test
public void testHostLinkMethods() throws ConstructionException,
UnknownHostException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
@Test
public void testGetNodesWithNodeConnectorHost()
throws ConstructionException, UnknownHostException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
}
@Test
- public void bug1348FixTest() throws ConstructionException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ public void bug1348FixTest() throws ConstructionException,
+ InterruptedException {
TestSwitchManager swMgr = new TestSwitchManager();
topoManagerImpl.setSwitchManager(swMgr);
topoManagerImpl.nonClusterObjectCreate();
Assert.fail("Exception was raised when trying to update edge properties: " + e.getMessage());
}
+ Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
+ // Give TopologyManager time to update its edges DB.
+ Thread.sleep(1000);
Assert.assertEquals(1, topoManagerImpl.getEdges().size());
Assert.assertNotNull(topoManagerImpl.getEdges().get(edge));
}
+
+ @Test
+ public void testNotifyNodeConnector() throws ConstructionException,
+ InterruptedException {
+ TestSwitchManager swMgr = new TestSwitchManager();
+ topoManagerImpl.setSwitchManager(swMgr);
+ topoManagerImpl.nonClusterObjectCreate();
+
+ // Test NodeConnector notification in the case that there are no
+ // related edge updates.
+ NodeConnector nc1 = NodeConnectorCreator.createOFNodeConnector(
+ (short) 1, NodeCreator.createOFNode(1000L));
+ Map<String, Property> propMap = new HashMap<>();
+ swMgr.addNodeConnectors(nc1);
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.ADDED, propMap);
+ Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.CHANGED, propMap);
+ Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+
+ swMgr.clear();
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
+ Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+
+ // Test NodeConnector notification in the case that there is a related
+ // edge update just before the notification.
+ NodeConnector nc2 = NodeConnectorCreator.createOFNodeConnector(
+ (short) 2, NodeCreator.createOFNode(2000L));
+ Edge edge1 = new Edge(nc1, nc2);
+ Edge edge2 = new Edge(nc2, nc1);
+ Set<Property> props = new HashSet<Property>();
+ TopoEdgeUpdate teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.ADDED);
+ TopoEdgeUpdate teu2 = new TopoEdgeUpdate(edge2, props, UpdateType.ADDED);
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ topoedgeupdateList.add(teu1);
+ topoedgeupdateList.add(teu2);
+ topoManagerImpl.edgeUpdate(topoedgeupdateList);
+ swMgr.addNodeConnectors(nc1);
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.ADDED, propMap);
+ swMgr.addNodeConnectors(nc2);
+ topoManagerImpl.notifyNodeConnector(nc2, UpdateType.CHANGED, propMap);
+ Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
+ // Give TopologyManager time to update its edges DB.
+ Thread.sleep(1000);
+ Assert.assertEquals(2, topoManagerImpl.getEdges().size());
+
+ teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.REMOVED);
+ teu2 = new TopoEdgeUpdate(edge2, props, UpdateType.REMOVED);
+ topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ topoedgeupdateList.add(teu1);
+ topoedgeupdateList.add(teu2);
+ topoManagerImpl.edgeUpdate(topoedgeupdateList);
+ Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
+ // Give TopologyManager time to update its edges DB.
+ Thread.sleep(1000);
+ Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
+ topoManagerImpl.notifyNodeConnector(nc2, UpdateType.REMOVED, propMap);
+
+ swMgr.clear();
+
+ // Test NodeConnector notification in the case that there are multiple
+ // edge updates related to the NodeConnector just before the notification.
+ teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.ADDED);
+ teu2 = new TopoEdgeUpdate(edge2, props, UpdateType.ADDED);
+ TopoEdgeUpdate teu3 = new TopoEdgeUpdate(edge1, props, UpdateType.CHANGED);
+ TopoEdgeUpdate teu4 = new TopoEdgeUpdate(edge2, props, UpdateType.CHANGED);
+ TopoEdgeUpdate teu5 = new TopoEdgeUpdate(edge1, props, UpdateType.REMOVED);
+ TopoEdgeUpdate teu6 = new TopoEdgeUpdate(edge2, props, UpdateType.REMOVED);
+ topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ topoedgeupdateList.add(teu1);
+ topoedgeupdateList.add(teu2);
+ topoedgeupdateList.add(teu3);
+ topoedgeupdateList.add(teu4);
+ topoedgeupdateList.add(teu5);
+ topoedgeupdateList.add(teu6);
+ topoManagerImpl.edgeUpdate(topoedgeupdateList);
+ swMgr.addNodeConnectors(nc1);
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.ADDED, propMap);
+ swMgr.addNodeConnectors(nc2);
+ topoManagerImpl.notifyNodeConnector(nc2, UpdateType.CHANGED, propMap);
+ Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
+ // Give TopologyManager time to update its edges DB.
+ Thread.sleep(1000);
+ Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ topoManagerImpl.notifyNodeConnector(nc1, UpdateType.REMOVED, propMap);
+ topoManagerImpl.notifyNodeConnector(nc2, UpdateType.REMOVED, propMap);
+ Assert.assertTrue(topoManagerImpl.flushUpdateQueue(5000));
+ // Give TopologyManager time to update its edges DB.
+ Thread.sleep(1000);
+ Assert.assertEquals(0, topoManagerImpl.getEdges().size());
+ }
}
-<?xml version="1.0" encoding="UTF-8"?>\r
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\r
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">\r
- <modelVersion>4.0.0</modelVersion>\r
- <artifactId>${artifactId}</artifactId>\r
- <groupId>${groupId}</groupId>\r
- <version>${version}</version>\r
- <packaging>bundle</packaging>\r
- <properties>\r
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\r
- <nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>${artifactId}</artifactId>
+ <groupId>${groupId}</groupId>
+ <version>${version}</version>
+ <packaging>bundle</packaging>
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
<nexus.repository.release>opendaylight.release</nexus.repository.release>
- <nexus.repository.snapshot>opendaylight.release</nexus.repository.snaphot>
- <yang.version>0.7.0-SNAPSHOT</yang.version>\r
- <yang.codegen.version>0.7.0-SNAPSHOT</yang.codegen.version>\r
- <bundle.plugin.version>2.3.7</bundle.plugin.version>\r
- </properties>\r
- <scm>\r
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>\r
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>\r
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>\r
- </scm>\r
-\r
- <build>\r
- <plugins>\r
- <plugin>\r
- <groupId>org.apache.felix</groupId>\r
- <artifactId>maven-bundle-plugin</artifactId>\r
- <version>${bundle.plugin.version}</version>\r
- <extensions>true</extensions>\r
- <configuration>\r
- <instructions>\r
- <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>\r
- </instructions>\r
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>\r
- </configuration>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-compiler-plugin</artifactId>\r
- <version>2.5.1</version>\r
- <inherited>true</inherited>\r
- <configuration>\r
- <source>1.7</source>\r
- <target>1.7</target>\r
- </configuration>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-javadoc-plugin</artifactId>\r
- <version>2.8.1</version>\r
- <configuration>\r
- <stylesheet>maven</stylesheet>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <goals>\r
- <goal>aggregate</goal>\r
- </goals>\r
- <phase>site</phase>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.opendaylight.yangtools</groupId>\r
- <artifactId>yang-maven-plugin</artifactId>\r
- <version>${yang.version}</version>\r
- <executions>\r
- <execution>\r
- <goals>\r
- <goal>generate-sources</goal>\r
- </goals>\r
- <configuration>\r
- <yangFilesRootDir>src/main/yang</yangFilesRootDir>\r
- <codeGenerators>\r
- <generator>\r
- <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>\r
- <outputBaseDir>target/generated-sources/sal</outputBaseDir>\r
- </generator>\r
- </codeGenerators>\r
- <inspectDependencies>false</inspectDependencies>\r
- </configuration>\r
- </execution>\r
- </executions>\r
-\r
- <dependencies>\r
- <dependency>\r
- <groupId>org.opendaylight.yangtools</groupId>\r
- <artifactId>maven-sal-api-gen-plugin</artifactId>\r
- <version>${yang.codegen.version}</version>\r
- <type>jar</type>\r
- </dependency>\r
- </dependencies>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.codehaus.mojo</groupId>\r
- <artifactId>build-helper-maven-plugin</artifactId>\r
- <version>1.7</version>\r
- <executions>\r
- <execution>\r
- <phase>generate-sources</phase>\r
- <goals>\r
- <goal>add-source</goal>\r
- </goals>\r
- <configuration>\r
- <sources>\r
- <source>target/generated-sources/sal</source>\r
- </sources>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- </plugins>\r
- <pluginManagement>\r
- <plugins>\r
- <!--This plugin's configuration is used to store Eclipse m2e settings\r
- only. It has no influence on the Maven build itself. -->\r
- <plugin>\r
- <groupId>org.eclipse.m2e</groupId>\r
- <artifactId>lifecycle-mapping</artifactId>\r
- <version>1.0.0</version>\r
- <configuration>\r
- <lifecycleMappingMetadata>\r
- <pluginExecutions>\r
- <pluginExecution>\r
- <pluginExecutionFilter>\r
- <groupId>org.opendaylight.yangtools</groupId>\r
- <artifactId>yang-maven-plugin</artifactId>\r
- <versionRange>[0.5,)</versionRange>\r
- <goals>\r
- <goal>generate-sources</goal>\r
- </goals>\r
- </pluginExecutionFilter>\r
- <action>\r
- <ignore></ignore>\r
- </action>\r
- </pluginExecution>\r
- </pluginExecutions>\r
- </lifecycleMappingMetadata>\r
- </configuration>\r
- </plugin>\r
- </plugins>\r
- </pluginManagement>\r
- </build>\r
- <pluginRepositories>\r
- <!-- OpenDayLight Repo Mirror -->\r
- <pluginRepository>\r
- <id>opendaylight-mirror</id>\r
- <name>opendaylight-mirror</name>\r
- <url>${nexusproxy}/groups/public/</url>\r
- <snapshots>\r
- <enabled>false</enabled>\r
- </snapshots>\r
- <releases>\r
- <enabled>true</enabled>\r
- <updatePolicy>never</updatePolicy>\r
- </releases>\r
- </pluginRepository>\r
- <!-- OpenDayLight Snapshot artifact -->\r
- <pluginRepository>\r
- <id>opendaylight-snapshot</id>\r
- <name>opendaylight-snapshot</name>\r
+ <nexus.repository.snapshot>opendaylight.release</nexus.repository.snapshot>
+ <yang.version>0.7.0-SNAPSHOT</yang.version>
+ <yang.codegen.version>0.7.0-SNAPSHOT</yang.codegen.version>
+ <bundle.plugin.version>2.3.7</bundle.plugin.version>
+ </properties>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+ </scm>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>2.5.1</version>
+ <inherited>true</inherited>
+ <configuration>
+ <source>1.7</source>
+ <target>1.7</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.8.1</version>
+ <configuration>
+ <stylesheet>maven</stylesheet>
+ </configuration>
+ <executions>
+ <execution>
+ <goals>
+ <goal>aggregate</goal>
+ </goals>
+ <phase>site</phase>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yang.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/generated-sources/sal</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>false</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yang.codegen.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.7</version>
+ <executions>
+ <execution>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>target/generated-sources/sal</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!--This plugin's configuration is used to store Eclipse m2e settings
+ only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+ <pluginRepositories>
+ <!-- OpenDayLight Repo Mirror -->
+ <pluginRepository>
+ <id>opendaylight-mirror</id>
+ <name>opendaylight-mirror</name>
+ <url>${nexusproxy}/groups/public/</url>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ </pluginRepository>
+ <!-- OpenDayLight Snapshot artifact -->
+ <pluginRepository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
<url>${nexusproxy}/repositories/${nexus.repository.snapshot}/</url>
- <snapshots>\r
- <enabled>true</enabled>\r
- </snapshots>\r
- <releases>\r
- <enabled>false</enabled>\r
- </releases>\r
- </pluginRepository>\r
- </pluginRepositories>\r
-\r
- <repositories>\r
- <!-- OpenDayLight Repo Mirror -->\r
- <repository>\r
- <id>opendaylight-mirror</id>\r
- <name>opendaylight-mirror</name>\r
- <url>${nexusproxy}/groups/public/</url>\r
- <snapshots>\r
- <enabled>false</enabled>\r
- </snapshots>\r
- <releases>\r
- <enabled>true</enabled>\r
- <updatePolicy>never</updatePolicy>\r
- </releases>\r
- </repository>\r
- <!-- OpenDayLight Snapshot artifact -->\r
- <repository>\r
- <id>opendaylight-snapshot</id>\r
- <name>opendaylight-snapshot</name>\r
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ </pluginRepository>
+ </pluginRepositories>
+
+ <repositories>
+ <!-- OpenDayLight Repo Mirror -->
+ <repository>
+ <id>opendaylight-mirror</id>
+ <name>opendaylight-mirror</name>
+ <url>${nexusproxy}/groups/public/</url>
+ <snapshots>
+ <enabled>false</enabled>
+ </snapshots>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ </releases>
+ </repository>
+ <!-- OpenDayLight Snapshot artifact -->
+ <repository>
+ <id>opendaylight-snapshot</id>
+ <name>opendaylight-snapshot</name>
<url>${nexusproxy}/repositories/${nexus.repository.snapshot}/</url>
- <snapshots>\r
- <enabled>true</enabled>\r
- </snapshots>\r
- <releases>\r
- <enabled>false</enabled>\r
- </releases>\r
- </repository>\r
- </repositories>\r
-\r
- <distributionManagement>\r
- <!-- OpenDayLight Released artifact -->\r
- <repository>\r
- <id>opendaylight-release</id>\r
+ <snapshots>
+ <enabled>true</enabled>
+ </snapshots>
+ <releases>
+ <enabled>false</enabled>
+ </releases>
+ </repository>
+ </repositories>
+
+ <distributionManagement>
+ <!-- OpenDayLight Released artifact -->
+ <repository>
+ <id>opendaylight-release</id>
<url>${nexusproxy}/repositories/${nexus.repository.release}/</url>
- </repository>\r
- <!-- OpenDayLight Snapshot artifact -->\r
- <snapshotRepository>\r
- <id>opendaylight-snapshot</id>\r
+ </repository>
+ <!-- OpenDayLight Snapshot artifact -->
+ <snapshotRepository>
+ <id>opendaylight-snapshot</id>
<url>${nexusproxy}/repositories/${nexus.repository.snapshot}/</url>
- </snapshotRepository>\r
- <!-- Site deployment -->\r
- <site>\r
- <id>website</id>\r
- <url>${sitedeploy}</url>\r
- </site>\r
- </distributionManagement>\r
- <dependencies>\r
- <dependency>\r
- <groupId>org.opendaylight.yangtools</groupId>\r
- <artifactId>yang-binding</artifactId>\r
- <version>${yang.codegen.version}</version>\r
- </dependency>\r
- </dependencies>\r
-</project>\r
+ </snapshotRepository>
+ <!-- Site deployment -->
+ <site>
+ <id>website</id>
+ <url>${sitedeploy}</url>
+ </site>
+ </distributionManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yang.codegen.version}</version>
+ </dependency>
+ </dependencies>
+</project>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
<sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
- <sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
public void handleNotification(final Notification n, final Object handback) {
if (n instanceof MBeanServerNotification
&& n.getType()
- .equals(MBeanServerNotification.UNREGISTRATION_NOTIFICATION)) {
+ .equals(MBeanServerNotification.UNREGISTRATION_NOTIFICATION)) {
if (((MBeanServerNotification) n).getMBeanName().equals(
thisWrapperObjectName)) {
try {
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;
-import javax.management.DynamicMBean;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanException;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>binding-parent</artifactId>
+ <version>0.7.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-parent</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <properties>
+ <config.version>0.3.0-SNAPSHOT</config.version>
+ <mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
+ <jmxGeneratorPath>src/main/yang-gen-config</jmxGeneratorPath>
+ <config.file>src/main/config/default-config.xml</config.file>
+ </properties>
+
+ <dependencyManagement>
+ <dependencies>
+ <!-- project specific dependencies -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-artifacts</artifactId>
+ <version>${config.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-artifacts</artifactId>
+ <version>${mdsal.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ </dependencies>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-clean-plugin</artifactId>
+ <configuration>
+ <filesets>
+ <fileset>
+ <directory>${jmxGeneratorPath}</directory>
+ <includes>
+ <include>**</include>
+ </includes>
+ </fileset>
+ </filesets>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+ <profiles>
+ <profile>
+ <activation>
+ <file>
+ <exists>${config.file}</exists>
+ </file>
+ </activation>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${config.file}</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+</project>
return new ConfigSnapshot(cfg.getConfigSnapshot(), cfg.getCapabilities());
}
+
@XmlAnyElement(SnapshotHandler.class)
public String getConfigSnapshot() {
return configSnapshot;
*/
package org.opendaylight.controller.config.persist.storage.file.xml.model;
+import com.google.common.base.Preconditions;
import java.io.StringReader;
import java.io.StringWriter;
import javax.xml.bind.ValidationEventHandler;
String xml = rt.getWriter().toString();
int beginIndex = xml.indexOf(START_TAG) + START_TAG.length();
int endIndex = xml.indexOf(END_TAG);
+ Preconditions.checkArgument(beginIndex != -1 && endIndex != -1,
+ "Unknown element present in config snapshot(expected only configuration): %s", xml);
return xml.substring(beginIndex, endIndex);
}
--- /dev/null
+package org.opendaylight.controller.config.persist.storage.file.xml.model;
+
+import java.io.File;
+import org.junit.Test;
+
+public class ConfigTest {
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testFromXml() throws Exception {
+ Config.fromXml(new File(getClass().getResource("/illegalSnapshot.xml").getFile()));
+ }
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-service-provider">
+ prefix:clustering-service-provider
+ </type>
+ <name>clustering-service-provider</name>
+
+ <rpc-registry>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <name>binding-rpc-broker</name>
+ </rpc-registry>
+
+ </module>
+ </modules>
+ </data>
+
+ </configuration>
+
+ <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <service>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-service-provider">prefix:clustering-service-change-registry</type>
+ <instance>
+ <name>openflow-role-change-registry</name>
+ <provider>/modules/module[type='clustering-service-provider'][name='clustering-service-provider']</provider>
+ </instance>
+ </service>
+ </services>
+
+ <required-capabilities>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:config:clustering-service-provider?module=clustering-service-provider&amp;revision=2014-11-19</capability>
+
+ </required-capabilities>
+
+</snapshot>
\ No newline at end of file
<module>config-netty-config</module>
<module>config-artifacts</module>
+ <module>config-parent</module>
</modules>
<dependencies>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failsOnError>false</failsOnError>
- <failOnViolation>false</failOnViolation>
+ <failOnViolation>true</failOnViolation>
<configLocation>checkstyle-logging.xml</configLocation>
<consoleOutput>true</consoleOutput>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<!-- excluding logback-config, has several checkstyle warnings
regarding Logger/LoggerFactory, which couldn't be removed due necessity/intention
to use the particular implementation/library of Logger/LoggerFactory -->
- <excludes>**\/logback-config\/,**\/target\/,**\/bin\/,**\/target-ide\/,**\/${jmxGeneratorPath}\/,**\/${salGeneratorPath}\/</excludes>
+ <excludes>**\/config\/yang\/logback\/config\/**,**\/target\/,**\/bin\/,**\/target-ide\/,**\/${jmxGeneratorPath}\/,**\/${salGeneratorPath}\/</excludes>
</configuration>
<dependencies>
<dependency>
import akka.actor.UntypedActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
-import akka.japi.Creator;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
this.target = target;
}
- public static Props props(final ActorRef target){
- return Props.create(new Creator<ClientActor>(){
- private static final long serialVersionUID = 1L;
-
- @Override public ClientActor create() throws Exception {
- return new ClientActor(target);
- }
- });
+ public static Props props(final ActorRef target) {
+ return Props.create(ClientActor.class, target);
}
@Override public void onReceive(Object message) throws Exception {
import akka.actor.ActorRef;
import akka.actor.Props;
-import akka.japi.Creator;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
import java.io.ByteArrayInputStream;
}
public static Props props(final String id, final Map<String, String> peerAddresses,
- final Optional<ConfigParams> configParams){
- return Props.create(new Creator<ExampleActor>(){
-
- @Override public ExampleActor create() throws Exception {
- return new ExampleActor(id, peerAddresses, configParams);
- }
- });
+ final Optional<ConfigParams> configParams) {
+ return Props.create(ExampleActor.class, id, peerAddresses, configParams);
}
@Override public void onReceiveCommand(Object message) throws Exception{
package org.opendaylight.controller.cluster.example;
-import akka.actor.Actor;
import akka.actor.ActorRef;
import akka.actor.Cancellable;
import akka.actor.Props;
-import akka.japi.Creator;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
}
public static Props getProps(final String memberName) {
- return Props.create(new Creator<Actor>() {
- @Override
- public Actor create() throws Exception {
- return new ExampleRoleChangeListener(memberName);
- }
- });
+ return Props.create(ExampleRoleChangeListener.class, memberName);
}
@Override
import javassist.CtClass;
import javassist.CtMethod;
import javassist.NotFoundException;
-import org.eclipse.xtext.xbase.lib.Extension;
import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory;
@GuardedBy("this")
private final Map<Class<? extends NotificationListener>, RuntimeGeneratedInvokerPrototype> invokerClasses = new WeakHashMap<>();
private final CtClass brokerNotificationListener;
-
- @Extension
protected final JavassistUtils utils;
protected AbstractRuntimeCodeGenerator(final ClassPool pool) {
*/
package org.opendaylight.controller.sal.binding.codegen.impl;
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
import java.util.Set;
-
-import org.eclipse.xtext.xbase.lib.util.ToStringHelper;
import org.opendaylight.controller.sal.binding.codegen.RuntimeCodeHelper;
import org.opendaylight.controller.sal.binding.spi.NotificationInvokerFactory.NotificationInvoker;
import org.opendaylight.yangtools.yang.binding.Notification;
import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import com.google.common.base.Preconditions;
-
final class RuntimeGeneratedInvoker implements NotificationInvoker {
private final org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> invocationProxy;
private final RuntimeGeneratedInvokerPrototype prototype;
@Override
public String toString() {
- String result = new ToStringHelper().toString(this);
- return result;
+ return Objects.toStringHelper(this).toString();
}
}
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
-
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
-
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.impl.codec.BindingIndependentMappingService;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-
public class RpcInvocationStrategyTest {
@Mock
public class MockRpcService implements RpcService {
- public Future<?> rpcnameWithInputNoOutput(DataObject input) {
+ public Future<?> rpcnameWithInputNoOutput(final DataObject input) {
return futureDataObj;
}
- public Future<RpcResult<DataObject>> rpcnameWithInputWithOutput(DataObject input) {
+ public Future<RpcResult<DataObject>> rpcnameWithInputWithOutput(final DataObject input) {
return futureDataObj;
}
urn = new URI(new String("urn:a:valid:urn"));
}
- private void setupForForwardToDom(boolean hasOutput, boolean hasInput, int expectedErrorSize) {
+ private void setupForForwardToDom(final boolean hasOutput, final boolean hasInput, final int expectedErrorSize) {
if (expectedErrorSize > 0) {
errors.add(rpcError);
}
- private void validateForwardToDomBroker(ListenableFuture<RpcResult<?>> forwardToDomBroker,
- boolean expectedSuccess, DataObject expectedResult, int expectedErrorSize)
+ private void validateForwardToDomBroker(final ListenableFuture<RpcResult<?>> forwardToDomBroker,
+ final boolean expectedSuccess, final DataObject expectedResult, final int expectedErrorSize)
throws InterruptedException, ExecutionException {
assertNotNull(forwardToDomBroker);
assertEquals(expectedSuccess, forwardToDomBroker.get().isSuccessful());
assertEquals(expectedErrorSize, forwardToDomBroker.get().getErrors().size());
}
- private void setupTestMethod(String rpcName, String testMethodName, boolean hasInput)
+ private void setupTestMethod(final String rpcName, final String testMethodName, final boolean hasInput)
throws NoSuchMethodException {
- mockQName = new QName(urn, new Date(0L), new String("prefix"), new String(rpcName));
+ mockQName = QName.create(urn, new Date(0L), new String(rpcName));
java.lang.reflect.Method rpcMethod = hasInput ? MockRpcService.class.getMethod(rpcName,
DataObject.class) : MockRpcService.class.getMethod(rpcName);
rpcInvocationStrategy = new RpcInvocationStrategy(mockQName, rpcMethod, mockMappingService,
/*
* invokeOn Tests
*/
- private void setupRpcResultsWithOutput(int expectedErrorSize) {
+ private void setupRpcResultsWithOutput(final int expectedErrorSize) {
if (expectedErrorSize > 0) {
errors.add(rpcError);
}
when(mockMappingService.toDataDom(toDataDomInput)).thenReturn(outputInvokeOn);
}
- private void setupRpcResultsNoOutput(int expectedErrorSize) {
+ private void setupRpcResultsNoOutput(final int expectedErrorSize) {
if (expectedErrorSize > 0) {
errors.add(rpcError);
}
}
private void validateReturnedImmediateFuture(
- ListenableFuture<RpcResult<CompositeNode>> immediateFuture, boolean expectedSuccess,
- CompositeNode expectedReturn, int expectedErrorSize) throws InterruptedException,
+ final ListenableFuture<RpcResult<CompositeNode>> immediateFuture, final boolean expectedSuccess,
+ final CompositeNode expectedReturn, final int expectedErrorSize) throws InterruptedException,
ExecutionException {
assertNotNull(immediateFuture);
assertEquals(expectedSuccess, immediateFuture.get().isSuccessful());
*/
package org.opendaylight.controller.md.sal.binding.data;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import java.util.Collections;
import java.util.HashSet;
*/
package org.opendaylight.controller.sal.binding.test.connect.dom;
-import static junit.framework.Assert.assertNotNull;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertEquals;
import java.math.BigInteger;
<!--If the dependencies are test scoped, they are not visible to other maven modules depending on sal-binding-it-->
<!--TODO Create generic utilities(extract from this module) for integration tests on the controller-->
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>xtend-lib-osgi</artifactId>
- </dependency>
<dependency>
<groupId>org.openexi</groupId>
<artifactId>nagasena</artifactId>
mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), // //
mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(), //
- mavenBundle("com.google.guava", "guava").versionAsInProject(), // //
- mavenBundle(YANGTOOLS + ".thirdparty", "xtend-lib-osgi").versionAsInProject() //
+ mavenBundle("com.google.guava", "guava").versionAsInProject()
);
}
package org.opendaylight.controller.cluster.notifications;
-import akka.actor.Actor;
import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.Props;
-import akka.japi.Creator;
import akka.serialization.Serialization;
import com.google.common.collect.Maps;
import java.util.Map;
}
public static Props getProps(final String memberId) {
- return Props.create(new Creator<Actor>() {
- @Override
- public Actor create() throws Exception {
- return new RoleChangeNotifier(memberId);
- }
- });
+ return Props.create(RoleChangeNotifier.class, memberId);
}
@Override
String encodeQName(final QName qname) {
String prefix = prefixes.get(qname.getNamespace());
if (prefix == null) {
- prefix = qname.getPrefix();
- if (prefix == null || prefix.isEmpty() || prefixes.containsValue(prefix)) {
- final ThreadLocalRandom random = ThreadLocalRandom.current();
- do {
- final StringBuilder sb = new StringBuilder();
- for (int i = 0; i < 4; i++) {
- sb.append((char)('a' + random.nextInt(25)));
- }
-
- prefix = sb.toString();
- } while (prefixes.containsValue(prefix));
- }
-
+ final ThreadLocalRandom random = ThreadLocalRandom.current();
+ do {
+ final StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 4; i++) {
+ sb.append((char)('a' + random.nextInt(25)));
+ }
+
+ prefix = sb.toString();
+ } while (prefixes.containsValue(prefix));
prefixes.put(qname.getNamespace(), prefix);
}
import com.google.common.annotations.Beta;
import com.google.common.base.Preconditions;
+import java.net.URI;
+import java.util.Map.Entry;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.AttributesContainer;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.xml.stream.XMLStreamException;
-import javax.xml.stream.XMLStreamWriter;
-import java.net.URI;
-import java.util.Map.Entry;
-
/**
* Utility class for bridging JAXP Stream and YANG Data APIs. Note that the definition of this class
* by no means final and subject to change as more functionality is centralized here.
*/
public void writeElement(final XMLStreamWriter writer, final @Nonnull Node<?> data, final SchemaNode schema) throws XMLStreamException {
final QName qname = data.getNodeType();
- final String pfx = qname.getPrefix() != null ? qname.getPrefix() : "";
final String ns = qname.getNamespace() != null ? qname.getNamespace().toString() : "";
if (isEmptyElement(data)) {
- writer.writeEmptyElement(pfx, qname.getLocalName(), ns);
+ writer.writeEmptyElement("", qname.getLocalName(), ns);
return;
}
- writer.writeStartElement(pfx, qname.getLocalName(), ns);
+ writer.writeStartElement("", qname.getLocalName(), ns);
if (data instanceof AttributesContainer && ((AttributesContainer) data).getAttributes() != null) {
for (Entry<QName, String> attribute : ((AttributesContainer) data).getAttributes().entrySet()) {
writer.writeAttribute(attribute.getKey().getNamespace().toString(), attribute.getKey().getLocalName(), attribute.getValue());
private static void write(final @Nonnull XMLStreamWriter writer, final @Nonnull IdentityrefTypeDefinition type, final @Nonnull Object value) throws XMLStreamException {
if (value instanceof QName) {
final QName qname = (QName) value;
- final String prefix;
- if (qname.getPrefix() != null && !qname.getPrefix().isEmpty()) {
- prefix = qname.getPrefix();
- } else {
- prefix = "x";
- }
- writer.writeNamespace(prefix, qname.getNamespace().toString());
- writer.writeCharacters(prefix + ':' + qname.getLocalName());
+ writer.writeNamespace("x", qname.getNamespace().toString());
+ writer.writeCharacters("x:" + qname.getLocalName());
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
import akka.actor.DeadLetter;
import akka.actor.Props;
import akka.actor.UntypedActor;
-import akka.japi.Creator;
import akka.testkit.JavaTestKit;
import org.junit.After;
import org.junit.Before;
}
public static Props props(final ReentrantLock lock){
- return Props.create(new Creator<PingPongActor>(){
- private static final long serialVersionUID = 1L;
- @Override
- public PingPongActor create() throws Exception {
- return new PingPongActor(lock);
- }
- });
+ return Props.create(PingPongActor.class, lock);
}
@Override
<data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-inmemory-data-broker</type>
- <name>inmemory-data-broker</name>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:concurrent-data-broker">prefix:dom-concurrent-data-broker</type>
+ <name>concurrent-data-broker</name>
<schema-service>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
<type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
<name>distributed-operational-store-service</name>
</operational-data-store>
-
- <allow-concurrent-commits>true</allow-concurrent-commits>
</module>
<module>
</module>
</modules>
+
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
</instance>
</service>
+ <!-- Overrides the definition from 01-md-sal.xml -->
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
+ <instance>
+ <name>inmemory-data-broker</name>
+ <provider>/modules/module[type='dom-concurrent-data-broker'][name='concurrent-data-broker']</provider>
+ </instance>
+ </service>
+
</services>
</data>
</configuration>
<required-capabilities>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl?module=opendaylight-sal-dom-broker-impl&revision=2013-10-28</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:config:concurrent-data-broker?module=odl-concurrent-data-broker-cfg&amp;revision=2014-11-24</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider?module=distributed-datastore-privider&amp;revision=2014-06-12</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store?module=opendaylight-config-dom-datastore&amp;revision=2014-06-17</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store?module=opendaylight-operational-dom-datastore&amp;revision=2014-06-17</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom?module=opendaylight-md-sal-dom&amp;revision=2013-10-28</capability>
</required-capabilities>
</snapshot>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-common-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-broker-impl</artifactId>
+ </dependency>
+
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-core-spi</artifactId>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.broker.impl;
+package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.broker.impl.AbstractDOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.broker.impl.TransactionCommitFailedExceptionMapper;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.util.DurationStatisticsTracker;
LOG.debug("Sending change notification {} to listener {}", change, listener);
- this.listener.onDataChanged(change);
+ try {
+ this.listener.onDataChanged(change);
+ } catch (RuntimeException e) {
+ LOG.error( String.format( "Error notifying listener %s", this.listener ), e );
+ }
// It seems the sender is never null but it doesn't hurt to check. If the caller passes in
// a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
}
@Override
- public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
- final YangInstanceIdentifier path) {
+ public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
LOG.debug("Tx {} read {}", identifier, path);
TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
-
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future;
- if(transactionContext != null) {
- future = transactionContext.readData(path);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture = SettableFuture.create();
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- Futures.addCallback(transactionContext.readData(path),
- new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
- @Override
- public void onSuccess(Optional<NormalizedNode<?, ?>> data) {
- proxyFuture.set(data);
- }
-
- @Override
- public void onFailure(Throwable t) {
- proxyFuture.setException(t);
- }
- });
- }
- });
-
- future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
- }
-
- return future;
+ return txFutureCallback.enqueueReadOperation(new ReadOperation<Optional<NormalizedNode<?, ?>>>() {
+ @Override
+ public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> invoke(
+ TransactionContext transactionContext) {
+ return transactionContext.readData(path);
+ }
+ });
}
@Override
LOG.debug("Tx {} exists {}", identifier, path);
TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
-
- CheckedFuture<Boolean, ReadFailedException> future;
- if(transactionContext != null) {
- future = transactionContext.dataExists(path);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- final SettableFuture<Boolean> proxyFuture = SettableFuture.create();
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- Futures.addCallback(transactionContext.dataExists(path),
- new FutureCallback<Boolean>() {
- @Override
- public void onSuccess(Boolean exists) {
- proxyFuture.set(exists);
- }
-
- @Override
- public void onFailure(Throwable t) {
- proxyFuture.setException(t);
- }
- });
- }
- });
-
- future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
- }
-
- return future;
+ return txFutureCallback.enqueueReadOperation(new ReadOperation<Boolean>() {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> invoke(TransactionContext transactionContext) {
+ return transactionContext.dataExists(path);
+ }
+ });
}
+
private void checkModificationState() {
Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
"Modification operation on read-only transaction is not allowed");
LOG.debug("Tx {} write {}", identifier, path);
TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- transactionContext.writeData(path, data);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.writeData(path, data);
- }
- });
- }
+ txFutureCallback.enqueueModifyOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.writeData(path, data);
+ }
+ });
}
@Override
LOG.debug("Tx {} merge {}", identifier, path);
TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- transactionContext.mergeData(path, data);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.mergeData(path, data);
- }
- });
- }
+ txFutureCallback.enqueueModifyOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.mergeData(path, data);
+ }
+ });
}
@Override
LOG.debug("Tx {} delete {}", identifier, path);
TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- transactionContext.deleteData(path);
- } else {
- // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
- // callback to be executed after the Tx is created.
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.deleteData(path);
- }
- });
- }
+ txFutureCallback.enqueueModifyOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.deleteData(path);
+ }
+ });
}
@Override
LOG.debug("Tx {} Readying transaction for shard {} chain {}", identifier,
txFutureCallback.getShardName(), transactionChainId);
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- cohortFutures.add(transactionContext.readyTransaction());
- } else {
- // The shard Tx hasn't been created yet so create a promise to ready the Tx later
- // after it's created.
- final Promise<ActorSelection> cohortPromise = akka.dispatch.Futures.promise();
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- cohortPromise.completeWith(transactionContext.readyTransaction());
- }
- });
+ Future<ActorSelection> future = txFutureCallback.enqueueFutureOperation(new FutureOperation<ActorSelection>() {
+ @Override
+ public Future<ActorSelection> invoke(TransactionContext transactionContext) {
+ return transactionContext.readyTransaction();
+ }
+ });
- cohortFutures.add(cohortPromise.future());
- }
+ cohortFutures.add(future);
}
onTransactionReady(cohortFutures);
@Override
public void close() {
- for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
- TransactionContext transactionContext = txFutureCallback.getTransactionContext();
- if(transactionContext != null) {
- transactionContext.closeTransaction();
- } else {
- txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
- @Override
- public void invoke(TransactionContext transactionContext) {
- transactionContext.closeTransaction();
- }
- });
- }
+ for (TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+ txFutureCallback.enqueueModifyOperation(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.closeTransaction();
+ }
+ });
}
txFutureCallbackMap.clear();
}
/**
- * Interface for a transaction operation to be invoked later.
+ * Interfaces for transaction operations to be invoked later.
*/
private static interface TransactionOperation {
void invoke(TransactionContext transactionContext);
}
+ /**
+ * This interface returns a Guava Future
+ */
+ private static interface ReadOperation<T> {
+ CheckedFuture<T, ReadFailedException> invoke(TransactionContext transactionContext);
+ }
+
+ /**
+ * This interface returns a Scala Future
+ */
+ private static interface FutureOperation<T> {
+ Future<T> invoke(TransactionContext transactionContext);
+ }
+
/**
* Implements a Future OnComplete callback for a CreateTransaction message. This class handles
* retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
}
}
+
+ <T> Future<T> enqueueFutureOperation(final FutureOperation<T> op) {
+
+ Future<T> future;
+
+ if (transactionContext != null) {
+ future = op.invoke(transactionContext);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ final Promise<T> promise = akka.dispatch.Futures.promise();
+ addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ promise.completeWith(op.invoke(transactionContext));
+ }
+ });
+
+ future = promise.future();
+ }
+
+ return future;
+ }
+
+ <T> CheckedFuture<T, ReadFailedException> enqueueReadOperation(final ReadOperation<T> op) {
+
+ CheckedFuture<T, ReadFailedException> future;
+
+ if (transactionContext != null) {
+ future = op.invoke(transactionContext);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ final SettableFuture<T> proxyFuture = SettableFuture.create();
+ addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ Futures.addCallback(op.invoke(transactionContext), new FutureCallback<T>() {
+ @Override
+ public void onSuccess(T data) {
+ proxyFuture.set(data);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ proxyFuture.setException(t);
+ }
+ });
+ }
+ });
+
+ future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
+ }
+
+ return future;
+ }
+
+ void enqueueModifyOperation(final TransactionOperation op) {
+
+ if (transactionContext != null) {
+ op.invoke(transactionContext);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ addTxOperationOnComplete(op);
+ }
+ }
+
+
+
+
+
/**
* Performs a CreateTransaction try async.
*/
return actor;
}
+ private Future<Object> executeOperationAsync(SerializableMessage msg) {
+ return actorContext.executeOperationAsync(getActor(), isTxActorLocal ? msg : msg.toSerializable());
+ }
+
@Override
public void closeTransaction() {
LOG.debug("Tx {} closeTransaction called", identifier);
// Send the ReadyTransaction message to the Tx actor.
- ReadyTransaction readyTransaction = new ReadyTransaction();
- final Future<Object> replyFuture = actorContext.executeOperationAsync(getActor(),
- isTxActorLocal ? readyTransaction : readyTransaction.toSerializable());
+ final Future<Object> replyFuture = executeOperationAsync(new ReadyTransaction());
// Combine all the previously recorded put/merge/delete operation reply Futures and the
// ReadyTransactionReply Future into one Future. If any one fails then the combined
public void deleteData(YangInstanceIdentifier path) {
LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- DeleteData deleteData = new DeleteData(path);
- recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
- isTxActorLocal ? deleteData : deleteData.toSerializable()));
+ recordedOperationFutures.add(executeOperationAsync(new DeleteData(path)));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- MergeData mergeData = new MergeData(path, data, schemaContext);
- recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
- isTxActorLocal ? mergeData : mergeData.toSerializable()));
+ recordedOperationFutures.add(executeOperationAsync(new MergeData(path, data, schemaContext)));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
LOG.debug("Tx {} writeData called path = {}", identifier, path);
- WriteData writeData = new WriteData(path, data, schemaContext);
- recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
- isTxActorLocal ? writeData : writeData.toSerializable()));
+ recordedOperationFutures.add(executeOperationAsync(new WriteData(path, data, schemaContext)));
}
@Override
}
};
- ReadData readData = new ReadData(path);
- Future<Object> readFuture = actorContext.executeOperationAsync(getActor(),
- isTxActorLocal ? readData : readData.toSerializable());
+ Future<Object> readFuture = executeOperationAsync(new ReadData(path));
readFuture.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
}
}
};
- DataExists dataExists = new DataExists(path);
- Future<Object> future = actorContext.executeOperationAsync(getActor(),
- isTxActorLocal ? dataExists : dataExists.toSerializable());
+ Future<Object> future = executeOperationAsync(new DataExists(path));
future.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-public class ReadData {
+public class ReadData implements SerializableMessage {
public static final Class<ShardTransactionMessages.ReadData> SERIALIZABLE_CLASS =
ShardTransactionMessages.ReadData.class;
private final YangInstanceIdentifier path;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.config.yang.config.concurrent_data_broker;
+
+import com.google.common.collect.Lists;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import org.opendaylight.controller.cluster.datastore.ConcurrentDOMDataBroker;
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
+import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+
+/**
+ * Config subsystem module that instantiates a {@link ConcurrentDOMDataBroker} backed by the
+ * configured (or default in-memory) config and operational data stores, and registers JMX
+ * beans for commit statistics and the commit-future executor.
+ */
+public class DomConcurrentDataBrokerModule extends AbstractDomConcurrentDataBrokerModule {
+ private static final String JMX_BEAN_TYPE = "DOMDataBroker";
+
+ // Constructor used when the module is created fresh.
+ public DomConcurrentDataBrokerModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ // Constructor used when the module is recreated from a previous instance (config reconfiguration).
+ public DomConcurrentDataBrokerModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final DependencyResolver dependencyResolver, final DomConcurrentDataBrokerModule oldModule, final AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public AutoCloseable createInstance() {
+ //Initializing Operational DOM DataStore defaulting to InMemoryDOMDataStore if one is not configured
+ DOMStore operStore = getOperationalDataStoreDependency();
+ if(operStore == null){
+ //we will default to InMemoryDOMDataStore creation
+ operStore = InMemoryDOMDataStoreFactory.create("DOM-OPER", getSchemaServiceDependency());
+ }
+
+ DOMStore configStore = getConfigDataStoreDependency();
+ if(configStore == null){
+ //we will default to InMemoryDOMDataStore creation
+ configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
+ }
+
+ // Map each logical datastore type to its backing store implementation.
+ final Map<LogicalDatastoreType, DOMStore> datastores = new EnumMap<>(LogicalDatastoreType.class);
+ datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
+ datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
+
+ /*
+ * We use an executor for commit ListenableFuture callbacks that favors reusing available
+ * threads over creating new threads at the expense of execution time. The assumption is
+ * that most ListenableFuture callbacks won't execute a lot of business logic where we want
+ * it to run quicker - many callbacks will likely just handle error conditions and do
+ * nothing on success. The executor queue capacity is bounded and, if the capacity is
+ * reached, subsequent submitted tasks will block the caller.
+ */
+ ExecutorService listenableFutureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(
+ getMaxDataBrokerFutureCallbackPoolSize(), getMaxDataBrokerFutureCallbackQueueSize(),
+ "CommitFutures");
+
+ // JMX beans registered here are unregistered by the closeable installed below.
+ final List<AbstractMXBean> mBeans = Lists.newArrayList();
+
+ final DurationStatisticsTracker commitStatsTracker;
+ final ConcurrentDOMDataBroker cdb = new ConcurrentDOMDataBroker(datastores, listenableFutureExecutor);
+ commitStatsTracker = cdb.getCommitStatsTracker();
+
+ if(commitStatsTracker != null) {
+ final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
+ commitStatsTracker, JMX_BEAN_TYPE);
+ commitStatsMXBean.registerMBean();
+ mBeans.add(commitStatsMXBean);
+ }
+
+ // create() may return null (e.g. when stats are unavailable for the executor type), so guard.
+ final AbstractMXBean commitFutureStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
+ "CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
+ if(commitFutureStatsMXBean != null) {
+ mBeans.add(commitFutureStatsMXBean);
+ }
+
+ // Ensure all registered MXBeans are unregistered when the broker is closed.
+ cdb.setCloseable(new AutoCloseable() {
+ @Override
+ public void close() {
+ for(AbstractMXBean mBean: mBeans) {
+ mBean.unregisterMBean();
+ }
+ }
+ });
+
+ return cdb;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.config.yang.config.concurrent_data_broker;
+
+/**
+ * Config subsystem factory for {@link DomConcurrentDataBrokerModule}; all behavior is
+ * inherited from the generated abstract factory.
+ */
+public class DomConcurrentDataBrokerModuleFactory extends AbstractDomConcurrentDataBrokerModuleFactory {
+
+}
--- /dev/null
+// Config subsystem service definition for the concurrent DOM data broker. It reuses the
+// dom-broker-config and dom-broker-operational groupings from opendaylight-sal-dom-broker-impl.
+module odl-concurrent-data-broker-cfg {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:concurrent-data-broker";
+ prefix "cdb";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ // NOTE(review): the config-dom-store-spi, operational-dom-store-spi and rpcx imports are not
+ // referenced directly in this module - confirm they are required before removing.
+ import opendaylight-config-dom-datastore {prefix config-dom-store-spi;}
+ import opendaylight-operational-dom-datastore {prefix operational-dom-store-spi;}
+ import opendaylight-md-sal-dom {prefix sal; }
+ import opendaylight-sal-dom-broker-impl { prefix broker; }
+ import rpc-context { prefix rpcx; revision-date 2013-06-17; }
+
+ description
+ "Service definition for concurrent dom broker.";
+
+ revision "2014-11-24" {
+ description
+ "Initial revision";
+ }
+
+ // Module type providing the async data broker service.
+ identity dom-concurrent-data-broker {
+ base config:module-type;
+ config:provided-service sal:dom-async-data-broker;
+ }
+
+ // Configuration knobs are shared with the in-memory broker via the imported grouping.
+ augment "/config:modules/config:module/config:configuration" {
+ case dom-concurrent-data-broker {
+ when "/config:modules/config:module/config:type = 'dom-concurrent-data-broker'";
+
+ uses broker:dom-broker-config;
+ }
+ }
+
+ // Runtime (JMX) state, likewise shared via the imported grouping.
+ augment "/config:modules/config:module/config:state" {
+ case dom-concurrent-data-broker {
+ when "/config:modules/config:module/config:type = 'dom-concurrent-data-broker'";
+
+ uses broker:dom-broker-operational;
+ }
+ }
+}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.sal.dom.broker.impl;
+package org.opendaylight.controller.cluster.datastore;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import org.opendaylight.controller.md.cluster.datastore.model.CompositeModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public class DataChangeListenerTest extends AbstractActorTest {
}
}};
}
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testDataChangedWithListenerRuntimeEx(){
+ new JavaTestKit(getSystem()) {{
+ AsyncDataChangeEvent mockChangeEvent1 = Mockito.mock(AsyncDataChangeEvent.class);
+ AsyncDataChangeEvent mockChangeEvent2 = Mockito.mock(AsyncDataChangeEvent.class);
+ AsyncDataChangeEvent mockChangeEvent3 = Mockito.mock(AsyncDataChangeEvent.class);
+
+ AsyncDataChangeListener mockListener = Mockito.mock(AsyncDataChangeListener.class);
+ Mockito.doThrow(new RuntimeException("mock")).when(mockListener).onDataChanged(mockChangeEvent2);
+
+ Props props = DataChangeListener.props(mockListener);
+ ActorRef subject = getSystem().actorOf(props, "testDataChangedWithListenerRuntimeEx");
+
+ // Let the DataChangeListener know that notifications should be enabled
+ subject.tell(new EnableNotification(true), getRef());
+
+ SchemaContext schemaContext = CompositeModel.createTestContext();
+
+ subject.tell(new DataChanged(schemaContext, mockChangeEvent1),getRef());
+ expectMsgClass(DataChangedReply.class);
+
+ subject.tell(new DataChanged(schemaContext, mockChangeEvent2),getRef());
+ expectMsgClass(DataChangedReply.class);
+
+ subject.tell(new DataChanged(schemaContext, mockChangeEvent3),getRef());
+ expectMsgClass(DataChangedReply.class);
+
+ Mockito.verify(mockListener).onDataChanged(mockChangeEvent1);
+ Mockito.verify(mockListener).onDataChanged(mockChangeEvent2);
+ Mockito.verify(mockListener).onDataChanged(mockChangeEvent3);
+ }};
+ }
}
}
@Test(expected=IllegalStateException.class)
- public void testxistsPreConditionCheck() {
+ public void testExistsPreConditionCheck() {
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
<artifactId>yang-parser-impl</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ </dependency>
+
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<!-- TODO Remove sal.broker.impl from export when SchemaAwareRpcRegistry is not used in connector anymore -->
org.opendaylight.controller.sal.dom.broker.impl,
org.opendaylight.controller.sal.dom.broker.impl.*,
+
+ <!-- Temporary until we move abstract classes out into SPI -->
+ org.opendaylight.controller.md.sal.dom.broker.impl,
+ org.opendaylight.controller.md.sal.dom.broker.impl.jmx,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.dom.impl.rev131028.*,
</Export-Package>
<Private-Package>org.opendaylight.controller.sal.dom.broker,
org.opendaylight.controller.sal.dom.broker.osgi,
org.opendaylight.controller.md.sal.dom.broker.impl,
org.opendaylight.controller.md.sal.dom.broker.impl.*,
org.opendaylight.yangtools.yang.util,
- org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.dom.impl.rev131028.*,
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.dom.pingpong.rev141107.*
</Private-Package>
<Import-Package>*</Import-Package>
*/
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+import com.google.common.collect.Lists;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
-import org.opendaylight.controller.md.sal.dom.broker.impl.ConcurrentDOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.broker.impl.AbstractDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.yangtools.util.DurationStatisticsTracker;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import com.google.common.collect.Lists;
/**
*
"CommitFutures");
final List<AbstractMXBean> mBeans = Lists.newArrayList();
-
final DurationStatisticsTracker commitStatsTracker;
- final AbstractDOMDataBroker broker;
-
- if (getAllowConcurrentCommits()) {
- final ConcurrentDOMDataBroker cdb = new ConcurrentDOMDataBroker(datastores, listenableFutureExecutor);
- commitStatsTracker = cdb.getCommitStatsTracker();
- broker = cdb;
- } else {
- /*
- * We use a single-threaded executor for commits with a bounded queue capacity. If the
- * queue capacity is reached, subsequent commit tasks will be rejected and the commits will
- * fail. This is done to relieve back pressure. This should be an extreme scenario - either
- * there's deadlock(s) somewhere and the controller is unstable or some rogue component is
- * continuously hammering commits too fast or the controller is just over-capacity for the
- * system it's running on.
- */
- ExecutorService commitExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- getMaxDataBrokerCommitQueueSize(), "WriteTxCommit");
-
- SerializedDOMDataBroker sdb = new SerializedDOMDataBroker(datastores,
- new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
- listenableFutureExecutor));
- commitStatsTracker = sdb.getCommitStatsTracker();
- broker = sdb;
-
- final AbstractMXBean commitExecutorStatsMXBean =
- ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
- JMX_BEAN_TYPE, null);
- if(commitExecutorStatsMXBean != null) {
- mBeans.add(commitExecutorStatsMXBean);
- }
+
+ /*
+ * We use a single-threaded executor for commits with a bounded queue capacity. If the
+ * queue capacity is reached, subsequent commit tasks will be rejected and the commits will
+ * fail. This is done to relieve back pressure. This should be an extreme scenario - either
+ * there's deadlock(s) somewhere and the controller is unstable or some rogue component is
+ * continuously hammering commits too fast or the controller is just over-capacity for the
+ * system it's running on.
+ */
+ ExecutorService commitExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
+ getMaxDataBrokerCommitQueueSize(), "WriteTxCommit");
+
+ SerializedDOMDataBroker sdb = new SerializedDOMDataBroker(datastores,
+ new DeadlockDetectingListeningExecutorService(commitExecutor,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
+ listenableFutureExecutor));
+ commitStatsTracker = sdb.getCommitStatsTracker();
+
+ final AbstractMXBean commitExecutorStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
+ JMX_BEAN_TYPE, null);
+ if(commitExecutorStatsMXBean != null) {
+ mBeans.add(commitExecutorStatsMXBean);
}
if(commitStatsTracker != null) {
mBeans.add(commitFutureStatsMXBean);
}
- broker.setCloseable(new AutoCloseable() {
+ sdb.setCloseable(new AutoCloseable() {
@Override
public void close() {
for(AbstractMXBean mBean: mBeans) {
}
});
- return broker;
+ return sdb;
}
}
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import javax.annotation.Nonnull;
import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
private static final Logger LOG = LoggerFactory.getLogger(PingPongTransactionChain.class);
private final DOMTransactionChain delegate;
- @GuardedBy("this")
- private PingPongTransaction bufferTransaction;
- @GuardedBy("this")
- private PingPongTransaction inflightTransaction;
- @GuardedBy("this")
- private boolean haveLocked;
@GuardedBy("this")
private boolean failed;
+ /**
+ * This updater is used to manipulate the "ready" transaction. We perform only atomic
+ * get-and-set on it.
+ */
+ private static final AtomicReferenceFieldUpdater<PingPongTransactionChain, PingPongTransaction> READY_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(PingPongTransactionChain.class, PingPongTransaction.class, "readyTx");
+ @SuppressWarnings("unused") // Accessed via READY_UPDATER
+ private volatile PingPongTransaction readyTx;
+
+ /**
+ * This updater is used to manipulate the "locked" transaction. A locked transaction
+ * means we know that the user still holds a transaction and should at some point call
+ * us. We perform on compare-and-swap to ensure we properly detect when a user is
+ * attempting to allocated multiple transactions concurrently.
+ */
+ private static final AtomicReferenceFieldUpdater<PingPongTransactionChain, PingPongTransaction> LOCKED_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(PingPongTransactionChain.class, PingPongTransaction.class, "lockedTx");
+ private volatile PingPongTransaction lockedTx;
+
+ /**
+ * This updater is used to manipulate the "inflight" transaction. There can be at most
+ * one of these at any given time. We perform only compare-and-swap on these.
+ */
+ private static final AtomicReferenceFieldUpdater<PingPongTransactionChain, PingPongTransaction> INFLIGHT_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(PingPongTransactionChain.class, PingPongTransaction.class, "inflightTx");
+ private volatile PingPongTransaction inflightTx;
+
PingPongTransactionChain(final DOMDataBroker broker, final TransactionChainListener listener) {
this.delegate = broker.createTransactionChain(new TransactionChainListener() {
@Override
LOG.debug("Delegate chain {} reported failure in {}", chain, transaction, cause);
final DOMDataReadWriteTransaction frontend;
- if (inflightTransaction == null) {
+ final PingPongTransaction tx = inflightTx;
+ if (tx == null) {
LOG.warn("Transaction chain {} failed with no pending transactions", chain);
frontend = null;
} else {
- frontend = inflightTransaction.getFrontendTransaction();
+ frontend = tx.getFrontendTransaction();
}
- listener.onTransactionChainFailed(PingPongTransactionChain.this, frontend , cause);
+ listener.onTransactionChainFailed(PingPongTransactionChain.this, frontend, cause);
delegateFailed();
}
private synchronized void delegateFailed() {
failed = true;
- if (!haveLocked) {
- processBuffer();
+
+ /*
+ * If we do not have a locked transaction, we need to ensure that
+ * the backend transaction is cancelled. Otherwise we can defer
+ * until the user calls us.
+ */
+ if (lockedTx == null) {
+ processIfReady();
}
}
- private synchronized PingPongTransaction allocateTransaction() {
- Preconditions.checkState(!haveLocked, "Attempted to start a transaction while a previous one is still outstanding");
- Preconditions.checkState(!failed, "Attempted to use a failed chain");
+ // Slow path: allocate a fresh backend transaction and attempt to install it as the
+ // locked transaction; cancels the delegate and fails if another allocation raced us.
+ private synchronized PingPongTransaction slowAllocateTransaction() {
+ final DOMDataReadWriteTransaction delegateTx = delegate.newReadWriteTransaction();
+ final PingPongTransaction newTx = new PingPongTransaction(delegateTx);
- if (bufferTransaction == null) {
- bufferTransaction = new PingPongTransaction(delegate.newReadWriteTransaction());
+ if (!LOCKED_UPDATER.compareAndSet(this, null, newTx)) {
+ delegateTx.cancel();
+ throw new IllegalStateException(String.format("New transaction %s raced with transaction %s", newTx, lockedTx));
}
- haveLocked = true;
- return bufferTransaction;
+ return newTx;
}
- @GuardedBy("this")
- private void processBuffer() {
- final PingPongTransaction tx = bufferTransaction;
+ private PingPongTransaction allocateTransaction() {
+ // Step 1: acquire current state
+ final PingPongTransaction oldTx = READY_UPDATER.getAndSet(this, null);
- if (tx != null) {
- if (failed) {
- LOG.debug("Cancelling transaction {}", tx);
- tx.getTransaction().cancel();
- bufferTransaction = null;
- return;
- }
+ // Slow path: allocate a delegate transaction
+ if (oldTx == null) {
+ return slowAllocateTransaction();
+ }
- LOG.debug("Submitting transaction {}", tx);
- final CheckedFuture<Void, ?> f = tx.getTransaction().submit();
- bufferTransaction = null;
- inflightTransaction = tx;
+ // Fast path: reuse current transaction. We will check
+ // failures and similar on submit().
+ if (!LOCKED_UPDATER.compareAndSet(this, null, oldTx)) {
+ // Ouch. Delegate chain has not detected a duplicate
+ // transaction allocation. This is the best we can do.
+ oldTx.getTransaction().cancel();
+ throw new IllegalStateException(String.format("Reusable transaction %s raced with transaction %s", oldTx, lockedTx));
+ }
- Futures.addCallback(f, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void result) {
- transactionSuccessful(tx, result);
- }
+ return oldTx;
+ }
- @Override
- public void onFailure(final Throwable t) {
- transactionFailed(tx, t);
- }
- });
+ // This forces allocateTransaction() on a slow path
+ @GuardedBy("this")
+ private void processIfReady() {
+ final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
+ if (tx != null) {
+ processTransaction(tx);
}
}
+ /**
+ * Process a ready transaction. The caller needs to ensure that
+ * each transaction is seen only once by this method.
+ *
+ * @param tx Transaction which needs processing.
+ */
+ @GuardedBy("this")
+ private void processTransaction(final @Nonnull PingPongTransaction tx) {
+ if (failed) {
+ LOG.debug("Cancelling transaction {}", tx);
+ tx.getTransaction().cancel();
+ return;
+ }
+
+ LOG.debug("Submitting transaction {}", tx);
+ if (!INFLIGHT_UPDATER.compareAndSet(this, null, tx)) {
+ LOG.warn("Submitting transaction {} while {} is still running", tx, inflightTx);
+ }
+
+ Futures.addCallback(tx.getTransaction().submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ transactionSuccessful(tx, result);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ transactionFailed(tx, t);
+ }
+ });
+ }
+
private void transactionSuccessful(final PingPongTransaction tx, final Void result) {
LOG.debug("Transaction {} completed successfully", tx);
- synchronized (this) {
- Preconditions.checkState(inflightTransaction == tx, "Successful transaction %s while %s was submitted", tx, inflightTransaction);
- inflightTransaction = null;
+ final boolean success = INFLIGHT_UPDATER.compareAndSet(this, tx, null);
+ Preconditions.checkState(success, "Successful transaction %s while %s was submitted", tx, inflightTx);
- if (!haveLocked) {
- processBuffer();
- }
+ synchronized (this) {
+ processIfReady();
}
// Can run unsynchronized
private void transactionFailed(final PingPongTransaction tx, final Throwable t) {
LOG.debug("Transaction {} failed", tx, t);
- synchronized (this) {
- Preconditions.checkState(inflightTransaction == tx, "Failed transaction %s while %s was submitted", tx, inflightTransaction);
- inflightTransaction = null;
- }
+ final boolean success = INFLIGHT_UPDATER.compareAndSet(this, tx, null);
+ Preconditions.checkState(success, "Failed transaction %s while %s was submitted", tx, inflightTx);
tx.onFailure(t);
}
- private synchronized void readyTransaction(final PingPongTransaction tx) {
- Preconditions.checkState(haveLocked, "Attempted to submit transaction while it is not outstanding");
- Preconditions.checkState(bufferTransaction == tx, "Attempted to submit transaction %s while we have %s", tx, bufferTransaction);
+ private void readyTransaction(final @Nonnull PingPongTransaction tx) {
+ final boolean lockedMatch = LOCKED_UPDATER.compareAndSet(this, tx, null);
+ Preconditions.checkState(lockedMatch, "Attempted to submit transaction %s while we have %s", tx, lockedTx);
- haveLocked = false;
- LOG.debug("Transaction {} unlocked", bufferTransaction);
+ LOG.debug("Transaction {} unlocked", tx);
- if (inflightTransaction == null) {
- processBuffer();
+ if (inflightTx == null) {
+ synchronized (this) {
+ processTransaction(tx);
+ }
}
}
@Override
- public synchronized void close() {
- Preconditions.checkState(!haveLocked, "Attempted to close chain while a transaction is outstanding");
- processBuffer();
- delegate.close();
+ public void close() {
+ final PingPongTransaction notLocked = lockedTx;
+ Preconditions.checkState(notLocked == null, "Attempted to close chain with outstanding transaction %s", notLocked);
+
+ synchronized (this) {
+ processIfReady();
+ delegate.close();
+ }
}
@Override
*
* @see ExceptionMapper
*/
-final class TransactionCommitFailedExceptionMapper
+public final class TransactionCommitFailedExceptionMapper
extends ExceptionMapper<TransactionCommitFailedException> {
- static final TransactionCommitFailedExceptionMapper PRE_COMMIT_MAPPER = create("preCommit");
+ public static final TransactionCommitFailedExceptionMapper PRE_COMMIT_MAPPER = create("preCommit");
- static final TransactionCommitFailedExceptionMapper CAN_COMMIT_ERROR_MAPPER = create("canCommit");
+ public static final TransactionCommitFailedExceptionMapper CAN_COMMIT_ERROR_MAPPER = create("canCommit");
- static final TransactionCommitFailedExceptionMapper COMMIT_ERROR_MAPPER = create("commit");
+ public static final TransactionCommitFailedExceptionMapper COMMIT_ERROR_MAPPER = create("commit");
private TransactionCommitFailedExceptionMapper(final String opName) {
super( opName, TransactionCommitFailedException.class );
}
@Override
- protected TransactionCommitFailedException newWithCause( String message, Throwable cause ) {
+ protected TransactionCommitFailedException newWithCause( final String message, final Throwable cause ) {
return new TransactionCommitFailedException( message, cause );
}
}
\ No newline at end of file
module opendaylight-sal-dom-broker-impl {
- yang-version 1;
+ yang-version 1;
namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl";
- prefix "binding-impl";
+ prefix "broker";
- import config { prefix config; revision-date 2013-04-05; }
- import opendaylight-md-sal-dom {prefix sal;}
- import opendaylight-md-sal-common {prefix common;}
- import opendaylight-config-dom-datastore {prefix config-dom-store-spi;}
- import opendaylight-operational-dom-datastore {prefix operational-dom-store-spi;}
+ import config { prefix config; revision-date 2013-04-05; }
+ import ietf-yang-types { prefix yang; }
+ import opendaylight-md-sal-dom {prefix sal;}
+ import opendaylight-md-sal-common {prefix common;}
+ import opendaylight-config-dom-datastore {prefix config-dom-store-spi;}
+ import opendaylight-operational-dom-datastore {prefix operational-dom-store-spi;}
+ import rpc-context { prefix rpcx; revision-date 2013-06-17; }
description
"Service definition for Binding Aware MD-SAL.
Note: The dom-inmemory-data-broker utilizes configurable config-dom-datastore
and operation-dom-datastore. If configuration is not done for this stores
then it defaults to InMemoryDOMDataStore";
-
+
revision "2013-10-28" {
description
"Initial revision";
base config:module-type;
config:provided-service sal:dom-broker-osgi-registry;
config:java-name-prefix DomBrokerImpl;
- }
-
-
+ }
+
+
identity dom-inmemory-data-broker {
base config:module-type;
config:provided-service sal:dom-async-data-broker;
}
-
+
identity schema-service-singleton {
base config:module-type;
config:provided-service sal:schema-service;
}
}
}
-
- augment "/config:modules/config:module/config:configuration" {
- case dom-inmemory-data-broker {
- when "/config:modules/config:module/config:type = 'dom-inmemory-data-broker'";
-
- container schema-service {
- uses config:service-ref {
- refine type {
- mandatory false;
- config:required-identity sal:schema-service;
- }
+ grouping dom-broker-config {
+ container schema-service {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity sal:schema-service;
}
-
}
+ }
- container config-data-store{
- uses config:service-ref {
- refine type {
- mandatory false;
- config:required-identity config-dom-store-spi:config-dom-datastore;
- }
+ container config-data-store {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity config-dom-store-spi:config-dom-datastore;
}
- }
+ }
+ }
- container operational-data-store{
- uses config:service-ref {
- refine type {
- mandatory false;
- config:required-identity operational-dom-store-spi:operational-dom-datastore;
- }
+ container operational-data-store {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity operational-dom-store-spi:operational-dom-datastore;
}
- }
+ }
+ }
- leaf max-data-broker-future-callback-queue-size {
- default 1000;
- type uint16;
- description "The maximum queue size for the data broker's commit future callback executor.";
- }
+ leaf max-data-broker-future-callback-queue-size {
+ default 1000;
+ type uint16;
+ description "The maximum queue size for the data broker's commit future callback executor.";
+ }
- leaf max-data-broker-future-callback-pool-size {
- default 20;
- type uint16;
- description "The maximum thread pool size for the data broker's commit future callback executor.";
- }
+ leaf max-data-broker-future-callback-pool-size {
+ default 20;
+ type uint16;
+ description "The maximum thread pool size for the data broker's commit future callback executor.";
+ }
- leaf max-data-broker-commit-queue-size {
- default 5000;
- type uint16;
- description "The maximum queue size for the data broker's commit executor.";
- }
+ leaf max-data-broker-commit-queue-size {
+ default 5000;
+ type uint16;
+ description "The maximum queue size for the data broker's commit executor.";
+ }
+ }
+
+ grouping dom-broker-operational {
+ leaf total-commits {
+ type uint64;
+ }
+
+ leaf average-commit {
+ type uint64;
+ units ns;
+ }
- leaf allow-concurrent-commits {
- default false;
- type boolean;
- description "Specifies whether or not to allow 3-phrase commits to run concurrently.
- Use with caution. If set to true, the data store implementations must be prepared
- to handle concurrent commits. The default is false";
+ leaf longest-commit-duration {
+ type uint64;
+ units ns;
+ }
+
+ leaf longest-commit-timestamp {
+ type yang:date-and-time;
+ }
+
+ leaf shortest-commit-duration {
+ type uint64;
+ units ns;
+ }
+
+ leaf shortest-commit-timestamp {
+ type yang:date-and-time;
+ }
+
+ rpcx:rpc-context-instance dom-broker-rpc-ctx;
+ }
+
+ identity dom-broker-rpc-ctx;
+
+ rpc reset-statistics {
+ description
+        "JMX call to clear the dom-broker commit statistics counters.";
+
+ input {
+ uses rpcx:rpc-context-ref {
+ refine context-instance {
+ rpcx:rpc-context-instance dom-broker-rpc-ctx;
+ }
}
}
}
-
+
+ augment "/config:modules/config:module/config:configuration" {
+ case dom-inmemory-data-broker {
+ when "/config:modules/config:module/config:type = 'dom-inmemory-data-broker'";
+
+ uses dom-broker-config;
+ }
+ }
+
+ augment "/config:modules/config:module/config:state" {
+ case dom-inmemory-data-broker {
+ when "/config:modules/config:module/config:type = 'dom-inmemory-data-broker'";
+
+ uses dom-broker-operational;
+ }
+ }
+
augment "/config:modules/config:module/config:state" {
case schema-service-singleton {
when "/config:modules/config:module/config:type = 'schema-service-singleton'";
}
}
-
+
augment "/config:modules/config:module/config:state" {
case dom-broker-impl {
when "/config:modules/config:module/config:type = 'dom-broker-impl'";
container data {
uses common:data-state;
- }
+ }
}
}
}
return result;
}
- // XSQLAdapter.log(""+node);
Map<?, ?> children = XSQLODLUtils.getChildren(node);
for (Object c : children.values()) {
+ result.add(c);
+            /* I don't remember why I did this... possibly to prevent different siblings from being queried together
Map<?, ?> sons = XSQLODLUtils.getChildren(c);
for (Object child : sons.values()) {
result.add(child);
- }
+ }*/
}
return result;
return result;
}
- public List<Record> addRecords(Object element, XSQLBluePrintNode node,
- boolean root, String tableName, XSQLBluePrint bluePrint) {
-
+ public List<Record> addRecords(Object element, XSQLBluePrintNode node,boolean root, String tableName, XSQLBluePrint bluePrint) {
List<Record> result = new LinkedList<Record>();
+        // In case this is a sibling of the requested table, the element type
+        // won't be in the path of the leaf node
+ if(node==null){
+ return result;
+ }
String nodeID = XSQLODLUtils.getNodeIdentiofier(element);
if (node.getODLTableName().equals(nodeID)) {
- XSQLBluePrintNode bluePrintNode = bluePrint
- .getBluePrintNodeByODLTableName(nodeID)[0];
+ XSQLBluePrintNode bluePrintNode = bluePrint.getBluePrintNodeByODLTableName(nodeID)[0];
Record rec = new Record();
rec.element = element;
- XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode
- .getBluePrintNodeName());
- if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName())
- || bpn != null) {
+ XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode.getBluePrintNodeName());
+ if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName()) || bpn != null) {
Map<String, Object> allKeyValues = collectColumnValues(element, bpn);
if (!(isObjectFitCriteria(allKeyValues,
bpn.getBluePrintNodeName()) == 1)) {
}
XSQLBluePrintNode parent = node.getParent();
- List<Record> subRecords = addRecords(element, parent, false, tableName,
- bluePrint);
+ List<Record> subRecords = addRecords(element, parent, false, tableName,bluePrint);
for (Record subRec : subRecords) {
List<Object> subO = getChildren(subRec.element, tableName,
bluePrint);
.add("moduleBasedCapabilities", moduleBasedCaps)
.add("rollback", isRollbackSupported())
.add("monitoring", isMonitoringSupported())
+ .add("candidate", isCandidateSupported())
+ .add("writableRunning", isRunningWritable())
.toString();
}
return containsNonModuleCapability(NetconfMessageTransformUtil.NETCONF_CANDIDATE_URI.toString());
}
+ public boolean isRunningWritable() {
+ return containsNonModuleCapability(NetconfMessageTransformUtil.NETCONF_RUNNING_WRITABLE_URI.toString());
+ }
+
public boolean isMonitoringSupported() {
return containsModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING)
|| containsNonModuleCapability(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING.getNamespace().toString());
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
-import org.opendaylight.controller.sal.connect.netconf.sal.tx.NetconfDeviceReadOnlyTx;
-import org.opendaylight.controller.sal.connect.netconf.sal.tx.NetconfDeviceReadWriteTx;
-import org.opendaylight.controller.sal.connect.netconf.sal.tx.NetconfDeviceWriteOnlyTx;
+import org.opendaylight.controller.sal.connect.netconf.sal.tx.ReadOnlyTx;
+import org.opendaylight.controller.sal.connect.netconf.sal.tx.ReadWriteTx;
+import org.opendaylight.controller.sal.connect.netconf.sal.tx.WriteCandidateTx;
+import org.opendaylight.controller.sal.connect.netconf.sal.tx.WriteCandidateRunningTx;
+import org.opendaylight.controller.sal.connect.netconf.sal.tx.WriteRunningTx;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
final class NetconfDeviceDataBroker implements DOMDataBroker {
private final RemoteDeviceId id;
- private final RpcImplementation rpc;
+ private final NetconfBaseOps netconfOps;
private final NetconfSessionCapabilities netconfSessionPreferences;
private final DataNormalizer normalizer;
- public NetconfDeviceDataBroker(final RemoteDeviceId id, final RpcImplementation rpc, final SchemaContext schemaContext, NetconfSessionCapabilities netconfSessionPreferences) {
+ public NetconfDeviceDataBroker(final RemoteDeviceId id, final RpcImplementation rpc, final SchemaContext schemaContext, final NetconfSessionCapabilities netconfSessionPreferences) {
this.id = id;
- this.rpc = rpc;
+ this.netconfOps = new NetconfBaseOps(rpc);
this.netconfSessionPreferences = netconfSessionPreferences;
normalizer = new DataNormalizer(schemaContext);
}
@Override
public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
- return new NetconfDeviceReadOnlyTx(rpc, normalizer, id);
+ return new ReadOnlyTx(netconfOps, normalizer, id);
}
@Override
public DOMDataReadWriteTransaction newReadWriteTransaction() {
- return new NetconfDeviceReadWriteTx(newReadOnlyTransaction(), newWriteOnlyTransaction());
+ return new ReadWriteTx(newReadOnlyTransaction(), newWriteOnlyTransaction());
}
@Override
public DOMDataWriteTransaction newWriteOnlyTransaction() {
- return new NetconfDeviceWriteOnlyTx(id, rpc, normalizer, netconfSessionPreferences.isCandidateSupported(), netconfSessionPreferences.isRollbackSupported());
+ if(netconfSessionPreferences.isCandidateSupported()) {
+ if(netconfSessionPreferences.isRunningWritable()) {
+ return new WriteCandidateRunningTx(id, netconfOps, normalizer, netconfSessionPreferences);
+ } else {
+ return new WriteCandidateTx(id, netconfOps, normalizer, netconfSessionPreferences);
+ }
+ } else {
+ return new WriteRunningTx(id, netconfOps, normalizer, netconfSessionPreferences);
+ }
}
@Override
public ListenerRegistration<DOMDataChangeListener> registerDataChangeListener(final LogicalDatastoreType store, final YangInstanceIdentifier path, final DOMDataChangeListener listener, final DataChangeScope triggeringScope) {
- throw new UnsupportedOperationException("Data change listeners not supported for netconf mount point");
+ throw new UnsupportedOperationException(id + ": Data change listeners not supported for netconf mount point");
}
@Override
public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
- // TODO implement
- throw new UnsupportedOperationException("Transaction chains not supported for netconf mount point");
+ throw new UnsupportedOperationException(id + ": Transaction chains not supported for netconf mount point");
}
+
}
--- /dev/null
+package org.opendaylight.controller.sal.connect.netconf.sal.tx;
+
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.createEditConfigStructure;
+
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.ModifyAction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+public abstract class AbstractWriteTx implements DOMDataWriteTransaction {
+ protected final RemoteDeviceId id;
+ protected final NetconfBaseOps netOps;
+ protected final DataNormalizer normalizer;
+ protected final NetconfSessionCapabilities netconfSessionPreferences;
+ // Allow commit to be called only once
+ protected boolean finished = false;
+
+ public AbstractWriteTx(final NetconfBaseOps netOps, final RemoteDeviceId id, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ this.netOps = netOps;
+ this.id = id;
+ this.normalizer = normalizer;
+ this.netconfSessionPreferences = netconfSessionPreferences;
+ init();
+ }
+
+ protected void checkNotFinished() {
+ Preconditions.checkState(!isFinished(), "%s: Transaction %s already finished", id, getIdentifier());
+ }
+
+ protected boolean isFinished() {
+ return finished;
+ }
+
+ protected void invokeBlocking(final String msg, final Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>> op) throws NetconfDocumentedException {
+ try {
+ final RpcResult<CompositeNode> compositeNodeRpcResult = op.apply(netOps).get(1L, TimeUnit.MINUTES);
+ if(compositeNodeRpcResult.isSuccessful() == false) {
+ throw new NetconfDocumentedException(id + ": " + msg + " failed: " + compositeNodeRpcResult.getErrors(), NetconfDocumentedException.ErrorType.application,
+ NetconfDocumentedException.ErrorTag.operation_failed, NetconfDocumentedException.ErrorSeverity.warning);
+ }
+ } catch (final InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new RuntimeException(e);
+ } catch (final ExecutionException | TimeoutException e) {
+ throw new NetconfDocumentedException(id + ": " + msg + " failed: " + e.getMessage(), e, NetconfDocumentedException.ErrorType.application,
+ NetconfDocumentedException.ErrorTag.operation_failed, NetconfDocumentedException.ErrorSeverity.warning);
+ }
+ }
+
+ @Override
+ public synchronized boolean cancel() {
+ if(isFinished()) {
+ return false;
+ }
+
+ finished = true;
+ cleanup();
+ return true;
+ }
+
+ protected abstract void init();
+
+ protected abstract void cleanup();
+
+ @Override
+ public Object getIdentifier() {
+ return this;
+ }
+
+ @Override
+ public synchronized void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ checkEditable(store);
+
+ try {
+ final YangInstanceIdentifier legacyPath = ReadOnlyTx.toLegacyPath(normalizer, path, id);
+ final CompositeNode legacyData = normalizer.toLegacy(path, data);
+ editConfig(
+ createEditConfigStructure(legacyPath, Optional.of(ModifyAction.REPLACE), Optional.fromNullable(legacyData)), Optional.of(ModifyAction.NONE));
+ } catch (final NetconfDocumentedException e) {
+ handleEditException(path, data, e, "putting");
+ }
+ }
+
+ protected abstract void handleEditException(YangInstanceIdentifier path, NormalizedNode<?, ?> data, NetconfDocumentedException e, String editType);
+ protected abstract void handleDeleteException(YangInstanceIdentifier path, NetconfDocumentedException e);
+
+ @Override
+ public synchronized void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ checkEditable(store);
+
+ try {
+ final YangInstanceIdentifier legacyPath = ReadOnlyTx.toLegacyPath(normalizer, path, id);
+ final CompositeNode legacyData = normalizer.toLegacy(path, data);
+ editConfig(
+ createEditConfigStructure(legacyPath, Optional.<ModifyAction>absent(), Optional.fromNullable(legacyData)), Optional.<ModifyAction>absent());
+ } catch (final NetconfDocumentedException e) {
+ handleEditException(path, data, e, "merge");
+ }
+ }
+
+ @Override
+ public synchronized void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ checkEditable(store);
+
+ try {
+ editConfig(createEditConfigStructure(
+ ReadOnlyTx.toLegacyPath(normalizer, path, id), Optional.of(ModifyAction.DELETE),
+ Optional.<CompositeNode>absent()), Optional.of(ModifyAction.NONE));
+ } catch (final NetconfDocumentedException e) {
+ handleDeleteException(path, e);
+ }
+ }
+
+ @Override
+ public final ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ checkNotFinished();
+ finished = true;
+
+ return performCommit();
+ }
+
+ protected abstract ListenableFuture<RpcResult<TransactionStatus>> performCommit();
+
+ private void checkEditable(final LogicalDatastoreType store) {
+ checkNotFinished();
+ Preconditions.checkArgument(store == LogicalDatastoreType.CONFIGURATION, "Can edit only configuration data, not %s", store);
+ }
+
+ protected abstract void editConfig(CompositeNode editStructure, Optional<ModifyAction> defaultOperation) throws NetconfDocumentedException;
+}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html.
- */
-
-package org.opendaylight.controller.sal.connect.netconf.sal.tx;
-
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.DISCARD_CHANGES_RPC_CONTENT;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_CANDIDATE_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_CONFIG_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DEFAULT_OPERATION_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DISCARD_CHANGES_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_EDIT_CONFIG_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_ERROR_OPTION_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_OPERATION_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_RUNNING_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_TARGET_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.ROLLBACK_ON_ERROR_OPTION;
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
-import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.ModifyAction;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
-import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class NetconfDeviceWriteOnlyTx implements DOMDataWriteTransaction, FutureCallback<RpcResult<TransactionStatus>> {
-
- private static final Logger LOG = LoggerFactory.getLogger(NetconfDeviceWriteOnlyTx.class);
-
- private final RemoteDeviceId id;
- private final RpcImplementation rpc;
- private final DataNormalizer normalizer;
-
- private final boolean rollbackSupported;
- private final boolean candidateSupported;
- private final CompositeNode targetNode;
-
- // Allow commit to be called only once
- private final AtomicBoolean finished = new AtomicBoolean(false);
-
- public NetconfDeviceWriteOnlyTx(final RemoteDeviceId id, final RpcImplementation rpc, final DataNormalizer normalizer, final boolean candidateSupported, final boolean rollbackOnErrorSupported) {
- this.id = id;
- this.rpc = rpc;
- this.normalizer = normalizer;
-
- this.candidateSupported = candidateSupported;
- this.targetNode = getTargetNode(this.candidateSupported);
- this.rollbackSupported = rollbackOnErrorSupported;
- }
-
- @Override
- public boolean cancel() {
- if(isFinished()) {
- return false;
- }
-
- return discardChanges();
- }
-
- private boolean isFinished() {
- return finished.get();
- }
-
- private boolean discardChanges() {
- finished.set(true);
-
- if(candidateSupported) {
- sendDiscardChanges();
- }
- return true;
- }
-
- // TODO should the edit operations be blocking ?
- // TODO should the discard-changes operations be blocking ?
-
- @Override
- public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotFinished();
- Preconditions.checkArgument(store == LogicalDatastoreType.CONFIGURATION, "Can merge only configuration, not %s", store);
-
- try {
- final YangInstanceIdentifier legacyPath = NetconfDeviceReadOnlyTx.toLegacyPath(normalizer, path, id);
- final CompositeNode legacyData = normalizer.toLegacy(path, data);
- sendEditRpc(
- createEditConfigStructure(legacyPath, Optional.of(ModifyAction.REPLACE), Optional.fromNullable(legacyData)), Optional.of(ModifyAction.NONE));
- } catch (final ExecutionException e) {
- LOG.warn("{}: Error putting data to {}, data: {}, discarding changes", id, path, data, e);
- discardChanges();
- throw new RuntimeException(id + ": Error while replacing " + path, e);
- }
- }
-
- private void checkNotFinished() {
- Preconditions.checkState(isFinished() == false, "%s: Transaction %s already finished", id, getIdentifier());
- }
-
- @Override
- public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotFinished();
- Preconditions.checkArgument(store == LogicalDatastoreType.CONFIGURATION, "%s: Can merge only configuration, not %s", id, store);
-
- try {
- final YangInstanceIdentifier legacyPath = NetconfDeviceReadOnlyTx.toLegacyPath(normalizer, path, id);
- final CompositeNode legacyData = normalizer.toLegacy(path, data);
- sendEditRpc(
- createEditConfigStructure(legacyPath, Optional.<ModifyAction> absent(), Optional.fromNullable(legacyData)), Optional.<ModifyAction> absent());
- } catch (final ExecutionException e) {
- LOG.warn("{}: Error merging data to {}, data: {}, discarding changes", id, path, data, e);
- discardChanges();
- throw new RuntimeException(id + ": Error while merging " + path, e);
- }
- }
-
- @Override
- public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- checkNotFinished();
- Preconditions.checkArgument(store == LogicalDatastoreType.CONFIGURATION, "%s: Can merge only configuration, not %s", id, store);
-
- try {
- sendEditRpc(
- createEditConfigStructure(NetconfDeviceReadOnlyTx.toLegacyPath(normalizer, path, id), Optional.of(ModifyAction.DELETE), Optional.<CompositeNode>absent()), Optional.of(ModifyAction.NONE));
- } catch (final ExecutionException e) {
- LOG.warn("{}: Error deleting data {}, discarding changes", id, path, e);
- discardChanges();
- throw new RuntimeException(id + ": Error while deleting " + path, e);
- }
- }
-
- @Override
- public CheckedFuture<Void, TransactionCommitFailedException> submit() {
- final ListenableFuture<Void> commmitFutureAsVoid = Futures.transform(commit(), new Function<RpcResult<TransactionStatus>, Void>() {
- @Override
- public Void apply(final RpcResult<TransactionStatus> input) {
- return null;
- }
- });
-
- return Futures.makeChecked(commmitFutureAsVoid, new Function<Exception, TransactionCommitFailedException>() {
- @Override
- public TransactionCommitFailedException apply(final Exception input) {
- return new TransactionCommitFailedException("Submit of transaction " + getIdentifier() + " failed", input);
- }
- });
- }
-
- @Override
- public ListenableFuture<RpcResult<TransactionStatus>> commit() {
- checkNotFinished();
- finished.set(true);
-
- if(candidateSupported == false) {
- return Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
- }
-
- final ListenableFuture<RpcResult<CompositeNode>> rpcResult = rpc.invokeRpc(
- NetconfMessageTransformUtil.NETCONF_COMMIT_QNAME, NetconfMessageTransformUtil.COMMIT_RPC_CONTENT);
-
- final ListenableFuture<RpcResult<TransactionStatus>> transformed = Futures.transform(rpcResult,
- new Function<RpcResult<CompositeNode>, RpcResult<TransactionStatus>>() {
- @Override
- public RpcResult<TransactionStatus> apply(final RpcResult<CompositeNode> input) {
- if (input.isSuccessful()) {
- return RpcResultBuilder.success(TransactionStatus.COMMITED).build();
- } else {
- final RpcResultBuilder<TransactionStatus> failed = RpcResultBuilder.failed();
- for (final RpcError rpcError : input.getErrors()) {
- failed.withError(rpcError.getErrorType(), rpcError.getTag(), rpcError.getMessage(),
- rpcError.getApplicationTag(), rpcError.getInfo(), rpcError.getCause());
- }
- return failed.build();
- }
- }
- });
-
- Futures.addCallback(transformed, this);
- return transformed;
- }
-
- @Override
- public void onSuccess(final RpcResult<TransactionStatus> result) {
- LOG.debug("{}: Write successful, transaction: {}", id, getIdentifier());
- }
-
- @Override
- public void onFailure(final Throwable t) {
- LOG.warn("{}: Write failed, transaction {}, discarding changes", id, getIdentifier(), t);
- discardChanges();
- }
-
- private void sendEditRpc(final CompositeNode editStructure, final Optional<ModifyAction> defaultOperation) throws ExecutionException {
- final CompositeNode editConfigRequest = createEditConfigRequest(editStructure, defaultOperation);
- final RpcResult<CompositeNode> rpcResult;
- try {
- rpcResult = rpc.invokeRpc(NETCONF_EDIT_CONFIG_QNAME, editConfigRequest).get();
- } catch (final InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new RuntimeException(id + ": Interrupted while waiting for response", e);
- }
-
- // Check result
- if(rpcResult.isSuccessful() == false) {
- throw new ExecutionException(
- String.format("%s: Pre-commit rpc failed, request: %s, errors: %s", id, editConfigRequest, rpcResult.getErrors()), null);
- }
- }
-
- private void sendDiscardChanges() {
- final ListenableFuture<RpcResult<CompositeNode>> discardFuture = rpc.invokeRpc(NETCONF_DISCARD_CHANGES_QNAME, DISCARD_CHANGES_RPC_CONTENT);
- Futures.addCallback(discardFuture, new FutureCallback<RpcResult<CompositeNode>>() {
- @Override
- public void onSuccess(final RpcResult<CompositeNode> result) {
- LOG.debug("{}: Discarding transaction: {}", id, getIdentifier());
- }
-
- @Override
- public void onFailure(final Throwable t) {
- LOG.error("{}: Discarding changes failed, transaction: {}. Device configuration might be corrupted", id, getIdentifier(), t);
- throw new RuntimeException(id + ": Discarding changes failed, transaction " + getIdentifier(), t);
- }
- });
- }
-
- private CompositeNode createEditConfigStructure(final YangInstanceIdentifier dataPath, final Optional<ModifyAction> operation,
- final Optional<CompositeNode> lastChildOverride) {
- Preconditions.checkArgument(Iterables.isEmpty(dataPath.getPathArguments()) == false, "Instance identifier with empty path %s", dataPath);
-
- // Create deepest edit element with expected edit operation
- CompositeNode previous = getDeepestEditElement(dataPath.getLastPathArgument(), operation, lastChildOverride);
-
- Iterator<PathArgument> it = dataPath.getReversePathArguments().iterator();
- // Remove already processed deepest child
- it.next();
-
- // Create edit structure in reversed order
- while (it.hasNext()) {
- final YangInstanceIdentifier.PathArgument arg = it.next();
- final CompositeNodeBuilder<ImmutableCompositeNode> builder = ImmutableCompositeNode.builder();
- builder.setQName(arg.getNodeType());
-
- addPredicatesToCompositeNodeBuilder(getPredicates(arg), builder);
-
- builder.add(previous);
- previous = builder.toInstance();
- }
- return ImmutableCompositeNode.create(NETCONF_CONFIG_QNAME, ImmutableList.<Node<?>>of(previous));
- }
-
- private void addPredicatesToCompositeNodeBuilder(final Map<QName, Object> predicates, final CompositeNodeBuilder<ImmutableCompositeNode> builder) {
- for (final Map.Entry<QName, Object> entry : predicates.entrySet()) {
- builder.addLeaf(entry.getKey(), entry.getValue());
- }
- }
-
- private Map<QName, Object> getPredicates(final YangInstanceIdentifier.PathArgument arg) {
- Map<QName, Object> predicates = Collections.emptyMap();
- if (arg instanceof YangInstanceIdentifier.NodeIdentifierWithPredicates) {
- predicates = ((YangInstanceIdentifier.NodeIdentifierWithPredicates) arg).getKeyValues();
- }
- return predicates;
- }
-
- private CompositeNode getDeepestEditElement(final YangInstanceIdentifier.PathArgument arg, final Optional<ModifyAction> operation, final Optional<CompositeNode> lastChildOverride) {
- final CompositeNodeBuilder<ImmutableCompositeNode> builder = ImmutableCompositeNode.builder();
- builder.setQName(arg.getNodeType());
-
- final Map<QName, Object> predicates = getPredicates(arg);
- addPredicatesToCompositeNodeBuilder(predicates, builder);
-
- if (operation.isPresent()) {
- builder.setAttribute(NETCONF_OPERATION_QNAME, modifyOperationToXmlString(operation.get()));
- }
- if (lastChildOverride.isPresent()) {
- final List<Node<?>> children = lastChildOverride.get().getValue();
- for(final Node<?> child : children) {
- if(!predicates.containsKey(child.getKey())) {
- builder.add(child);
- }
- }
- }
-
- return builder.toInstance();
- }
-
- private CompositeNode createEditConfigRequest(final CompositeNode editStructure, final Optional<ModifyAction> defaultOperation) {
- final CompositeNodeBuilder<ImmutableCompositeNode> ret = ImmutableCompositeNode.builder();
-
- // Target
- final Node<?> targetWrapperNode = ImmutableCompositeNode.create(NETCONF_TARGET_QNAME, ImmutableList.<Node<?>>of(targetNode));
- ret.add(targetWrapperNode);
-
- // Default operation
- if(defaultOperation.isPresent()) {
- final SimpleNode<String> defOp = NodeFactory.createImmutableSimpleNode(NETCONF_DEFAULT_OPERATION_QNAME, null, modifyOperationToXmlString(defaultOperation.get()));
- ret.add(defOp);
- }
-
- // Error option
- if(rollbackSupported) {
- ret.addLeaf(NETCONF_ERROR_OPTION_QNAME, ROLLBACK_ON_ERROR_OPTION);
- }
-
- ret.setQName(NETCONF_EDIT_CONFIG_QNAME);
- // Edit content
- ret.add(editStructure);
- return ret.toInstance();
- }
-
- private String modifyOperationToXmlString(final ModifyAction operation) {
- return operation.name().toLowerCase();
- }
-
- public CompositeNode getTargetNode(final boolean candidateSupported) {
- if(candidateSupported) {
- return ImmutableCompositeNode.create(NETCONF_CANDIDATE_QNAME, ImmutableList.<Node<?>>of());
- } else {
- return ImmutableCompositeNode.create(NETCONF_RUNNING_QNAME, ImmutableList.<Node<?>>of());
- }
- }
-
- @Override
- public Object getIdentifier() {
- return this;
- }
-}
*/
package org.opendaylight.controller.sal.connect.netconf.sal.tx;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DATA_QNAME;
+
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.concurrent.ExecutionException;
-
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.CONFIG_SOURCE_RUNNING;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DATA_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.toFilterStructure;
-
-public final class NetconfDeviceReadOnlyTx implements DOMDataReadOnlyTransaction {
+public final class ReadOnlyTx implements DOMDataReadOnlyTransaction {
- private static final Logger LOG = LoggerFactory.getLogger(NetconfDeviceReadOnlyTx.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyTx.class);
- private final RpcImplementation rpc;
+ private final NetconfBaseOps netconfOps;
private final DataNormalizer normalizer;
private final RemoteDeviceId id;
+ private final FutureCallback<RpcResult<CompositeNode>> loggingCallback;
- public NetconfDeviceReadOnlyTx(final RpcImplementation rpc, final DataNormalizer normalizer, final RemoteDeviceId id) {
- this.rpc = rpc;
+ public ReadOnlyTx(final NetconfBaseOps netconfOps, final DataNormalizer normalizer, final RemoteDeviceId id) {
+ this.netconfOps = netconfOps;
this.normalizer = normalizer;
this.id = id;
+ // Simple logging callback to log result of read operation
+ loggingCallback = new FutureCallback<RpcResult<CompositeNode>>() {
+ @Override
+ public void onSuccess(final RpcResult<CompositeNode> result) {
+ if(result.isSuccessful()) {
+ LOG.trace("{}: Reading data successful", id);
+ } else {
+ LOG.warn("{}: Reading data unsuccessful: {}", id, result.getErrors());
+ }
+
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ LOG.warn("{}: Reading data failed", id, t);
+ }
+ };
}
private CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readConfigurationData(
final YangInstanceIdentifier path) {
- final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_GET_CONFIG_QNAME,
- NetconfMessageTransformUtil.wrap(NETCONF_GET_CONFIG_QNAME, CONFIG_SOURCE_RUNNING, toFilterStructure(path)));
-
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
+ final ListenableFuture<RpcResult<CompositeNode>> configRunning = netconfOps.getConfigRunning(loggingCallback, Optional.fromNullable(path));
+ // Find data node and normalize its content
+ final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(configRunning, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
@Override
public Optional<NormalizedNode<?, ?>> apply(final RpcResult<CompositeNode> result) {
checkReadSuccess(result, path);
private void checkReadSuccess(final RpcResult<CompositeNode> result, final YangInstanceIdentifier path) {
try {
Preconditions.checkArgument(result.isSuccessful(), "%s: Unable to read data: %s, errors: %s", id, path, result.getErrors());
- } catch (IllegalArgumentException e) {
+ } catch (final IllegalArgumentException e) {
LOG.warn("{}: Unable to read data: {}, errors: {}", id, path, result.getErrors());
throw e;
}
private CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readOperationalData(
final YangInstanceIdentifier path) {
- final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_GET_QNAME, NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, toFilterStructure(path)));
+ final ListenableFuture<RpcResult<CompositeNode>> operationalData = netconfOps.getRunning(loggingCallback, Optional.fromNullable(path));
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
+ // Find data node and normalize its content
+ final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(operationalData, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
@Override
public Optional<NormalizedNode<?, ?>> apply(final RpcResult<CompositeNode> result) {
checkReadSuccess(result, path);
throw new IllegalArgumentException(String.format("%s, Cannot read data %s for %s datastore, unknown datastore type", id, path, store));
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
data = read(store, path);
try {
import java.util.concurrent.ExecutionException;
-public class NetconfDeviceReadWriteTx implements DOMDataReadWriteTransaction {
+public class ReadWriteTx implements DOMDataReadWriteTransaction {
private final DOMDataReadTransaction delegateReadTx;
private final DOMDataWriteTransaction delegateWriteTx;
- public NetconfDeviceReadWriteTx(final DOMDataReadTransaction delegateReadTx, final DOMDataWriteTransaction delegateWriteTx) {
+ public ReadWriteTx(final DOMDataReadTransaction delegateReadTx, final DOMDataWriteTransaction delegateWriteTx) {
this.delegateReadTx = delegateReadTx;
this.delegateWriteTx = delegateWriteTx;
}
}
@Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
+ final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
data = read(store, path);
try {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal.tx;
+
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tx implementation for netconf devices that support only candidate datastore and writable running
+ * The sequence goes exactly as with only candidate supported, with one addition:
+ * <ul>
+ * <li>Running datastore is locked as the first thing and this lock has to succeed</li>
+ * </ul>
+ */
+public class WriteCandidateRunningTx extends WriteCandidateTx {
+
+ private static final Logger LOG = LoggerFactory.getLogger(WriteCandidateRunningTx.class);
+
+ public WriteCandidateRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ super(id, netOps, normalizer, netconfSessionPreferences);
+ }
+
+ // Lock running first (must succeed), then run the regular candidate initialization from super
+ @Override
+ protected synchronized void init() {
+ lockRunning();
+ super.init();
+ }
+
+ // Candidate cleanup from super runs first, then the running lock is released
+ @Override
+ protected void cleanupOnSuccess() {
+ super.cleanupOnSuccess();
+ unlockRunning();
+ }
+
+ // Blocking lock of the running datastore; marks the tx finished and throws if the lock fails
+ private void lockRunning() {
+ try {
+ invokeBlocking("Lock running", new Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>>() {
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> apply(final NetconfBaseOps input) {
+ return input.lockRunning(new NetconfRpcFutureCallback("Lock running", id));
+ }
+ });
+ } catch (final NetconfDocumentedException e) {
+ // FIX: pass id for the "{}" placeholder; previously only the exception was supplied,
+ // so SLF4J consumed it as the placeholder argument and the stack trace was lost
+ LOG.warn("{}: Failed to lock running. Failed to initialize transaction", id, e);
+ finished = true;
+ throw new RuntimeException(id + ": Failed to lock running. Failed to initialize transaction", e);
+ }
+ }
+
+ /**
+ * This has to be non blocking since it is called from a callback on commit and its netty threadpool that is really sensitive to blocking calls
+ */
+ private void unlockRunning() {
+ netOps.unlockRunning(new NetconfRpcFutureCallback("Unlock running", id));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal.tx;
+
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.ModifyAction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tx implementation for netconf devices that support only candidate datastore and no writable running
+ * The sequence goes as:
+ * <ol>
+ * <li/> Lock candidate datastore on tx construction
+ * <ul>
+ * <li/> Lock has to succeed, if it does not, an attempt to discard changes is made
+ * <li/> Discard changes has to succeed
+ * <li/> If discard is successful, lock is reattempted
+ * <li/> Second lock attempt has to succeed
+ * </ul>
+ * <li/> Edit-config in candidate N times
+ * <ul>
+ * <li/> If any issue occurs during edit, datastore is discarded using discard-changes rpc, unlocked and an exception is thrown async
+ * </ul>
+ * <li/> Commit and Unlock candidate datastore async
+ * </ol>
+ */
+public class WriteCandidateTx extends AbstractWriteTx {
+
+ private static final Logger LOG = LoggerFactory.getLogger(WriteCandidateTx.class);
+
+ // Maps the commit rpc-reply onto a TransactionStatus result, copying every rpc error on failure
+ private static final Function<RpcResult<CompositeNode>, RpcResult<TransactionStatus>> RPC_RESULT_TO_TX_STATUS = new Function<RpcResult<CompositeNode>, RpcResult<TransactionStatus>>() {
+ @Override
+ public RpcResult<TransactionStatus> apply(final RpcResult<CompositeNode> input) {
+ if (input.isSuccessful()) {
+ return RpcResultBuilder.success(TransactionStatus.COMMITED).build();
+ } else {
+ final RpcResultBuilder<TransactionStatus> failed = RpcResultBuilder.failed();
+ for (final RpcError rpcError : input.getErrors()) {
+ failed.withError(rpcError.getErrorType(), rpcError.getTag(), rpcError.getMessage(),
+ rpcError.getApplicationTag(), rpcError.getInfo(), rpcError.getCause());
+ }
+ return failed.build();
+ }
+ }
+ };
+
+ public WriteCandidateTx(final RemoteDeviceId id, final NetconfBaseOps rpc, final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ super(rpc, id, normalizer, netconfSessionPreferences);
+ }
+
+ // Lock candidate; on failure try discard-changes once and re-lock, failing the tx if that also fails
+ @Override
+ protected synchronized void init() {
+ LOG.trace("{}: Initializing {} transaction", id, getClass().getSimpleName());
+
+ try {
+ lock();
+ } catch (final NetconfDocumentedException e) {
+ try {
+ LOG.warn("{}: Failed to lock candidate, attempting discard changes", id);
+ discardChanges();
+ LOG.warn("{}: Changes discarded successfully, attempting lock", id);
+ lock();
+ } catch (final NetconfDocumentedException secondE) {
+ LOG.error("{}: Failed to prepare candidate. Failed to initialize transaction", id, secondE);
+ throw new RuntimeException(id + ": Failed to prepare candidate. Failed to initialize transaction", secondE);
+ }
+ }
+ }
+
+ // Blocking lock of the candidate datastore; rethrows after logging so init() can retry
+ private void lock() throws NetconfDocumentedException {
+ try {
+ invokeBlocking("Lock candidate", new Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>>() {
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> apply(final NetconfBaseOps input) {
+ return input.lockCandidate(new NetconfRpcFutureCallback("Lock candidate", id));
+ }
+ });
+ } catch (final NetconfDocumentedException e) {
+ LOG.warn("{}: Failed to lock candidate", id, e);
+ throw e;
+ }
+ }
+
+ // Failure cleanup: discard candidate changes first, then perform the same unlock as on success
+ @Override
+ protected void cleanup() {
+ discardChanges();
+ cleanupOnSuccess();
+ }
+
+ @Override
+ protected void handleEditException(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final NetconfDocumentedException e, final String editType) {
+ LOG.warn("{}: Error " + editType + " data to (candidate){}, data: {}, canceling", id, path, data, e);
+ cancel();
+ throw new RuntimeException(id + ": Error while " + editType + ": (candidate)" + path, e);
+ }
+
+ @Override
+ protected void handleDeleteException(final YangInstanceIdentifier path, final NetconfDocumentedException e) {
+ LOG.warn("{}: Error deleting data (candidate){}, canceling", id, path, e);
+ cancel();
+ throw new RuntimeException(id + ": Error while deleting (candidate)" + path, e);
+ }
+
+ // Adapts commit() to the DOM submit contract: drops the TransactionStatus payload and wraps
+ // any failure in TransactionCommitFailedException
+ @Override
+ public synchronized CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final ListenableFuture<Void> commitFutureAsVoid = Futures.transform(commit(), new Function<RpcResult<TransactionStatus>, Void>() {
+ @Override
+ public Void apply(final RpcResult<TransactionStatus> input) {
+ return null;
+ }
+ });
+
+ return Futures.makeChecked(commitFutureAsVoid, new Function<Exception, TransactionCommitFailedException>() {
+ @Override
+ public TransactionCommitFailedException apply(final Exception input) {
+ return new TransactionCommitFailedException("Submit of transaction " + getIdentifier() + " failed", input);
+ }
+ });
+ }
+
+ /**
+ * This has to be non blocking since it is called from a callback on commit and its netty threadpool that is really sensitive to blocking calls
+ */
+ private void discardChanges() {
+ netOps.discardChanges(new NetconfRpcFutureCallback("Discarding candidate", id));
+ }
+
+ @Override
+ public synchronized ListenableFuture<RpcResult<TransactionStatus>> performCommit() {
+ final ListenableFuture<RpcResult<CompositeNode>> rpcResult = netOps.commit(new NetconfRpcFutureCallback("Commit", id) {
+ // NOTE(review): this override runs cleanupOnSuccess() unconditionally after
+ // super.onSuccess(result); it assumes NetconfRpcFutureCallback routes unsuccessful
+ // results to onUnsuccess() rather than reaching this path with a failed result — confirm
+ @Override
+ public void onSuccess(final RpcResult<CompositeNode> result) {
+ super.onSuccess(result);
+ LOG.debug("{}: Write successful, transaction: {}. Unlocking", id, getIdentifier());
+ cleanupOnSuccess();
+ }
+
+ @Override
+ protected void onUnsuccess(final RpcResult<CompositeNode> result) {
+ LOG.error("{}: Write failed, transaction {}, discarding changes, unlocking: {}", id, getIdentifier(), result.getErrors());
+ cleanup();
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ LOG.error("{}: Write failed, transaction {}, discarding changes, unlocking", id, getIdentifier(), t);
+ cleanup();
+ }
+ });
+
+ return Futures.transform(rpcResult, RPC_RESULT_TO_TX_STATUS);
+ }
+
+ // Success cleanup: only the candidate unlock; subclasses may extend (e.g. to unlock running)
+ protected void cleanupOnSuccess() {
+ unlock();
+ }
+
+ // Blocking edit-config on candidate; rollback-on-error is requested when the device supports it
+ @Override
+ protected void editConfig(final CompositeNode editStructure, final Optional<ModifyAction> defaultOperation) throws NetconfDocumentedException {
+ invokeBlocking("Edit candidate", new Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>>() {
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> apply(final NetconfBaseOps input) {
+ return defaultOperation.isPresent()
+ ? input.editConfigCandidate(new NetconfRpcFutureCallback("Edit candidate", id), editStructure, defaultOperation.get(),
+ netconfSessionPreferences.isRollbackSupported())
+ : input.editConfigCandidate(new NetconfRpcFutureCallback("Edit candidate", id), editStructure,
+ netconfSessionPreferences.isRollbackSupported());
+ }
+ });
+ }
+
+ /**
+ * This has to be non blocking since it is called from a callback on commit and its netty threadpool that is really sensitive to blocking calls
+ */
+ private void unlock() {
+ netOps.unlockCandidate(new NetconfRpcFutureCallback("Unlock candidate", id));
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.sal.tx;
+
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfRpcFutureCallback;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.ModifyAction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tx implementation for netconf devices that support only writable-running with no candidate
+ * The sequence goes as:
+ * <ol>
+ * <li/> Lock running datastore on tx construction
+ * <ul>
+ * <li/> Lock has to succeed, if it does not, transaction is failed
+ * </ul>
+ * <li/> Edit-config in running N times
+ * <ul>
+ * <li/> If any issue occurs during edit, datastore is unlocked and an exception is thrown
+ * </ul>
+ * <li/> Unlock running datastore on tx commit
+ * </ol>
+ */
+public class WriteRunningTx extends AbstractWriteTx {
+
+ private static final Logger LOG = LoggerFactory.getLogger(WriteRunningTx.class);
+
+ public WriteRunningTx(final RemoteDeviceId id, final NetconfBaseOps netOps,
+ final DataNormalizer normalizer, final NetconfSessionCapabilities netconfSessionPreferences) {
+ super(netOps, id, normalizer, netconfSessionPreferences);
+ }
+
+ @Override
+ protected synchronized void init() {
+ lock();
+ }
+
+ // Blocking lock of the running datastore; marks the tx finished and throws if the lock fails
+ private void lock() {
+ try {
+ invokeBlocking("Lock running", new Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>>() {
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> apply(final NetconfBaseOps input) {
+ return input.lockRunning(new NetconfRpcFutureCallback("Lock running", id));
+ }
+ });
+ } catch (final NetconfDocumentedException e) {
+ // FIX: pass id for the "{}" placeholder; previously only the exception was supplied,
+ // so SLF4J consumed it as the placeholder argument and the stack trace was lost
+ LOG.warn("{}: Failed to initialize netconf transaction (lock running)", id, e);
+ finished = true;
+ throw new RuntimeException(id + ": Failed to initialize netconf transaction (lock running)", e);
+ }
+ }
+
+ // Running has no discard-changes; cleanup is just releasing the lock
+ @Override
+ protected void cleanup() {
+ unlock();
+ }
+
+ @Override
+ protected void handleEditException(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final NetconfDocumentedException e, final String editType) {
+ LOG.warn("{}: Error " + editType + " data to (running){}, data: {}, canceling", id, path, data, e);
+ cancel();
+ throw new RuntimeException(id + ": Error while " + editType + ": (running)" + path, e);
+ }
+
+ @Override
+ protected void handleDeleteException(final YangInstanceIdentifier path, final NetconfDocumentedException e) {
+ LOG.warn("{}: Error deleting data (running){}, canceling", id, path, e);
+ cancel();
+ throw new RuntimeException(id + ": Error while deleting (running)" + path, e);
+ }
+
+ // Adapts commit() to the DOM submit contract: drops the TransactionStatus payload and wraps
+ // any failure in TransactionCommitFailedException
+ @Override
+ public synchronized CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final ListenableFuture<Void> commitFutureAsVoid = Futures.transform(commit(), new Function<RpcResult<TransactionStatus>, Void>() {
+ @Override
+ public Void apply(final RpcResult<TransactionStatus> input) {
+ return null;
+ }
+ });
+
+ return Futures.makeChecked(commitFutureAsVoid, new Function<Exception, TransactionCommitFailedException>() {
+ @Override
+ public TransactionCommitFailedException apply(final Exception input) {
+ return new TransactionCommitFailedException("Submit of transaction " + getIdentifier() + " failed", input);
+ }
+ });
+ }
+
+ // No separate commit rpc for writable-running: edits are already live, only unlock remains
+ @Override
+ public synchronized ListenableFuture<RpcResult<TransactionStatus>> performCommit() {
+ unlock();
+ return Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
+ }
+
+ // Blocking edit-config on running; rollback-on-error is requested when the device supports it
+ @Override
+ protected void editConfig(final CompositeNode editStructure, final Optional<ModifyAction> defaultOperation) throws NetconfDocumentedException {
+ invokeBlocking("Edit running", new Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>>() {
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> apply(final NetconfBaseOps input) {
+ return defaultOperation.isPresent()
+ ? input.editConfigRunning(new NetconfRpcFutureCallback("Edit running", id), editStructure, defaultOperation.get(),
+ netconfSessionPreferences.isRollbackSupported())
+ : input.editConfigRunning(new NetconfRpcFutureCallback("Edit running", id), editStructure,
+ netconfSessionPreferences.isRollbackSupported());
+ }
+ });
+ }
+
+ // Blocking unlock of the running datastore
+ private void unlock() {
+ try {
+ invokeBlocking("Unlocking running", new Function<NetconfBaseOps, ListenableFuture<RpcResult<CompositeNode>>>() {
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> apply(final NetconfBaseOps input) {
+ return input.unlockRunning(new NetconfRpcFutureCallback("Unlock running", id));
+ }
+ });
+ } catch (final NetconfDocumentedException e) {
+ // FIX: pass id for the "{}" placeholder (same SLF4J argument bug as in lock())
+ LOG.warn("{}: Failed to unlock running datastore", id, e);
+ throw new RuntimeException(id + ": Failed to unlock running datastore", e);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.util;
+
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.DISCARD_CHANGES_RPC_CONTENT;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_CANDIDATE_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DEFAULT_OPERATION_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DISCARD_CHANGES_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_EDIT_CONFIG_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_ERROR_OPTION_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_LOCK_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_RUNNING_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_SOURCE_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_TARGET_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_UNLOCK_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_VALIDATE_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.ROLLBACK_ON_ERROR_OPTION;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.toFilterStructure;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
+import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.ModifyAction;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.SimpleNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
+import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+import org.opendaylight.yangtools.yang.data.impl.SimpleNodeTOImpl;
+import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
+
+/**
+ * Provides base operations for netconf e.g. get, get-config, edit-config, (un)lock, commit etc.
+ * According to RFC-6241
+ */
+public final class NetconfBaseOps {
+
+ private final RpcImplementation rpc;
+
+ public NetconfBaseOps(final RpcImplementation rpc) {
+ this.rpc = rpc;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> lock(final FutureCallback<RpcResult<CompositeNode>> callback, final QName datastore) {
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(datastore);
+
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_LOCK_QNAME, getLockContent(datastore));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> lockCandidate(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_LOCK_QNAME, getLockContent(NETCONF_CANDIDATE_QNAME));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+
+ public ListenableFuture<RpcResult<CompositeNode>> lockRunning(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_LOCK_QNAME, getLockContent(NETCONF_RUNNING_QNAME));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> unlock(final FutureCallback<RpcResult<CompositeNode>> callback, final QName datastore) {
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(datastore);
+
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_UNLOCK_QNAME, getUnLockContent(datastore));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> unlockRunning(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_UNLOCK_QNAME, getUnLockContent(NETCONF_RUNNING_QNAME));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> unlockCandidate(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_UNLOCK_QNAME, getUnLockContent(NETCONF_CANDIDATE_QNAME));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> discardChanges(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ Preconditions.checkNotNull(callback);
+
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_DISCARD_CHANGES_QNAME, DISCARD_CHANGES_RPC_CONTENT);
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> commit(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ Preconditions.checkNotNull(callback);
+
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NetconfMessageTransformUtil.NETCONF_COMMIT_QNAME, NetconfMessageTransformUtil.COMMIT_RPC_CONTENT);
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ // Invokes <validate> against the given datastore and reports the outcome to the callback
+ public ListenableFuture<RpcResult<CompositeNode>> validate(final FutureCallback<RpcResult<CompositeNode>> callback, final QName datastore) {
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(datastore);
+
+ // Use the statically imported NETCONF_VALIDATE_QNAME (previously imported but unused)
+ // for consistency with the lock/unlock/get-config methods above
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_VALIDATE_QNAME, getValidateContent(datastore));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> validateCandidate(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ return validate(callback, NETCONF_CANDIDATE_QNAME);
+ }
+
+
+ public ListenableFuture<RpcResult<CompositeNode>> validateRunning(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ return validate(callback, NETCONF_RUNNING_QNAME);
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> copyConfig(final FutureCallback<RpcResult<CompositeNode>> callback, final QName source, final QName target) {
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(source);
+ Preconditions.checkNotNull(target);
+
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NetconfMessageTransformUtil.NETCONF_COPY_CONFIG_QNAME, getCopyConfigContent(source, target));
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ public ListenableFuture<RpcResult<CompositeNode>> copyRunningToCandidate(final FutureCallback<RpcResult<CompositeNode>> callback) {
+ return copyConfig(callback, NETCONF_RUNNING_QNAME, NETCONF_CANDIDATE_QNAME);
+ }
+
+ /**
+  * Invoke <get-config> on the given source datastore, optionally narrowed by a
+  * subtree filter built from filterPath; the callback is notified with the result.
+  */
+ public ListenableFuture<RpcResult<CompositeNode>> getConfig(final FutureCallback<RpcResult<CompositeNode>> callback, final QName datastore, final Optional<YangInstanceIdentifier> filterPath) {
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(datastore);
+
+ final ListenableFuture<RpcResult<CompositeNode>> result;
+ if (!filterPath.isPresent()) {
+ result = rpc.invokeRpc(NETCONF_GET_CONFIG_QNAME,
+ NetconfMessageTransformUtil.wrap(NETCONF_GET_CONFIG_QNAME, getSourceNode(datastore)));
+ } else {
+ final Node<?> filter = toFilterStructure(filterPath.get());
+ result = rpc.invokeRpc(NETCONF_GET_CONFIG_QNAME,
+ NetconfMessageTransformUtil.wrap(NETCONF_GET_CONFIG_QNAME, getSourceNode(datastore), filter));
+ }
+
+ Futures.addCallback(result, callback);
+ return result;
+ }
+
+ /** Convenience overload: <get-config> from the running datastore. */
+ public ListenableFuture<RpcResult<CompositeNode>> getConfigRunning(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
+ return getConfig(callback, NETCONF_RUNNING_QNAME, filterPath);
+ }
+
+ /** Convenience overload: <get-config> from the candidate datastore. */
+ public ListenableFuture<RpcResult<CompositeNode>> getConfigCandidate(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
+ return getConfig(callback, NETCONF_CANDIDATE_QNAME, filterPath);
+ }
+
+ /**
+  * Invoke the <get> RPC, optionally narrowed by a subtree filter derived from
+  * filterPath, and report the result to the callback.
+  */
+ public ListenableFuture<RpcResult<CompositeNode>> get(final FutureCallback<RpcResult<CompositeNode>> callback, final QName datastore, final Optional<YangInstanceIdentifier> filterPath) {
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(datastore);
+
+ // NOTE(review): RFC 6241 <get> takes no <source> element; the datastore wrapping
+ // below mirrors the original behavior — confirm target devices tolerate it.
+ final ListenableFuture<RpcResult<CompositeNode>> result;
+ if (!filterPath.isPresent()) {
+ result = rpc.invokeRpc(NETCONF_GET_QNAME,
+ NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, getSourceNode(datastore)));
+ } else {
+ final Node<?> filter = toFilterStructure(filterPath.get());
+ result = rpc.invokeRpc(NETCONF_GET_QNAME,
+ NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, getSourceNode(datastore), filter));
+ }
+
+ Futures.addCallback(result, callback);
+ return result;
+ }
+
+ /** Convenience overload: <get> scoped to the running datastore. */
+ public ListenableFuture<RpcResult<CompositeNode>> getRunning(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
+ return get(callback, NETCONF_RUNNING_QNAME, filterPath);
+ }
+
+ /** Convenience overload: <get> scoped to the candidate datastore. */
+ public ListenableFuture<RpcResult<CompositeNode>> getCandidate(final FutureCallback<RpcResult<CompositeNode>> callback, final Optional<YangInstanceIdentifier> filterPath) {
+ return get(callback, NETCONF_CANDIDATE_QNAME, filterPath);
+ }
+
+
+ /** <edit-config> on the candidate datastore with an explicit default operation. */
+ public ListenableFuture<RpcResult<CompositeNode>> editConfigCandidate(final FutureCallback<? super RpcResult<CompositeNode>> callback, final CompositeNode editStructure, final ModifyAction modifyAction, final boolean rollback) {
+ return editConfig(callback, NETCONF_CANDIDATE_QNAME, editStructure, Optional.of(modifyAction), rollback);
+ }
+
+ /** <edit-config> on the candidate datastore, leaving the device's default operation in effect. */
+ public ListenableFuture<RpcResult<CompositeNode>> editConfigCandidate(final FutureCallback<? super RpcResult<CompositeNode>> callback, final CompositeNode editStructure, final boolean rollback) {
+ return editConfig(callback, NETCONF_CANDIDATE_QNAME, editStructure, Optional.<ModifyAction>absent(), rollback);
+ }
+
+ /** <edit-config> on the running datastore with an explicit default operation. */
+ public ListenableFuture<RpcResult<CompositeNode>> editConfigRunning(final FutureCallback<? super RpcResult<CompositeNode>> callback, final CompositeNode editStructure, final ModifyAction modifyAction, final boolean rollback) {
+ return editConfig(callback, NETCONF_RUNNING_QNAME, editStructure, Optional.of(modifyAction), rollback);
+ }
+
+ /** <edit-config> on the running datastore, leaving the device's default operation in effect. */
+ public ListenableFuture<RpcResult<CompositeNode>> editConfigRunning(final FutureCallback<? super RpcResult<CompositeNode>> callback, final CompositeNode editStructure, final boolean rollback) {
+ return editConfig(callback, NETCONF_RUNNING_QNAME, editStructure, Optional.<ModifyAction>absent(), rollback);
+ }
+
+ /**
+  * Invoke the <edit-config> RPC against the given target datastore.
+  *
+  * @param editStructure the <config> subtree describing the change
+  * @param modifyAction optional default-operation for the request
+  * @param rollback when true, request rollback-on-error error handling
+  */
+ public ListenableFuture<RpcResult<CompositeNode>> editConfig(final FutureCallback<? super RpcResult<CompositeNode>> callback, final QName datastore, final CompositeNode editStructure, final Optional<ModifyAction> modifyAction, final boolean rollback) {
+ Preconditions.checkNotNull(editStructure);
+ Preconditions.checkNotNull(callback);
+ Preconditions.checkNotNull(datastore);
+
+ final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_EDIT_CONFIG_QNAME, getEditConfigContent(datastore, editStructure, modifyAction, rollback));
+
+ Futures.addCallback(future, callback);
+ return future;
+ }
+
+ /**
+  * Assemble the body of an <edit-config> request: <target>, optional
+  * <default-operation>, optional <error-option>, then the <config> subtree.
+  */
+ private CompositeNode getEditConfigContent(final QName datastore, final CompositeNode editStructure, final Optional<ModifyAction> defaultOperation, final boolean rollback) {
+ final CompositeNodeBuilder<ImmutableCompositeNode> ret = ImmutableCompositeNode.builder();
+
+ // Target
+ ret.add(getTargetNode(datastore));
+
+ // Default operation
+ if(defaultOperation.isPresent()) {
+ final SimpleNode<String> defOp = NodeFactory.createImmutableSimpleNode(NETCONF_DEFAULT_OPERATION_QNAME, null, NetconfMessageTransformUtil.modifyOperationToXmlString(defaultOperation.get()));
+ ret.add(defOp);
+ }
+
+ // Error option
+ if(rollback) {
+ ret.addLeaf(NETCONF_ERROR_OPTION_QNAME, ROLLBACK_ON_ERROR_OPTION);
+ }
+
+ // The wrapping element of the whole request is <edit-config>
+ ret.setQName(NETCONF_EDIT_CONFIG_QNAME);
+ // Edit content
+ ret.add(editStructure);
+ return ret.toInstance();
+ }
+
+ /** Build a <source><{datastore}/></source> element naming the source datastore. */
+ private static CompositeNode getSourceNode(final QName datastore) {
+ return NodeFactory.createImmutableCompositeNode(NETCONF_SOURCE_QNAME, null,
+ Collections.<Node<?>> singletonList(new SimpleNodeTOImpl<>(datastore, null, null)));
+ }
+
+
+ /** Build the body of a <lock> request targeting the given datastore. */
+ public static CompositeNode getLockContent(final QName datastore) {
+ return NodeFactory.createImmutableCompositeNode(NETCONF_LOCK_QNAME, null, Collections.<Node<?>>singletonList(
+ getTargetNode(datastore)));
+ }
+
+ /** Build a <target><{datastore}/></target> element naming the target datastore. */
+ private static CompositeNode getTargetNode(final QName datastore) {
+ return NodeFactory.createImmutableCompositeNode(NETCONF_TARGET_QNAME, null, Collections.<Node<?>>singletonList(
+ NodeFactory.createImmutableSimpleNode(datastore, null, null)
+ ));
+ }
+
+ /**
+  * Build the body of a <copy-config> request copying source into target.
+  * Fix: the payload was wrongly wrapped in NETCONF_LOCK_QNAME, which would emit a
+  * <lock> element for a copy-config rpc; it must be NETCONF_COPY_CONFIG_QNAME.
+  */
+ public static CompositeNode getCopyConfigContent(final QName source, final QName target) {
+ return NodeFactory.createImmutableCompositeNode(NETCONF_COPY_CONFIG_QNAME, null,
+ Lists.<Node<?>> newArrayList(getTargetNode(target), getSourceNode(source)));
+ }
+
+ /** Build the body of a <validate> request naming the source datastore. */
+ public static CompositeNode getValidateContent(final QName source) {
+ return NodeFactory.createImmutableCompositeNode(NETCONF_VALIDATE_QNAME, null, Lists.<Node<?>> newArrayList(getSourceNode(source)));
+ }
+
+ /** Build the body of an <unlock> request targeting the given datastore. */
+ public static CompositeNode getUnLockContent(final QName preferedDatastore) {
+ return NodeFactory.createImmutableCompositeNode(NETCONF_UNLOCK_QNAME, null, Collections.<Node<?>>singletonList(
+ getTargetNode(preferedDatastore)));
+ }
+
+}
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.net.URI;
import java.util.ArrayList;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.ModifyAction;
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public static final QName IETF_NETCONF_MONITORING_SCHEMA_VERSION = QName.create(IETF_NETCONF_MONITORING, "version");
public static final QName IETF_NETCONF_MONITORING_SCHEMA_NAMESPACE = QName.create(IETF_NETCONF_MONITORING, "namespace");
- public static final URI NETCONF_URI = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0");
- public static final QName NETCONF_QNAME = QName.create(NETCONF_URI, null, "netconf");
- public static final QName NETCONF_DATA_QNAME = QName.create(NETCONF_QNAME, "data");
- public static final QName NETCONF_RPC_REPLY_QNAME = QName.create(NETCONF_QNAME, "rpc-reply");
- public static final QName NETCONF_ERROR_OPTION_QNAME = QName.create(NETCONF_QNAME, "error-option");
- public static final QName NETCONF_RUNNING_QNAME = QName.create(NETCONF_QNAME, "running");
- static final List<Node<?>> RUNNING = Collections.<Node<?>> singletonList(new SimpleNodeTOImpl<>(NETCONF_RUNNING_QNAME, null, null));
- public static final QName NETCONF_SOURCE_QNAME = QName.create(NETCONF_QNAME, "source");
- public static final CompositeNode CONFIG_SOURCE_RUNNING = new CompositeNodeTOImpl(NETCONF_SOURCE_QNAME, null, RUNNING);
- public static final QName NETCONF_CANDIDATE_QNAME = QName.create(NETCONF_QNAME, "candidate");
- public static final QName NETCONF_TARGET_QNAME = QName.create(NETCONF_QNAME, "target");
- public static final QName NETCONF_CONFIG_QNAME = QName.create(NETCONF_QNAME, "config");
- public static final QName NETCONF_COMMIT_QNAME = QName.create(NETCONF_QNAME, "commit");
- public static final QName NETCONF_OPERATION_QNAME = QName.create(NETCONF_QNAME, "operation");
- public static final QName NETCONF_DEFAULT_OPERATION_QNAME = QName.create(NETCONF_OPERATION_QNAME, "default-operation");
- public static final QName NETCONF_EDIT_CONFIG_QNAME = QName.create(NETCONF_QNAME, "edit-config");
- public static final QName NETCONF_GET_CONFIG_QNAME = QName.create(NETCONF_QNAME, "get-config");
- public static final QName NETCONF_DISCARD_CHANGES_QNAME = QName.create(NETCONF_QNAME, "discard-changes");
- public static final QName NETCONF_TYPE_QNAME = QName.create(NETCONF_QNAME, "type");
- public static final QName NETCONF_FILTER_QNAME = QName.create(NETCONF_QNAME, "filter");
- public static final QName NETCONF_GET_QNAME = QName.create(NETCONF_QNAME, "get");
- public static final QName NETCONF_RPC_QNAME = QName.create(NETCONF_QNAME, "rpc");
-
- public static final URI NETCONF_ROLLBACK_ON_ERROR_URI = URI
+ public static URI NETCONF_URI = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0");
+ public static QName NETCONF_QNAME = QName.create(NETCONF_URI, null, "netconf");
+ public static QName NETCONF_DATA_QNAME = QName.create(NETCONF_QNAME, "data");
+ public static QName NETCONF_RPC_REPLY_QNAME = QName.create(NETCONF_QNAME, "rpc-reply");
+ public static QName NETCONF_ERROR_OPTION_QNAME = QName.create(NETCONF_QNAME, "error-option");
+ public static QName NETCONF_RUNNING_QNAME = QName.create(NETCONF_QNAME, "running");
+ public static QName NETCONF_SOURCE_QNAME = QName.create(NETCONF_QNAME, "source");
+ public static QName NETCONF_CANDIDATE_QNAME = QName.create(NETCONF_QNAME, "candidate");
+ public static QName NETCONF_TARGET_QNAME = QName.create(NETCONF_QNAME, "target");
+ public static QName NETCONF_CONFIG_QNAME = QName.create(NETCONF_QNAME, "config");
+ public static QName NETCONF_COMMIT_QNAME = QName.create(NETCONF_QNAME, "commit");
+ public static QName NETCONF_VALIDATE_QNAME = QName.create(NETCONF_QNAME, "validate");
+ public static QName NETCONF_COPY_CONFIG_QNAME = QName.create(NETCONF_QNAME, "copy-config");
+ public static QName NETCONF_OPERATION_QNAME = QName.create(NETCONF_QNAME, "operation");
+ public static QName NETCONF_DEFAULT_OPERATION_QNAME = QName.create(NETCONF_OPERATION_QNAME, "default-operation");
+ public static QName NETCONF_EDIT_CONFIG_QNAME = QName.create(NETCONF_QNAME, "edit-config");
+ public static QName NETCONF_GET_CONFIG_QNAME = QName.create(NETCONF_QNAME, "get-config");
+ public static QName NETCONF_DISCARD_CHANGES_QNAME = QName.create(NETCONF_QNAME, "discard-changes");
+ public static QName NETCONF_TYPE_QNAME = QName.create(NETCONF_QNAME, "type");
+ public static QName NETCONF_FILTER_QNAME = QName.create(NETCONF_QNAME, "filter");
+ public static QName NETCONF_GET_QNAME = QName.create(NETCONF_QNAME, "get");
+ public static QName NETCONF_RPC_QNAME = QName.create(NETCONF_QNAME, "rpc");
+
+ public static URI NETCONF_ROLLBACK_ON_ERROR_URI = URI
.create("urn:ietf:params:netconf:capability:rollback-on-error:1.0");
- public static final String ROLLBACK_ON_ERROR_OPTION = "rollback-on-error";
+ public static String ROLLBACK_ON_ERROR_OPTION = "rollback-on-error";
- public static final URI NETCONF_CANDIDATE_URI = URI
+ public static URI NETCONF_CANDIDATE_URI = URI
.create("urn:ietf:params:netconf:capability:candidate:1.0");
+ public static URI NETCONF_RUNNING_WRITABLE_URI = URI
+ .create("urn:ietf:params:netconf:capability:writable-running:1.0");
+
+ public static QName NETCONF_LOCK_QNAME = QName.create(NETCONF_QNAME, "lock");
+ public static QName NETCONF_UNLOCK_QNAME = QName.create(NETCONF_QNAME, "unlock");
+
// Discard changes message
public static final CompositeNode DISCARD_CHANGES_RPC_CONTENT =
NodeFactory.createImmutableCompositeNode(NETCONF_DISCARD_CHANGES_QNAME, null, Collections.<Node<?>>emptyList());
static Node<?> toNode(final YangInstanceIdentifier.NodeIdentifierWithPredicates argument, final Node<?> node) {
final List<Node<?>> list = new ArrayList<>();
for (final Map.Entry<QName, Object> arg : argument.getKeyValues().entrySet()) {
- list.add(new SimpleNodeTOImpl<>(arg.getKey(), null, arg.getValue()));
+ list.add(new SimpleNodeTOImpl(arg.getKey(), null, arg.getValue()));
}
if (node != null) {
list.add(node);
}
return current;
}
+
+ /**
+  * Translate a ModifyAction enum constant into its netconf operation attribute
+  * value, e.g. MERGE -> "merge". Locale.ROOT makes the lowercasing deterministic
+  * regardless of the default JVM locale.
+  */
+ public static String modifyOperationToXmlString(final ModifyAction operation) {
+ return operation.name().toLowerCase(java.util.Locale.ROOT);
+ }
+
+
+ /**
+  * Build the <config> subtree of an edit-config request addressing dataPath.
+  * The deepest element carries the optional operation attribute and, when
+  * present, the children of lastChildOverride; ancestor elements are wrapped
+  * around it from the inside out.
+  *
+  * @throws IllegalArgumentException if dataPath has no path arguments
+  */
+ public static CompositeNode createEditConfigStructure(final YangInstanceIdentifier dataPath, final Optional<ModifyAction> operation,
+ final Optional<CompositeNode> lastChildOverride) {
+ Preconditions.checkArgument(!Iterables.isEmpty(dataPath.getPathArguments()), "Instance identifier with empty path %s", dataPath);
+
+ final List<YangInstanceIdentifier.PathArgument> reversedPath = Lists.reverse(dataPath.getPath());
+
+ // Create deepest edit element with expected edit operation
+ CompositeNode previous = getDeepestEditElement(reversedPath.get(0), operation, lastChildOverride);
+
+ // Wrap the remaining (already processed element excluded via subList) path
+ // arguments around it, innermost first, without copying the list.
+ for (final YangInstanceIdentifier.PathArgument arg : reversedPath.subList(1, reversedPath.size())) {
+ final CompositeNodeBuilder<ImmutableCompositeNode> builder = ImmutableCompositeNode.builder();
+ builder.setQName(arg.getNodeType());
+
+ addPredicatesToCompositeNodeBuilder(getPredicates(arg), builder);
+
+ builder.add(previous);
+ previous = builder.toInstance();
+ }
+ return ImmutableCompositeNode.create(NETCONF_CONFIG_QNAME, ImmutableList.<Node<?>>of(previous));
+ }
+
+ /** Add every list-key predicate as a leaf child of the builder. */
+ public static void addPredicatesToCompositeNodeBuilder(final Map<QName, Object> predicates, final CompositeNodeBuilder<ImmutableCompositeNode> builder) {
+ for (final Map.Entry<QName, Object> entry : predicates.entrySet()) {
+ builder.addLeaf(entry.getKey(), entry.getValue());
+ }
+ }
+
+ /**
+  * Extract the key/value predicates of a path argument; an empty map is
+  * returned when the argument carries no predicates.
+  */
+ public static Map<QName, Object> getPredicates(final YangInstanceIdentifier.PathArgument arg) {
+ if (arg instanceof YangInstanceIdentifier.NodeIdentifierWithPredicates) {
+ return ((YangInstanceIdentifier.NodeIdentifierWithPredicates) arg).getKeyValues();
+ }
+ return Collections.emptyMap();
+ }
+
+ /**
+  * Build the innermost element of an edit-config structure for the final path
+  * argument: its list-key predicates become leaf children, the optional
+  * operation becomes a netconf:operation attribute, and children of
+  * lastChildOverride are appended — except those that would duplicate a key leaf.
+  */
+ public static CompositeNode getDeepestEditElement(final YangInstanceIdentifier.PathArgument arg, final Optional<ModifyAction> operation, final Optional<CompositeNode> lastChildOverride) {
+ final CompositeNodeBuilder<ImmutableCompositeNode> builder = ImmutableCompositeNode.builder();
+ builder.setQName(arg.getNodeType());
+
+ final Map<QName, Object> predicates = getPredicates(arg);
+ addPredicatesToCompositeNodeBuilder(predicates, builder);
+
+ if (operation.isPresent()) {
+ builder.setAttribute(NETCONF_OPERATION_QNAME, modifyOperationToXmlString(operation.get()));
+ }
+ if (lastChildOverride.isPresent()) {
+ final List<Node<?>> children = lastChildOverride.get().getValue();
+ for(final Node<?> child : children) {
+ // Skip override children already emitted as key-predicate leaves
+ if(!predicates.containsKey(child.getKey())) {
+ builder.add(child);
+ }
+ }
+ }
+
+ return builder.toInstance();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.util;
+
+import com.google.common.util.concurrent.FutureCallback;
+import org.opendaylight.controller.sal.connect.netconf.sal.tx.WriteRunningTx;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Simple Netconf rpc logging callback
+ */
+public class NetconfRpcFutureCallback implements FutureCallback<RpcResult<CompositeNode>> {
+ // Fix: the logger was bound to WriteRunningTx.class, so this class's events were
+ // misattributed in logs; bind it to NetconfRpcFutureCallback itself.
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfRpcFutureCallback.class);
+
+ // Short human-readable name of the rpc being tracked, used in log messages
+ private final String type;
+ // Identifier of the remote device the rpc was sent to
+ private final RemoteDeviceId id;
+
+ public NetconfRpcFutureCallback(final String prefix, final RemoteDeviceId id) {
+ this.type = prefix;
+ this.id = id;
+ }
+
+ @Override
+ public void onSuccess(final RpcResult<CompositeNode> result) {
+ if(result.isSuccessful()) {
+ LOG.trace("{}: " + type + " invoked successfully", id);
+ } else {
+ onUnsuccess(result);
+ }
+ }
+
+ // Extension hook: the device answered but the rpc-reply reported errors
+ protected void onUnsuccess(final RpcResult<CompositeNode> result) {
+ LOG.warn("{}: " + type + " invoked unsuccessfully: {}", id, result.getErrors());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ LOG.warn("{}: " + type + " failed.", id, t);
+ }
+}
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.same;
import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.inOrder;
import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.DISCARD_CHANGES_RPC_CONTENT;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_CANDIDATE_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_RUNNING_QNAME;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfBaseOps;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
- doReturn(Futures.<RpcResult<CompositeNode>>immediateFailedFuture(new IllegalStateException("Failed tx")))
- .doReturn(Futures.immediateFuture(RpcResultBuilder.<CompositeNode>success().build()))
+ ListenableFuture<RpcResult<CompositeNode>> successFuture = Futures.immediateFuture(RpcResultBuilder.<CompositeNode>success().build());
+
+ // Stub the rpc: first invocation succeeds, second fails, later ones succeed again.
+ // Presumably this maps to lock / commit-or-edit / cleanup — verify against the tx impl.
+ doReturn(successFuture)
+ .doReturn(Futures.<RpcResult<CompositeNode>>immediateFailedFuture(new IllegalStateException("Failed tx")))
+ .doReturn(successFuture)
.when(rpc).invokeRpc(any(QName.class), any(CompositeNode.class));
yangIId = YangInstanceIdentifier.builder().node(QName.create("namespace", "2012-12-12", "name")).build();
}
@Test
- public void testDiscardCahnges() {
- final NetconfDeviceWriteOnlyTx tx = new NetconfDeviceWriteOnlyTx(id, rpc, normalizer, true, true);
+ public void testDiscardChanges() {
+ final WriteCandidateTx tx = new WriteCandidateTx(id, new NetconfBaseOps(rpc), normalizer,
+ NetconfSessionCapabilities.fromStrings(Collections.<String>emptySet()));
final CheckedFuture<Void, TransactionCommitFailedException> submitFuture = tx.submit();
try {
submitFuture.checkedGet();
} catch (final TransactionCommitFailedException e) {
// verify discard changes was sent
+ // Expected candidate-tx sequence on a failed commit: lock, commit (fails),
+ // discard-changes, then unlock — verified in order against the mocked rpc.
+ final InOrder inOrder = inOrder(rpc);
+ inOrder.verify(rpc).invokeRpc(NetconfMessageTransformUtil.NETCONF_LOCK_QNAME, NetconfBaseOps.getLockContent(NETCONF_CANDIDATE_QNAME));
+ inOrder.verify(rpc).invokeRpc(NetconfMessageTransformUtil.NETCONF_COMMIT_QNAME, NetconfMessageTransformUtil.COMMIT_RPC_CONTENT);
+ inOrder.verify(rpc).invokeRpc(NetconfMessageTransformUtil.NETCONF_DISCARD_CHANGES_QNAME, DISCARD_CHANGES_RPC_CONTENT);
+ inOrder.verify(rpc).invokeRpc(NetconfMessageTransformUtil.NETCONF_UNLOCK_QNAME, NetconfBaseOps.getUnLockContent(NETCONF_CANDIDATE_QNAME));
return;
}
fail("Submit should fail");
}
-
@Test
- public void testDiscardCahngesNotSentWithoutCandidate() {
+ public void testDiscardChangesNotSentWithoutCandidate() {
doReturn(Futures.immediateFuture(RpcResultBuilder.<CompositeNode>success().build()))
.doReturn(Futures.<RpcResult<CompositeNode>>immediateFailedFuture(new IllegalStateException("Failed tx")))
.when(rpc).invokeRpc(any(QName.class), any(CompositeNode.class));
- final NetconfDeviceWriteOnlyTx tx = new NetconfDeviceWriteOnlyTx(id, rpc, normalizer, false, true);
- tx.delete(LogicalDatastoreType.CONFIGURATION, yangIId);
- verify(rpc).invokeRpc(eq(NetconfMessageTransformUtil.NETCONF_EDIT_CONFIG_QNAME), any(CompositeNode.class));
- verifyNoMoreInteractions(rpc);
+ final WriteRunningTx tx = new WriteRunningTx(id, new NetconfBaseOps(rpc), normalizer,
+ NetconfSessionCapabilities.fromStrings(Collections.<String>emptySet()));
+ try {
+ tx.delete(LogicalDatastoreType.CONFIGURATION, yangIId);
+ } catch (final Exception e) {
+ // verify the running-datastore sequence: lock, edit-config, unlock — and no discard-changes
+ final InOrder inOrder = inOrder(rpc);
+ inOrder.verify(rpc).invokeRpc(NetconfMessageTransformUtil.NETCONF_LOCK_QNAME, NetconfBaseOps.getLockContent(NETCONF_RUNNING_QNAME));
+ inOrder.verify(rpc).invokeRpc(same(NetconfMessageTransformUtil.NETCONF_EDIT_CONFIG_QNAME), any(CompositeNode.class));
+ inOrder.verify(rpc).invokeRpc(NetconfMessageTransformUtil.NETCONF_UNLOCK_QNAME, NetconfBaseOps.getUnLockContent(NETCONF_RUNNING_QNAME));
+ return;
+ }
+
+ fail("Delete should fail");
}
}
config.getRpcManagerName());
LOG.debug("rpc manager started");
+
+ schemaService.registerSchemaContextListener(this);
}
@Override
import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.japi.Pair;
+
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import com.google.common.util.concurrent.ListenableFuture;
+
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
+import org.opendaylight.controller.remote.rpc.messages.UpdateSchemaContext;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
import org.opendaylight.controller.remote.rpc.utils.LatestEntryRoutingLogic;
import org.opendaylight.controller.remote.rpc.utils.RoutingLogic;
private static final Logger LOG = LoggerFactory.getLogger(RpcBroker.class);
private final Broker.ProviderSession brokerSession;
private final ActorRef rpcRegistry;
- private final SchemaContext schemaContext;
+ private SchemaContext schemaContext;
private final RemoteRpcProviderConfig config;
private RpcBroker(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
invokeRemoteRpc((InvokeRpc) message);
} else if(message instanceof ExecuteRpc) {
executeRpc((ExecuteRpc) message);
+ } else if(message instanceof UpdateSchemaContext) {
+ updateSchemaContext((UpdateSchemaContext) message);
}
}
+ // Replace the cached schema context when an UpdateSchemaContext message arrives
+ private void updateSchemaContext(UpdateSchemaContext message) {
+ this.schemaContext = message.getSchemaContext();
+ }
+
private void invokeRemoteRpc(final InvokeRpc msg) {
if(LOG.isDebugEnabled()) {
LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
import akka.actor.OneForOneStrategy;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
-import akka.japi.Creator;
import akka.japi.Function;
+import java.util.Set;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.remote.rpc.messages.UpdateSchemaContext;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
-import java.util.Set;
-
/**
* This class acts as a supervisor, creates all the actors, resumes them, if an exception is thrown.
*
}
- public static Props props(final SchemaContext schemaContext,
- final Broker.ProviderSession brokerSession,
- final RpcProvisionRegistry rpcProvisionRegistry) {
- return Props.create(new Creator<RpcManager>() {
- private static final long serialVersionUID = 1L;
- @Override
- public RpcManager create() throws Exception {
- return new RpcManager(schemaContext, brokerSession, rpcProvisionRegistry);
- }
- });
- }
+ // Uses reflection-based Props.create (matching-constructor lookup) instead of an
+ // explicit anonymous Creator, removing the serialVersionUID boilerplate.
+ public static Props props(final SchemaContext schemaContext, final Broker.ProviderSession brokerSession,
+ final RpcProvisionRegistry rpcProvisionRegistry) {
+ return Props.create(RpcManager.class, schemaContext, brokerSession, rpcProvisionRegistry);
+ }
private void createRpcActors() {
LOG.debug("Create rpc registry and broker actors");
private void updateSchemaContext(UpdateSchemaContext message) {
this.schemaContext = message.getSchemaContext();
+ // Forward the update so the broker also executes rpcs against the latest schema
+ rpcBroker.tell(message, ActorRef.noSender());
}
@Override
import akka.actor.ActorRef;
import akka.japi.Option;
import akka.japi.Pair;
-import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;
-import org.opendaylight.controller.sal.connector.api.RpcRouter;
-
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
public class RoutingTable implements Copier<RoutingTable>, Serializable {
private static final long serialVersionUID = 1L;
- private Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
+ // Route id -> last-updated time; final: contents are copied via copy(), never swapped
+ private final Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
+ // Actor that services the routes in this table
private ActorRef router;
@Override
public RoutingTable copy() {
RoutingTable copy = new RoutingTable();
- copy.setTable(new HashMap<>(table));
+ copy.table.putAll(table);
copy.setRouter(this.getRouter());
return copy;
+ // Returns (router, updatedTime) for a known route, or none when the route is
+ // unknown or no router has been set yet.
public Option<Pair<ActorRef, Long>> getRouterFor(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
Long updatedTime = table.get(routeId);
- if (updatedTime == null || router == null)
+ if (updatedTime == null || router == null) {
return Option.none();
- else
+ } else {
return Option.option(new Pair<>(router, updatedTime));
+ }
}
public void addRoute(RpcRouter.RouteIdentifier<?,?,?> routeId){
table.remove(routeId);
}
- public Boolean contains(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+ // Primitive boolean avoids needless boxing of the membership test
+ public boolean contains(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
return table.containsKey(routeId);
}
- public Boolean isEmpty(){
+ // Primitive boolean avoids needless boxing of the emptiness test
+ public boolean isEmpty(){
return table.isEmpty();
}
- ///
- /// Getter, Setters
- ///
- //TODO: Remove public
- public Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> getTable() {
- return table;
- }
- void setTable(Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table) {
- this.table = table;
+ // Number of routes currently held; replaces the removed raw-map getter
+ public int size() {
+ return table.size();
}
public ActorRef getRouter() {
package org.opendaylight.controller.remote.rpc.registry;
import akka.actor.ActorRef;
-import akka.actor.Address;
-import akka.actor.Props;
-import akka.dispatch.Mapper;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Option;
import akka.japi.Pair;
-import akka.pattern.Patterns;
import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
-import scala.concurrent.Future;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
-import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
-import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
-import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucketReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateBucket;
+import org.opendaylight.controller.sal.connector.api.RpcRouter.RouteIdentifier;
/**
* Registry to look up cluster nodes that have registered for a given rpc.
* It uses {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} to maintain this
* cluster wide information.
*/
-public class RpcRegistry extends AbstractUntypedActorWithMetering {
+public class RpcRegistry extends BucketStore<RoutingTable> {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
- /**
- * Store to keep the registry. Bucket store sync's it across nodes in the cluster
- */
- private ActorRef bucketStore;
-
- /**
- * Rpc broker that would use the registry to route requests.
- */
- private ActorRef localRouter;
-
- private RemoteRpcProviderConfig config;
-
public RpcRegistry() {
- bucketStore = getContext().actorOf(Props.create(BucketStore.class), "store");
- this.config = new RemoteRpcProviderConfig(getContext().system().settings().config());
- log.info("Bucket store path = {}", bucketStore.path().toString());
+ // Seed the local bucket with an empty routing table; the BucketStore superclass
+ // now owns storage and gossip instead of a separate child actor.
+ getLocalBucket().setData(new RoutingTable());
}
- public RpcRegistry(ActorRef bucketStore) {
- this.bucketStore = bucketStore;
- }
-
-
@Override
protected void handleReceive(Object message) throws Exception {
//TODO: if sender is remote, reject message
- if (message instanceof SetLocalRouter)
+ if (message instanceof SetLocalRouter) {
receiveSetLocalRouter((SetLocalRouter) message);
-
- if (message instanceof AddOrUpdateRoutes)
+ } else if (message instanceof AddOrUpdateRoutes) {
receiveAddRoutes((AddOrUpdateRoutes) message);
-
- else if (message instanceof RemoveRoutes)
+ } else if (message instanceof RemoveRoutes) {
receiveRemoveRoutes((RemoveRoutes) message);
-
- else if (message instanceof Messages.FindRouters)
+ } else if (message instanceof Messages.FindRouters) {
receiveGetRouter((FindRouters) message);
-
- else
- unhandled(message);
+ } else {
+ // Anything not registry-specific falls through to BucketStore handling
+ super.handleReceive(message);
+ }
}
/**
* @param message contains {@link akka.actor.ActorRef} for rpc broker
*/
private void receiveSetLocalRouter(SetLocalRouter message) {
- localRouter = message.getRouter();
+ getLocalBucket().getData().setRouter(message.getRouter());
}
/**
*/
private void receiveAddRoutes(AddOrUpdateRoutes msg) {
- Preconditions.checkState(localRouter != null, "Router must be set first");
+ log.debug("AddOrUpdateRoutes: {}", msg.getRouteIdentifiers());
+
+ RoutingTable table = getLocalBucket().getData().copy();
+ for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : msg.getRouteIdentifiers()) {
+ table.addRoute(routeId);
+ }
- Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), config.getAskDuration());
- futureReply.map(getMapperToAddRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
+ updateLocalBucket(table);
}
/**
*/
private void receiveRemoveRoutes(RemoveRoutes msg) {
- Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), config.getAskDuration());
- futureReply.map(getMapperToRemoveRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
+ RoutingTable table = getLocalBucket().getData().copy();
+ for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : msg.getRouteIdentifiers()) {
+ table.removeRoute(routeId);
+ }
+ updateLocalBucket(table);
}
/**
* @param msg
*/
private void receiveGetRouter(FindRouters msg) {
- final ActorRef sender = getSender();
-
- Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), config.getAskDuration());
- futureReply.map(getMapperToGetRouter(msg.getRouteIdentifier(), sender), getContext().dispatcher());
- }
-
- /**
- * Helper to create empty reply when no routers are found
- *
- * @return
- */
- private Messages.FindRoutersReply createEmptyReply() {
- List<Pair<ActorRef, Long>> routerWithUpdateTime = Collections.emptyList();
- return new Messages.FindRoutersReply(routerWithUpdateTime);
- }
-
- /**
- * Helper to create a reply when routers are found for the given rpc
- *
- * @param buckets
- * @param routeId
- * @return
- */
- private Messages.FindRoutersReply createReplyWithRouters(
- Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
-
List<Pair<ActorRef, Long>> routers = new ArrayList<>();
- Option<Pair<ActorRef, Long>> routerWithUpdateTime = null;
-
- for (Bucket bucket : buckets.values()) {
-
- RoutingTable table = (RoutingTable) bucket.getData();
- if (table == null)
- continue;
- routerWithUpdateTime = table.getRouterFor(routeId);
- if (routerWithUpdateTime.isEmpty())
- continue;
+ RouteIdentifier<?, ?, ?> routeId = msg.getRouteIdentifier();
+ findRoutes(getLocalBucket().getData(), routeId, routers);
- routers.add(routerWithUpdateTime.get());
+ for(Bucket<RoutingTable> bucket : getRemoteBuckets().values()) {
+ findRoutes(bucket.getData(), routeId, routers);
}
- return new Messages.FindRoutersReply(routers);
- }
-
-
- ///
- ///private factories to create Mapper
- ///
-
- /**
- * Receives all buckets returned from bucket store and finds routers for the buckets where given rpc(routeId) is found
- *
- * @param routeId the rpc
- * @param sender client who asked to find the routers.
- * @return
- */
- private Mapper<Object, Void> getMapperToGetRouter(
- final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
- return new Mapper<Object, Void>() {
- @Override
- public Void apply(Object replyMessage) {
-
- if (replyMessage instanceof GetAllBucketsReply) {
-
- GetAllBucketsReply reply = (GetAllBucketsReply) replyMessage;
- Map<Address, Bucket> buckets = reply.getBuckets();
-
- if (buckets == null || buckets.isEmpty()) {
- sender.tell(createEmptyReply(), getSelf());
- return null;
- }
-
- sender.tell(createReplyWithRouters(buckets, routeId), getSelf());
- }
- return null;
- }
- };
- }
-
- /**
- * Receives local bucket from bucket store and updates routing table in it by removing the route. Subsequently,
- * it updates the local bucket in bucket store.
- *
- * @param routeIds rpc to remote
- * @return
- */
- private Mapper<Object, Void> getMapperToRemoveRoutes(final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds) {
- return new Mapper<Object, Void>() {
- @Override
- public Void apply(Object replyMessage) {
- if (replyMessage instanceof GetLocalBucketReply) {
-
- GetLocalBucketReply reply = (GetLocalBucketReply) replyMessage;
- Bucket<RoutingTable> bucket = reply.getBucket();
-
- if (bucket == null) {
- log.debug("Local bucket is null");
- return null;
- }
-
- RoutingTable table = bucket.getData();
- if (table == null)
- table = new RoutingTable();
-
- table.setRouter(localRouter);
-
- if (!table.isEmpty()) {
- for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
- table.removeRoute(routeId);
- }
- }
- bucket.setData(table);
-
- UpdateBucket updateBucketMessage = new UpdateBucket(bucket);
- bucketStore.tell(updateBucketMessage, getSelf());
- }
- return null;
- }
- };
+ getSender().tell(new Messages.FindRoutersReply(routers), getSelf());
}
- /**
- * Receives local bucket from bucket store and updates routing table in it by adding the route. Subsequently,
- * it updates the local bucket in bucket store.
- *
- * @param routeIds rpc to add
- * @return
- */
- private Mapper<Object, Void> getMapperToAddRoutes(final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds) {
-
- return new Mapper<Object, Void>() {
- @Override
- public Void apply(Object replyMessage) {
- if (replyMessage instanceof GetLocalBucketReply) {
-
- GetLocalBucketReply reply = (GetLocalBucketReply) replyMessage;
- Bucket<RoutingTable> bucket = reply.getBucket();
-
- if (bucket == null) {
- log.debug("Local bucket is null");
- return null;
- }
-
- RoutingTable table = bucket.getData();
- if (table == null)
- table = new RoutingTable();
-
- table.setRouter(localRouter);
- for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
- table.addRoute(routeId);
- }
-
- bucket.setData(table);
-
- UpdateBucket updateBucketMessage = new UpdateBucket(bucket);
- bucketStore.tell(updateBucketMessage, getSelf());
- }
+ private void findRoutes(RoutingTable table, RpcRouter.RouteIdentifier<?, ?, ?> routeId,
+ List<Pair<ActorRef, Long>> routers) {
+ if (table == null) {
+ return;
+ }
- return null;
- }
- };
+ Option<Pair<ActorRef, Long>> routerWithUpdateTime = table.getRouterFor(routeId);
+ if(!routerWithUpdateTime.isEmpty()) {
+ routers.add(routerWithUpdateTime.get());
+ }
}
/**
public interface Bucket<T extends Copier<T>> {
public Long getVersion();
public T getData();
- public void setData(T data);
}
private T data;
+ public BucketImpl() {
+ }
+
+ public BucketImpl(T data) {
+ this.data = data;
+ }
+
+ public BucketImpl(Bucket<T> other) {
+ this.version = other.getVersion();
+ this.data = other.getData();
+ }
+
+ public void setData(T data) {
+ this.data = data;
+ this.version = System.currentTimeMillis()+1;
+ }
+
@Override
public Long getVersion() {
return version;
@Override
public T getData() {
- if (this.data == null)
- return null;
-
- return data.copy();
- }
-
- public void setData(T data){
- this.version = System.currentTimeMillis()+1;
- this.data = data;
+ return data;
}
@Override
import akka.cluster.ClusterActorRefProvider;
import akka.event.Logging;
import akka.event.LoggingAdapter;
+import com.google.common.annotations.VisibleForTesting;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
-import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
-import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucketReply;
-import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateBucket;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
import org.opendaylight.controller.utils.ConditionalProbe;
* This store uses a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}.
*
*/
-public class BucketStore extends AbstractUntypedActorWithMetering {
+public class BucketStore<T extends Copier<T>> extends AbstractUntypedActorWithMetering {
+
+ private static final Long NO_VERSION = -1L;
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
/**
* Bucket owned by the node
*/
- private BucketImpl localBucket = new BucketImpl();
+ private final BucketImpl<T> localBucket = new BucketImpl<>();
/**
 * Buckets owned by other known nodes in the cluster
*/
- private ConcurrentMap<Address, Bucket> remoteBuckets = new ConcurrentHashMap<>();
+ private final Map<Address, Bucket<T>> remoteBuckets = new HashMap<>();
/**
* Bucket version for every known node in the cluster including this node
*/
- private ConcurrentMap<Address, Long> versions = new ConcurrentHashMap<>();
+ private final Map<Address, Long> versions = new HashMap<>();
/**
* Cluster address for this node
}
}
-
@Override
protected void handleReceive(Object message) throws Exception {
if (probe != null) {
probe = (ConditionalProbe) message;
// Send back any message to tell the caller we got the probe.
getSender().tell("Got it", getSelf());
- } else if (message instanceof UpdateBucket) {
- receiveUpdateBucket(((UpdateBucket) message).getBucket());
} else if (message instanceof GetAllBuckets) {
- receiveGetAllBucket();
- } else if (message instanceof GetLocalBucket) {
- receiveGetLocalBucket();
+ receiveGetAllBuckets();
} else if (message instanceof GetBucketsByMembers) {
- receiveGetBucketsByMembers(
- ((GetBucketsByMembers) message).getMembers());
+ receiveGetBucketsByMembers(((GetBucketsByMembers) message).getMembers());
} else if (message instanceof GetBucketVersions) {
receiveGetBucketVersions();
} else if (message instanceof UpdateRemoteBuckets) {
- receiveUpdateRemoteBuckets(
- ((UpdateRemoteBuckets) message).getBuckets());
+ receiveUpdateRemoteBuckets(((UpdateRemoteBuckets) message).getBuckets());
} else {
if(log.isDebugEnabled()) {
log.debug("Unhandled message [{}]", message);
}
}
- /**
- * Returns a copy of bucket owned by this node
- */
- private void receiveGetLocalBucket() {
- final ActorRef sender = getSender();
- GetLocalBucketReply reply = new GetLocalBucketReply(localBucket);
- sender.tell(reply, getSelf());
- }
-
- /**
- * Updates the bucket owned by this node
- *
- * @param updatedBucket
- */
- void receiveUpdateBucket(Bucket updatedBucket){
-
- localBucket = (BucketImpl) updatedBucket;
- versions.put(selfAddress, localBucket.getVersion());
- }
-
/**
 * Returns all the buckets that this node knows about, self owned + remote
*/
- void receiveGetAllBucket(){
+ void receiveGetAllBuckets(){
final ActorRef sender = getSender();
sender.tell(new GetAllBucketsReply(getAllBuckets()), getSelf());
}
*
* @return self owned + remote buckets
*/
+ @SuppressWarnings("rawtypes")
Map<Address, Bucket> getAllBuckets(){
Map<Address, Bucket> all = new HashMap<>(remoteBuckets.size() + 1);
//first add the local bucket
- all.put(selfAddress, localBucket);
+ all.put(selfAddress, new BucketImpl<>(localBucket));
//then get all remote buckets
all.putAll(remoteBuckets);
*
* @param members requested members
*/
+ @SuppressWarnings("rawtypes")
void receiveGetBucketsByMembers(Set<Address> members){
final ActorRef sender = getSender();
Map<Address, Bucket> buckets = getBucketsByMembers(members);
* @param members requested members
 * @return buckets for requested members
*/
+ @SuppressWarnings("rawtypes")
Map<Address, Bucket> getBucketsByMembers(Set<Address> members) {
Map<Address, Bucket> buckets = new HashMap<>();
//first add the local bucket if asked
if (members.contains(selfAddress)) {
- buckets.put(selfAddress, localBucket);
+ buckets.put(selfAddress, new BucketImpl<>(localBucket));
}
//then get buckets for requested remote nodes
* @param receivedBuckets buckets sent by remote
* {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}
*/
+ @SuppressWarnings({ "rawtypes", "unchecked" })
void receiveUpdateRemoteBuckets(Map<Address, Bucket> receivedBuckets){
-
+ log.debug("{}: receiveUpdateRemoteBuckets: {}", selfAddress, receivedBuckets);
if (receivedBuckets == null || receivedBuckets.isEmpty())
{
return; //nothing to do
Long localVersion = versions.get(entry.getKey());
if (localVersion == null) {
- localVersion = -1L;
+ localVersion = NO_VERSION;
}
- Bucket receivedBucket = entry.getValue();
+ Bucket<T> receivedBucket = entry.getValue();
if (receivedBucket == null) {
continue;
Long remoteVersion = receivedBucket.getVersion();
if (remoteVersion == null) {
- remoteVersion = -1L;
+ remoteVersion = NO_VERSION;
}
//update only if remote version is newer
versions.put(entry.getKey(), remoteVersion);
}
}
+
if(log.isDebugEnabled()) {
log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
}
}
- ///
- ///Getter Setters
- ///
-
- BucketImpl getLocalBucket() {
+ protected BucketImpl<T> getLocalBucket() {
return localBucket;
}
- void setLocalBucket(BucketImpl localBucket) {
- this.localBucket = localBucket;
+ protected void updateLocalBucket(T data) {
+ localBucket.setData(data);
+ versions.put(selfAddress, localBucket.getVersion());
}
- ConcurrentMap<Address, Bucket> getRemoteBuckets() {
+ protected Map<Address, Bucket<T>> getRemoteBuckets() {
return remoteBuckets;
}
- void setRemoteBuckets(ConcurrentMap<Address, Bucket> remoteBuckets) {
- this.remoteBuckets = remoteBuckets;
- }
-
- ConcurrentMap<Address, Long> getVersions() {
+ @VisibleForTesting
+ Map<Address, Long> getVersions() {
return versions;
}
-
- void setVersions(ConcurrentMap<Address, Long> versions) {
- this.versions = versions;
- }
-
- Address getSelfAddress() {
- return selfAddress;
- }
}
import akka.actor.Address;
import com.google.common.base.Preconditions;
-
import java.io.Serializable;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
-
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBucketVersions;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBuckets;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBucketVersions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBuckets;
/**
public static class BucketStoreMessages{
- public static class GetLocalBucket implements Serializable {
- private static final long serialVersionUID = 1L;
- }
-
- public static class ContainsBucket implements Serializable {
- private static final long serialVersionUID = 1L;
- final private Bucket bucket;
-
- public ContainsBucket(Bucket bucket){
- Preconditions.checkArgument(bucket != null, "bucket can not be null");
- this.bucket = bucket;
- }
-
- public Bucket getBucket(){
- return bucket;
- }
-
- }
-
- public static class UpdateBucket extends ContainsBucket implements Serializable {
- private static final long serialVersionUID = 1L;
- public UpdateBucket(Bucket bucket){
- super(bucket);
- }
- }
-
- public static class GetLocalBucketReply extends ContainsBucket implements Serializable {
- private static final long serialVersionUID = 1L;
- public GetLocalBucketReply(Bucket bucket){
- super(bucket);
- }
- }
-
public static class GetAllBuckets implements Serializable {
private static final long serialVersionUID = 1L;
}
public static class GetBucketsByMembers implements Serializable{
private static final long serialVersionUID = 1L;
- private Set<Address> members;
+ private final Set<Address> members;
public GetBucketsByMembers(Set<Address> members){
Preconditions.checkArgument(members != null, "members can not be null");
public static class ContainsBuckets implements Serializable{
private static final long serialVersionUID = 1L;
- private Map<Address, Bucket> buckets;
+ private final Map<Address, Bucket> buckets;
public ContainsBuckets(Map<Address, Bucket> buckets){
Preconditions.checkArgument(buckets != null, "buckets can not be null");
for (Map.Entry<Address, Bucket> entry : buckets.entrySet()){
//ignore null entries
- if ( (entry.getKey() == null) || (entry.getValue() == null) )
+ if ( (entry.getKey() == null) || (entry.getValue() == null) ) {
continue;
+ }
copy.put(entry.getKey(), entry.getValue());
}
- return new HashMap<>(copy);
+ return copy;
}
}
public static final class GossipStatus extends ContainsBucketVersions implements Serializable{
private static final long serialVersionUID = 1L;
- private Address from;
+ private final Address from;
public GossipStatus(Address from, Map<Address, Long> versions) {
super(versions);
package org.opendaylight.controller.remote.rpc.registry;
-import akka.actor.ActorPath;
import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
-import akka.actor.ChildActorPath;
+import akka.actor.Address;
import akka.actor.Props;
-import akka.pattern.Patterns;
+import akka.japi.Pair;
import akka.testkit.JavaTestKit;
-import akka.util.Timeout;
-import com.google.common.base.Predicate;
+import com.google.common.util.concurrent.Uninterruptibles;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import javax.annotation.Nullable;
import org.junit.After;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRoutersReply;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
-import org.opendaylight.controller.remote.rpc.registry.gossip.Messages;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
-import org.opendaylight.controller.utils.ConditionalProbe;
+import org.opendaylight.controller.sal.connector.api.RpcRouter.RouteIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
private ActorRef registry2;
private ActorRef registry3;
+ private int routeIdCounter = 1;
+
@BeforeClass
public static void staticSetup() throws InterruptedException {
RemoteRpcProviderConfig config1 = new RemoteRpcProviderConfig.Builder("memberA").build();
final JavaTestKit mockBroker = new JavaTestKit(node1);
- final ActorPath bucketStorePath = new ChildActorPath(registry1.path(), "store");
+ Address nodeAddress = node1.provider().getDefaultAddress();
// Add rpc on node 1
registry1.tell(new SetLocalRouter(mockBroker.getRef()), mockBroker.getRef());
- // install probe
- final JavaTestKit probe1 = createProbeForMessage(node1, bucketStorePath,
- Messages.BucketStoreMessages.UpdateBucket.class);
+ List<RpcRouter.RouteIdentifier<?, ?, ?>> addedRouteIds = createRouteIds();
- registry1.tell(getAddRouteMessage(), mockBroker.getRef());
+ registry1.tell(new AddOrUpdateRoutes(addedRouteIds), mockBroker.getRef());
// Bucket store should get an update bucket message. Updated bucket contains added rpc.
- probe1.expectMsgClass(FiniteDuration.apply(10, TimeUnit.SECONDS),
- Messages.BucketStoreMessages.UpdateBucket.class);
+
+ Map<Address, Bucket> buckets = retrieveBuckets(registry1, mockBroker, nodeAddress);
+ verifyBucket(buckets.get(nodeAddress), addedRouteIds);
+
+ Map<Address, Long> versions = retrieveVersions(registry1, mockBroker);
+ Assert.assertEquals("Version for bucket " + nodeAddress, buckets.get(nodeAddress).getVersion(),
+ versions.get(nodeAddress));
// Now remove rpc
- registry1.tell(getRemoveRouteMessage(), mockBroker.getRef());
+ registry1.tell(new RemoveRoutes(addedRouteIds), mockBroker.getRef());
// Bucket store should get an update bucket message. Rpc is removed in the updated bucket
- probe1.expectMsgClass(FiniteDuration.apply(10, TimeUnit.SECONDS),
- Messages.BucketStoreMessages.UpdateBucket.class);
+
+ verifyEmptyBucket(mockBroker, registry1, nodeAddress);
System.out.println("testAddRemoveRpcOnSameNode ending");
System.out.println("testRpcAddRemoveInCluster starting");
final JavaTestKit mockBroker1 = new JavaTestKit(node1);
+ final JavaTestKit mockBroker2 = new JavaTestKit(node2);
+
+ List<RpcRouter.RouteIdentifier<?, ?, ?>> addedRouteIds = createRouteIds();
- // install probe on node2's bucket store
- final ActorPath bucketStorePath = new ChildActorPath(registry2.path(), "store");
- final JavaTestKit probe2 = createProbeForMessage(node2, bucketStorePath,
- Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
+ Address node1Address = node1.provider().getDefaultAddress();
// Add rpc on node 1
registry1.tell(new SetLocalRouter(mockBroker1.getRef()), mockBroker1.getRef());
- registry1.tell(getAddRouteMessage(), mockBroker1.getRef());
+ registry1.tell(new AddOrUpdateRoutes(addedRouteIds), mockBroker1.getRef());
// Bucket store on node2 should get a message to update its local copy of remote buckets
- probe2.expectMsgClass(FiniteDuration.apply(10, TimeUnit.SECONDS),
- Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
+
+ Map<Address, Bucket> buckets = retrieveBuckets(registry2, mockBroker2, node1Address);
+ verifyBucket(buckets.get(node1Address), addedRouteIds);
// Now remove
- registry1.tell(getRemoveRouteMessage(), mockBroker1.getRef());
+ registry1.tell(new RemoveRoutes(addedRouteIds), mockBroker1.getRef());
- // Bucket store on node2 should get a message to update its local copy of remote buckets
- probe2.expectMsgClass(FiniteDuration.apply(10, TimeUnit.SECONDS),
- Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
+ // Bucket store on node2 should get a message to update its local copy of remote buckets.
+ // Wait for the bucket for node1 to be empty.
+
+ verifyEmptyBucket(mockBroker2, registry2, node1Address);
System.out.println("testRpcAddRemoveInCluster ending");
}
+ private void verifyEmptyBucket(JavaTestKit testKit, ActorRef registry, Address address)
+ throws AssertionError {
+ Map<Address, Bucket> buckets;
+ int nTries = 0;
+ while(true) {
+ buckets = retrieveBuckets(registry1, testKit, address);
+
+ try {
+ verifyBucket(buckets.get(address), Collections.<RouteIdentifier<?, ?, ?>>emptyList());
+ break;
+ } catch (AssertionError e) {
+ if(++nTries >= 50) {
+ throw e;
+ }
+ }
+
+ Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+ }
+ }
+
/**
* Three node cluster. Register rpc on 2 nodes. Ensure 3rd gets updated.
*
registry3.tell(new SetLocalRouter(mockBroker3.getRef()), mockBroker3.getRef());
- // install probe on node 3
- final ActorPath bucketStorePath = new ChildActorPath(registry3.path(), "store");
- final JavaTestKit probe3 = createProbeForMessage(node3, bucketStorePath,
- Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
-
// Add rpc on node 1
+ List<RpcRouter.RouteIdentifier<?, ?, ?>> addedRouteIds1 = createRouteIds();
registry1.tell(new SetLocalRouter(mockBroker1.getRef()), mockBroker1.getRef());
- registry1.tell(getAddRouteMessage(), mockBroker1.getRef());
-
- probe3.expectMsgClass(FiniteDuration.apply(10, TimeUnit.SECONDS),
- Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
+ registry1.tell(new AddOrUpdateRoutes(addedRouteIds1), mockBroker1.getRef());
- // Add same rpc on node 2
+ // Add rpc on node 2
+ List<RpcRouter.RouteIdentifier<?, ?, ?>> addedRouteIds2 = createRouteIds();
registry2.tell(new SetLocalRouter(mockBroker2.getRef()), mockBroker2.getRef());
- registry2.tell(getAddRouteMessage(), mockBroker2.getRef());
+ registry2.tell(new AddOrUpdateRoutes(addedRouteIds2), mockBroker2.getRef());
+
+ Address node1Address = node1.provider().getDefaultAddress();
+ Address node2Address = node2.provider().getDefaultAddress();
+
+ Map<Address, Bucket> buckets = retrieveBuckets(registry3, mockBroker3, node1Address,
+ node2Address);
- probe3.expectMsgClass(FiniteDuration.apply(10, TimeUnit.SECONDS),
- Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
+ verifyBucket(buckets.get(node1Address), addedRouteIds1);
+ verifyBucket(buckets.get(node2Address), addedRouteIds2);
+
+ Map<Address, Long> versions = retrieveVersions(registry3, mockBroker3);
+ Assert.assertEquals("Version for bucket " + node1Address, buckets.get(node1Address).getVersion(),
+ versions.get(node1Address));
+ Assert.assertEquals("Version for bucket " + node2Address, buckets.get(node2Address).getVersion(),
+ versions.get(node2Address));
+
+ RouteIdentifier<?, ?, ?> routeID = addedRouteIds1.get(0);
+ registry3.tell(new FindRouters(routeID), mockBroker3.getRef());
+
+ FindRoutersReply reply = mockBroker3.expectMsgClass(Duration.create(3, TimeUnit.SECONDS),
+ FindRoutersReply.class);
+
+ List<Pair<ActorRef, Long>> respList = reply.getRouterWithUpdateTime();
+ Assert.assertEquals("getRouterWithUpdateTime size", 1, respList.size());
+
+ respList.get(0).first().tell("hello", ActorRef.noSender());
+ mockBroker1.expectMsgEquals(Duration.create(3, TimeUnit.SECONDS), "hello");
}
- private JavaTestKit createProbeForMessage(ActorSystem node, ActorPath subjectPath, final Class<?> clazz)
- throws Exception {
- final JavaTestKit probe = new JavaTestKit(node);
-
- ConditionalProbe conditionalProbe = new ConditionalProbe(probe.getRef(), new Predicate<Object>() {
- @Override
- public boolean apply(@Nullable Object input) {
- if (input != null) {
- return clazz.equals(input.getClass());
- } else {
- return false;
- }
+ private Map<Address, Long> retrieveVersions(ActorRef bucketStore, JavaTestKit testKit) {
+ bucketStore.tell(new GetBucketVersions(), testKit.getRef());
+ GetBucketVersionsReply reply = testKit.expectMsgClass(Duration.create(3, TimeUnit.SECONDS),
+ GetBucketVersionsReply.class);
+ return reply.getVersions();
+ }
+
+ private void verifyBucket(Bucket<RoutingTable> bucket, List<RouteIdentifier<?, ?, ?>> expRouteIds) {
+ RoutingTable table = bucket.getData();
+ Assert.assertNotNull("Bucket RoutingTable is null", table);
+ for(RouteIdentifier<?, ?, ?> r: expRouteIds) {
+ if(!table.contains(r)) {
+ Assert.fail("RoutingTable does not contain " + r + ". Actual: " + table);
}
- });
+ }
- FiniteDuration duration = Duration.create(3, TimeUnit.SECONDS);
- Timeout timeout = new Timeout(duration);
- int maxTries = 30;
- int i = 0;
- while(true) {
- ActorSelection subject = node.actorSelection(subjectPath);
- Future<Object> future = Patterns.ask(subject, conditionalProbe, timeout);
+ Assert.assertEquals("RoutingTable size", expRouteIds.size(), table.size());
+ }
- try {
- Await.ready(future, duration);
- break;
- } catch (TimeoutException | InterruptedException e) {
- if(++i > maxTries) {
- throw e;
+ private Map<Address, Bucket> retrieveBuckets(ActorRef bucketStore, JavaTestKit testKit,
+ Address... addresses) {
+ int nTries = 0;
+ while(true) {
+ bucketStore.tell(new GetAllBuckets(), testKit.getRef());
+ GetAllBucketsReply reply = testKit.expectMsgClass(Duration.create(3, TimeUnit.SECONDS),
+ GetAllBucketsReply.class);
+
+ Map<Address, Bucket> buckets = reply.getBuckets();
+ boolean foundAll = true;
+ for(Address addr: addresses) {
+ Bucket bucket = buckets.get(addr);
+ if(bucket == null) {
+ foundAll = false;
+ break;
}
}
- }
- return probe;
+ if(foundAll) {
+ return buckets;
+ }
- }
+ if(++nTries >= 50) {
+ Assert.fail("Missing expected buckets for addresses: " + Arrays.toString(addresses)
+ + ", Actual: " + buckets);
+ }
- private AddOrUpdateRoutes getAddRouteMessage() throws URISyntaxException {
- return new AddOrUpdateRoutes(createRouteIds());
+ Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+ }
}
- private RemoveRoutes getRemoveRouteMessage() throws URISyntaxException {
- return new RemoveRoutes(createRouteIds());
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testAddRoutesConcurrency() throws Exception {
+ final JavaTestKit testKit = new JavaTestKit(node1);
+
+ registry1.tell(new SetLocalRouter(testKit.getRef()), ActorRef.noSender());
+
+ final int nRoutes = 500;
+ final RouteIdentifier<?, ?, ?>[] added = new RouteIdentifier<?, ?, ?>[nRoutes];
+ for(int i = 0; i < nRoutes; i++) {
+ final RouteIdentifierImpl routeId = new RouteIdentifierImpl(null,
+ new QName(new URI("/mockrpc"), "type" + i), null);
+ added[i] = routeId;
+
+ //Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ registry1.tell(new AddOrUpdateRoutes(Arrays.<RouteIdentifier<?, ?, ?>>asList(routeId)),
+ ActorRef.noSender());
+ }
+
+ GetAllBuckets getAllBuckets = new GetAllBuckets();
+ FiniteDuration duration = Duration.create(3, TimeUnit.SECONDS);
+ int nTries = 0;
+ while(true) {
+ registry1.tell(getAllBuckets, testKit.getRef());
+ GetAllBucketsReply reply = testKit.expectMsgClass(duration, GetAllBucketsReply.class);
+
+ Bucket<RoutingTable> localBucket = reply.getBuckets().values().iterator().next();
+ RoutingTable table = localBucket.getData();
+ if(table != null && table.size() == nRoutes) {
+ for(RouteIdentifier<?, ?, ?> r: added) {
+ Assert.assertEquals("RoutingTable contains " + r, true, table.contains(r));
+ }
+
+ break;
+ }
+
+ if(++nTries >= 50) {
+ Assert.fail("Expected # routes: " + nRoutes + ", Actual: " + table.size());
+ }
+
+ Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+ }
}
private List<RpcRouter.RouteIdentifier<?, ?, ?>> createRouteIds() throws URISyntaxException {
- QName type = new QName(new URI("/mockrpc"), "mockrpc");
+ QName type = new QName(new URI("/mockrpc"), "mockrpc" + routeIdCounter++);
List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = new ArrayList<>();
routeIds.add(new RouteIdentifierImpl(null, type, null));
return routeIds;
}
-
}
import akka.actor.Props;
import akka.testkit.TestActorRef;
import com.typesafe.config.ConfigFactory;
+import java.util.HashMap;
+import java.util.Map;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.remote.rpc.TerminationMonitor;
-import java.util.HashMap;
-import java.util.Map;
-
public class BucketStoreTest {
private static ActorSystem system;
- private static BucketStore store;
@BeforeClass
public static void setup() {
system = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("unit-test"));
system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
-
- store = createStore();
}
@AfterClass
system.shutdown();
}
- /**
- * Given a new local bucket
- * Should replace
- */
- @Test
- public void testReceiveUpdateBucket(){
- Bucket bucket = new BucketImpl();
- Long expectedVersion = bucket.getVersion();
-
- store.receiveUpdateBucket(bucket);
-
- Assert.assertEquals(bucket, store.getLocalBucket());
- Assert.assertEquals(expectedVersion, store.getLocalBucket().getVersion());
- }
-
/**
* Given remote buckets
* Should merge with local copy of remote buckets
@Test
public void testReceiveUpdateRemoteBuckets(){
+ BucketStore store = createStore();
+
Address localAddress = system.provider().getDefaultAddress();
Bucket localBucket = new BucketImpl();
//Should NOT contain local bucket
//Should contain ONLY 3 entries i.e a1, a2, a3
- Map<Address, Bucket> remoteBucketsInStore = store.getRemoteBuckets();
+ Map<Address, Bucket<?>> remoteBucketsInStore = store.getRemoteBuckets();
Assert.assertFalse("remote buckets contains local bucket", remoteBucketsInStore.containsKey(localAddress));
Assert.assertTrue(remoteBucketsInStore.size() == 3);
Assert.assertTrue(remoteBucketsInStore.size() == 4);
//Should update versions map
- //versions map contains versions for all remote buckets (4) + local bucket
- //so it should have total 5.
+ //versions map contains versions for all remote buckets (4).
Map<Address, Long> versionsInStore = store.getVersions();
- Assert.assertTrue(String.format("Expected:%s, Actual:%s", 5, versionsInStore.size()),
- versionsInStore.size() == 5);
+ Assert.assertEquals(4, versionsInStore.size());
Assert.assertEquals(b1.getVersion(), versionsInStore.get(a1));
Assert.assertEquals(b2.getVersion(), versionsInStore.get(a2));
Assert.assertEquals(b3_new.getVersion(), versionsInStore.get(a3));
}
public static IdentityValuesDTO asInstanceIdentifier(final String value, final PrefixesMaping prefixMap) {
- String valueTrimmed = value.trim();
+ final String valueTrimmed = value.trim();
if (!valueTrimmed.startsWith("/")) {
return null;
}
- String[] xPathParts = valueTrimmed.split("/");
+ final String[] xPathParts = valueTrimmed.split("/");
if (xPathParts.length < 2) { // must be at least "/pr:node"
return null;
}
- IdentityValuesDTO identityValuesDTO = new IdentityValuesDTO(value);
+ final IdentityValuesDTO identityValuesDTO = new IdentityValuesDTO(value);
for (int i = 1; i < xPathParts.length; i++) {
- String xPathPartTrimmed = xPathParts[i].trim();
+ final String xPathPartTrimmed = xPathParts[i].trim();
- String xPathPartStr = getIdAndPrefixAsStr(xPathPartTrimmed);
- IdentityValue identityValue = toIdentity(xPathPartStr, prefixMap);
+ final String xPathPartStr = getIdAndPrefixAsStr(xPathPartTrimmed);
+ final IdentityValue identityValue = toIdentity(xPathPartStr, prefixMap);
if (identityValue == null) {
return null;
}
- List<Predicate> predicates = toPredicates(xPathPartTrimmed, prefixMap);
+ final List<Predicate> predicates = toPredicates(xPathPartTrimmed, prefixMap);
if (predicates == null) {
return null;
}
}
private static String getIdAndPrefixAsStr(final String pathPart) {
- int predicateStartIndex = pathPart.indexOf("[");
+ final int predicateStartIndex = pathPart.indexOf("[");
return predicateStartIndex == -1 ? pathPart : pathPart.substring(0, predicateStartIndex);
}
private static IdentityValue toIdentity(final String xPathPart, final PrefixesMaping prefixMap) {
- String xPathPartTrimmed = xPathPart.trim();
+ final String xPathPartTrimmed = xPathPart.trim();
if (xPathPartTrimmed.isEmpty()) {
return null;
}
- String[] prefixAndIdentifier = xPathPartTrimmed.split(":");
+ final String[] prefixAndIdentifier = xPathPartTrimmed.split(":");
// it is not "prefix:value"
if (prefixAndIdentifier.length != 2) {
return null;
}
- String prefix = prefixAndIdentifier[0].trim();
- String identifier = prefixAndIdentifier[1].trim();
+ final String prefix = prefixAndIdentifier[0].trim();
+ final String identifier = prefixAndIdentifier[1].trim();
if (prefix.isEmpty() || identifier.isEmpty()) {
return null;
}
- String namespace = prefixMap.getNamespace(prefix);
- return new IdentityValue(namespace, identifier, namespace.equals(prefix) ? null : prefix);
+ final String namespace = prefixMap.getNamespace(prefix);
+ return new IdentityValue(namespace, identifier);
}
private static List<Predicate> toPredicates(final String predicatesStr, final PrefixesMaping prefixMap) {
- List<Predicate> result = new ArrayList<>();
- List<String> predicates = new ArrayList<>();
- Matcher matcher = PREDICATE_PATTERN.matcher(predicatesStr);
+ final List<Predicate> result = new ArrayList<>();
+ final List<String> predicates = new ArrayList<>();
+ final Matcher matcher = PREDICATE_PATTERN.matcher(predicatesStr);
while (matcher.find()) {
predicates.add(matcher.group(1).trim());
}
- for (String predicate : predicates) {
- int indexOfEqualityMark = predicate.indexOf("=");
+ for (final String predicate : predicates) {
+ final int indexOfEqualityMark = predicate.indexOf("=");
if (indexOfEqualityMark != -1) {
- String predicateValue = toPredicateValue(predicate.substring(indexOfEqualityMark + 1));
+ final String predicateValue = toPredicateValue(predicate.substring(indexOfEqualityMark + 1));
if (predicate.startsWith(".")) { // it is leaf-list
if (predicateValue == null) {
return null;
}
result.add(new Predicate(null, predicateValue));
} else {
- IdentityValue identityValue = toIdentity(predicate.substring(0, indexOfEqualityMark), prefixMap);
+ final IdentityValue identityValue = toIdentity(predicate.substring(0, indexOfEqualityMark), prefixMap);
if (identityValue == null || predicateValue == null) {
return null;
}
}
private static String toPredicateValue(final String predicatedValue) {
- String predicatedValueTrimmed = predicatedValue.trim();
+ final String predicatedValueTrimmed = predicatedValue.trim();
if ((predicatedValueTrimmed.startsWith(DQUOTE) || predicatedValueTrimmed.startsWith(SQUOTE))
&& (predicatedValueTrimmed.endsWith(DQUOTE) || predicatedValueTrimmed.endsWith(SQUOTE))) {
return predicatedValueTrimmed.substring(1, predicatedValueTrimmed.length() - 1);
private final List<IdentityValue> elementData = new ArrayList<>();
private final String originValue;
- public IdentityValuesDTO(String namespace, String value, String prefix, String originValue) {
- elementData.add(new IdentityValue(namespace, value, prefix));
+ public IdentityValuesDTO(final String namespace, final String value, final String prefix, final String originValue) {
+ elementData.add(new IdentityValue(namespace, value));
this.originValue = originValue;
}
- public IdentityValuesDTO(String originValue) {
+ public IdentityValuesDTO(final String originValue) {
this.originValue = originValue;
}
originValue = null;
}
- public void add(String namespace, String value, String prefix) {
- elementData.add(new IdentityValue(namespace, value, prefix));
+ public void add(final String namespace, final String value, final String prefix) {
+ elementData.add(new IdentityValue(namespace, value));
}
- public void add(IdentityValue identityValue) {
+ public void add(final IdentityValue identityValue) {
elementData.add(identityValue);
}
private final String namespace;
private final String value;
- private final String prefix;
private List<Predicate> predicates;
- public IdentityValue(String namespace, String value, String prefix) {
+ public IdentityValue(final String namespace, final String value) {
this.namespace = namespace;
this.value = value;
- this.prefix = prefix;
}
public String getNamespace() {
return value;
}
- public String getPrefix() {
- return prefix;
- }
public List<Predicate> getPredicates() {
if (predicates == null) {
return Collections.unmodifiableList(predicates);
}
- public void setPredicates(List<Predicate> predicates) {
+ public void setPredicates(final List<Predicate> predicates) {
this.predicates = predicates;
}
@Override
public String toString() {
- StringBuilder sb = new StringBuilder();
+ final StringBuilder sb = new StringBuilder();
if (namespace != null) {
sb.append(namespace);
}
- if (prefix != null) {
- sb.append("(").append(prefix).append(")");
- }
if (value != null) {
sb.append(" - ").append(value);
}
if (predicates != null && !predicates.isEmpty()) {
- for (Predicate predicate : predicates) {
+ for (final Predicate predicate : predicates) {
sb.append("[");
predicate.toString();
sb.append("]");
private final IdentityValue name;
private final String value;
- public Predicate(IdentityValue name, String value) {
+ public Predicate(final IdentityValue name, final String value) {
super();
this.name = name;
this.value = value;
@Override
public String toString() {
- StringBuilder sb = new StringBuilder();
+ final StringBuilder sb = new StringBuilder();
if (name != null) {
sb.append(name.toString());
}
input == null ? "null" : input.getClass(), String.valueOf(input));
return null;
} else {
- TypeDefinitionAwareCodec<Object, ? extends TypeDefinition<?>> typeAwarecodec = TypeDefinitionAwareCodec
+ final TypeDefinitionAwareCodec<Object, ? extends TypeDefinition<?>> typeAwarecodec = TypeDefinitionAwareCodec
.from(type);
if (typeAwarecodec != null) {
if (input instanceof IdentityValuesDTO) {
return null;
}
}
- } catch (ClassCastException e) { // TODO remove this catch when everyone use codecs
+ } catch (final ClassCastException e) { // TODO remove this catch when everyone use codecs
logger.error(
"ClassCastException was thrown when codec is invoked with parameter " + String.valueOf(input),
e);
} else if (type instanceof InstanceIdentifierTypeDefinition) {
return instanceIdentifier.serialize(input);
} else {
- TypeDefinitionAwareCodec<Object, ? extends TypeDefinition<?>> typeAwarecodec = TypeDefinitionAwareCodec
+ final TypeDefinitionAwareCodec<Object, ? extends TypeDefinition<?>> typeAwarecodec = TypeDefinitionAwareCodec
.from(type);
if (typeAwarecodec != null) {
return typeAwarecodec.serialize(input);
return null;
}
}
- } catch (ClassCastException e) { // TODO remove this catch when everyone use codecs
+ } catch (final ClassCastException e) { // TODO remove this catch when everyone use codecs
logger.error(
"ClassCastException was thrown when codec is invoked with parameter " + String.valueOf(input),
e);
@Override
public IdentityValuesDTO serialize(final QName data) {
- return new IdentityValuesDTO(data.getNamespace().toString(), data.getLocalName(), data.getPrefix(), null);
+ return new IdentityValuesDTO(data.getNamespace().toString(), data.getLocalName(), null, null);
}
@Override
public QName deserialize(final IdentityValuesDTO data) {
- IdentityValue valueWithNamespace = data.getValuesWithNamespaces().get(0);
- Module module = getModuleByNamespace(valueWithNamespace.getNamespace(), mountPoint);
+ final IdentityValue valueWithNamespace = data.getValuesWithNamespaces().get(0);
+ final Module module = getModuleByNamespace(valueWithNamespace.getNamespace(), mountPoint);
if (module == null) {
logger.info("Module was not found for namespace {}", valueWithNamespace.getNamespace());
logger.info("Idenetityref will be translated as NULL for data - {}", String.valueOf(valueWithNamespace));
@Override
public IdentityValuesDTO serialize(final YangInstanceIdentifier data) {
- IdentityValuesDTO identityValuesDTO = new IdentityValuesDTO();
- for (PathArgument pathArgument : data.getPathArguments()) {
- IdentityValue identityValue = qNameToIdentityValue(pathArgument.getNodeType());
+ final IdentityValuesDTO identityValuesDTO = new IdentityValuesDTO();
+ for (final PathArgument pathArgument : data.getPathArguments()) {
+ final IdentityValue identityValue = qNameToIdentityValue(pathArgument.getNodeType());
if (pathArgument instanceof NodeIdentifierWithPredicates && identityValue != null) {
- List<Predicate> predicates = keyValuesToPredicateList(((NodeIdentifierWithPredicates) pathArgument)
+ final List<Predicate> predicates = keyValuesToPredicateList(((NodeIdentifierWithPredicates) pathArgument)
.getKeyValues());
identityValue.setPredicates(predicates);
} else if (pathArgument instanceof NodeWithValue && identityValue != null) {
- List<Predicate> predicates = new ArrayList<>();
- String value = String.valueOf(((NodeWithValue) pathArgument).getValue());
+ final List<Predicate> predicates = new ArrayList<>();
+ final String value = String.valueOf(((NodeWithValue) pathArgument).getValue());
predicates.add(new Predicate(null, value));
identityValue.setPredicates(predicates);
}
@Override
public YangInstanceIdentifier deserialize(final IdentityValuesDTO data) {
- List<PathArgument> result = new ArrayList<PathArgument>();
- IdentityValue valueWithNamespace = data.getValuesWithNamespaces().get(0);
- Module module = getModuleByNamespace(valueWithNamespace.getNamespace(), mountPoint);
+ final List<PathArgument> result = new ArrayList<PathArgument>();
+ final IdentityValue valueWithNamespace = data.getValuesWithNamespaces().get(0);
+ final Module module = getModuleByNamespace(valueWithNamespace.getNamespace(), mountPoint);
if (module == null) {
logger.info("Module by namespace '{}' of first node in instance-identifier was not found.",
valueWithNamespace.getNamespace());
}
DataNodeContainer parentContainer = module;
- List<IdentityValue> identities = data.getValuesWithNamespaces();
+ final List<IdentityValue> identities = data.getValuesWithNamespaces();
for (int i = 0; i < identities.size(); i++) {
- IdentityValue identityValue = identities.get(i);
+ final IdentityValue identityValue = identities.get(i);
URI validNamespace = resolveValidNamespace(identityValue.getNamespace(), mountPoint);
- DataSchemaNode node = ControllerContext.findInstanceDataChildByNameAndNamespace(
+ final DataSchemaNode node = ControllerContext.findInstanceDataChildByNameAndNamespace(
parentContainer, identityValue.getValue(), validNamespace);
if (node == null) {
logger.info("'{}' node was not found in {}", identityValue, parentContainer.getChildNodes());
String.valueOf(identityValue.getValue()));
return null;
}
- QName qName = node.getQName();
+ final QName qName = node.getQName();
PathArgument pathArgument = null;
if (identityValue.getPredicates().isEmpty()) {
pathArgument = new NodeIdentifier(qName);
} else {
if (node instanceof LeafListSchemaNode) { // predicate is value of leaf-list entry
- Predicate leafListPredicate = identityValue.getPredicates().get(0);
+ final Predicate leafListPredicate = identityValue.getPredicates().get(0);
if (!leafListPredicate.isLeafList()) {
logger.info("Predicate's data is not type of leaf-list. It should be in format \".='value'\"");
logger.info("Instance-identifier will be translated as NULL for data - {}",
}
pathArgument = new NodeWithValue(qName, leafListPredicate.getValue());
} else if (node instanceof ListSchemaNode) { // predicates are keys of list
- DataNodeContainer listNode = (DataNodeContainer) node;
- Map<QName, Object> predicatesMap = new HashMap<>();
- for (Predicate predicate : identityValue.getPredicates()) {
+ final DataNodeContainer listNode = (DataNodeContainer) node;
+ final Map<QName, Object> predicatesMap = new HashMap<>();
+ for (final Predicate predicate : identityValue.getPredicates()) {
validNamespace = resolveValidNamespace(predicate.getName().getNamespace(), mountPoint);
- DataSchemaNode listKey = ControllerContext
+ final DataSchemaNode listKey = ControllerContext
.findInstanceDataChildByNameAndNamespace(listNode, predicate.getName().getValue(),
validNamespace);
predicatesMap.put(listKey.getQName(), predicate.getValue());
}
private List<Predicate> keyValuesToPredicateList(final Map<QName, Object> keyValues) {
- List<Predicate> result = new ArrayList<>();
- for (QName qName : keyValues.keySet()) {
- Object value = keyValues.get(qName);
+ final List<Predicate> result = new ArrayList<>();
+ for (final QName qName : keyValues.keySet()) {
+ final Object value = keyValues.get(qName);
result.add(new Predicate(qNameToIdentityValue(qName), String.valueOf(value)));
}
return result;
private IdentityValue qNameToIdentityValue(final QName qName) {
if (qName != null) {
- return new IdentityValue(qName.getNamespace().toString(), qName.getLocalName(), qName.getPrefix());
+ return new IdentityValue(qName.getNamespace().toString(), qName.getLocalName());
}
return null;
}
}
private static Module getModuleByNamespace(final String namespace, final DOMMountPoint mountPoint) {
- URI validNamespace = resolveValidNamespace(namespace, mountPoint);
+ final URI validNamespace = resolveValidNamespace(namespace, mountPoint);
Module module = null;
if (mountPoint != null) {
import org.opendaylight.yangtools.yang.model.api.type.IdentityrefTypeDefinition;
import org.opendaylight.yangtools.yang.model.api.type.LeafrefTypeDefinition;
import org.opendaylight.yangtools.yang.model.util.EmptyType;
+import org.opendaylight.yangtools.yang.model.util.ExtendedType;
import org.opendaylight.yangtools.yang.model.util.SchemaContextUtil;
import org.opendaylight.yangtools.yang.parser.builder.impl.ContainerSchemaNodeBuilder;
import org.opendaylight.yangtools.yang.parser.builder.impl.LeafSchemaNodeBuilder;
}
}
+ private static class TypeDef {
+ public final TypeDefinition<? extends Object> typedef;
+ public final QName qName;
+ TypeDef(final TypeDefinition<? extends Object> typedef, final QName qName) {
+ this.typedef = typedef;
+ this.qName = qName;
+ }
+ }
private final static RestconfImpl INSTANCE = new RestconfImpl();
final DOMMountPoint mountPoint) {
final Object value = simpleNode.getValue();
Object inputValue = value;
- TypeDefinition<? extends Object> typeDefinition = this.typeDefinition(schema);
+ TypeDef typeDef = this.typeDefinition(schema);
+ TypeDefinition<? extends Object> typeDefinition = typeDef != null ? typeDef.typedef : null;
// For leafrefs, extract the type it is pointing to
if(typeDefinition instanceof LeafrefTypeDefinition) {
- typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? this.controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), schema);
+ if (schema.getQName().equals(typeDef.qName)) {
+ typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? this.controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), schema);
+ } else {
+ typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? this.controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), typeDef.qName);
+ }
}
if (typeDefinition instanceof IdentityrefTypeDefinition) {
}
}
- private TypeDefinition<? extends Object> _typeDefinition(final LeafSchemaNode node) {
- TypeDefinition<?> baseType = node.getType();
+ private TypeDef typeDefinition(final TypeDefinition<?> type, final QName nodeQName) {
+ TypeDefinition<?> baseType = type;
+ QName qName = nodeQName;
while (baseType.getBaseType() != null) {
+ if (baseType instanceof ExtendedType) {
+ qName = baseType.getQName();
+ }
baseType = baseType.getBaseType();
}
- return baseType;
- }
-
- private TypeDefinition<? extends Object> typeDefinition(final LeafListSchemaNode node) {
- TypeDefinition<?> baseType = node.getType();
- while (baseType.getBaseType() != null) {
- baseType = baseType.getBaseType();
- }
+ return new TypeDef(baseType, qName);
- return baseType;
}
- private TypeDefinition<? extends Object> typeDefinition(final DataSchemaNode node) {
+ private TypeDef typeDefinition(final DataSchemaNode node) {
if (node instanceof LeafListSchemaNode) {
- return typeDefinition((LeafListSchemaNode) node);
+ return typeDefinition(((LeafListSchemaNode)node).getType(), node.getQName());
} else if (node instanceof LeafSchemaNode) {
- return _typeDefinition((LeafSchemaNode) node);
+ return typeDefinition(((LeafSchemaNode)node).getType(), node.getQName());
} else if (node instanceof AnyXmlSchemaNode) {
return null;
} else {
}
@Override
- public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
+ public void onDataChanged(final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
// TODO Auto-generated method stub
if (!change.getCreatedData().isEmpty() || !change.getUpdatedData().isEmpty()
|| !change.getRemovedPaths().isEmpty()) {
- String xml = prepareXmlFrom(change);
- Event event = new Event(EventType.NOTIFY);
+ final String xml = prepareXmlFrom(change);
+ final Event event = new Event(EventType.NOTIFY);
event.setData(xml);
eventBus.post(event);
}
@Subscribe
public void recordCustomerChange(final Event event) {
if (event.getType() == EventType.REGISTER) {
- Channel subscriber = event.getSubscriber();
+ final Channel subscriber = event.getSubscriber();
if (!subscribers.contains(subscriber)) {
subscribers.add(subscriber);
}
subscribers.remove(event.getSubscriber());
Notificator.removeListenerIfNoSubscriberExists(ListenerAdapter.this);
} else if (event.getType() == EventType.NOTIFY) {
- for (Channel subscriber : subscribers) {
+ for (final Channel subscriber : subscribers) {
if (subscriber.isActive()) {
LOG.debug("Data are sent to subscriber {}:", subscriber.remoteAddress());
subscriber.writeAndFlush(new TextWebSocketFrame(event.getData()));
* DataChangeEvent
* @return Data in printable form.
*/
- private String prepareXmlFrom(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- Document doc = createDocument();
- Element notificationElement = doc.createElementNS("urn:ietf:params:xml:ns:netconf:notification:1.0",
+ private String prepareXmlFrom(final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
+ final Document doc = createDocument();
+ final Element notificationElement = doc.createElementNS("urn:ietf:params:xml:ns:netconf:notification:1.0",
"notification");
doc.appendChild(notificationElement);
- Element eventTimeElement = doc.createElement("eventTime");
+ final Element eventTimeElement = doc.createElement("eventTime");
eventTimeElement.setTextContent(toRFC3339(new Date()));
notificationElement.appendChild(eventTimeElement);
- Element dataChangedNotificationEventElement = doc.createElementNS(
+ final Element dataChangedNotificationEventElement = doc.createElementNS(
"urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote", "data-changed-notification");
addValuesToDataChangedNotificationEventElement(doc, dataChangedNotificationEventElement, change);
notificationElement.appendChild(dataChangedNotificationEventElement);
try {
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- Transformer transformer = FACTORY.newTransformer();
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ final Transformer transformer = FACTORY.newTransformer();
transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "no");
transformer.setOutputProperty(OutputKeys.METHOD, "xml");
transformer.setOutputProperty(OutputKeys.INDENT, "yes");
transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "4");
transformer.transform(new DOMSource(doc), new StreamResult(new OutputStreamWriter(out, Charsets.UTF_8)));
- byte[] charData = out.toByteArray();
+ final byte[] charData = out.toByteArray();
return new String(charData, "UTF-8");
} catch (TransformerException | UnsupportedEncodingException e) {
- String msg = "Error during transformation of Document into String";
+ final String msg = "Error during transformation of Document into String";
LOG.error(msg, e);
return msg;
}
final DocumentBuilder bob;
try {
bob = DBF.newDocumentBuilder();
- } catch (ParserConfigurationException e) {
+ } catch (final ParserConfigurationException e) {
return null;
}
return bob.newDocument();
*/
private void addValuesToDataChangedNotificationEventElement(final Document doc,
final Element dataChangedNotificationEventElement,
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
+ final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
addValuesFromDataToElement(doc, change.getCreatedData().keySet(), dataChangedNotificationEventElement,
Operation.CREATED);
if (change.getCreatedData().isEmpty()) {
* @param operation
* {@link Operation}
*/
- private void addValuesFromDataToElement(Document doc, Set<YangInstanceIdentifier> data, Element element,
- Operation operation) {
+ private void addValuesFromDataToElement(final Document doc, final Set<YangInstanceIdentifier> data, final Element element,
+ final Operation operation) {
if (data == null || data.isEmpty()) {
return;
}
- for (YangInstanceIdentifier path : data) {
- Node node = createDataChangeEventElement(doc, path, null, operation);
+ for (final YangInstanceIdentifier path : data) {
+ final Node node = createDataChangeEventElement(doc, path, null, operation);
element.appendChild(node);
}
}
* @param operation
* {@link Operation}
*/
- private void addValuesFromDataToElement(Document doc, Map<YangInstanceIdentifier, CompositeNode> data, Element element,
- Operation operation) {
+ private void addValuesFromDataToElement(final Document doc, final Map<YangInstanceIdentifier, CompositeNode> data, final Element element,
+ final Operation operation) {
if (data == null || data.isEmpty()) {
return;
}
- for (Entry<YangInstanceIdentifier, CompositeNode> entry : data.entrySet()) {
- Node node = createDataChangeEventElement(doc, entry.getKey(), entry.getValue(), operation);
+ for (final Entry<YangInstanceIdentifier, CompositeNode> entry : data.entrySet()) {
+ final Node node = createDataChangeEventElement(doc, entry.getKey(), entry.getValue(), operation);
element.appendChild(node);
}
}
* {@link Operation}
* @return {@link Node} node represented by changed event element.
*/
- private Node createDataChangeEventElement(Document doc, YangInstanceIdentifier path, CompositeNode data,
- Operation operation) {
- Element dataChangeEventElement = doc.createElement("data-change-event");
-
- Element pathElement = doc.createElement("path");
+ private Node createDataChangeEventElement(final Document doc, final YangInstanceIdentifier path, final CompositeNode data,
+ final Operation operation) {
+ final Element dataChangeEventElement = doc.createElement("data-change-event");
+ final Element pathElement = doc.createElement("path");
addPathAsValueToElement(path, pathElement);
dataChangeEventElement.appendChild(pathElement);
// storeElement.setTextContent(store.value);
// dataChangeEventElement.appendChild(storeElement);
- Element operationElement = doc.createElement("operation");
+ final Element operationElement = doc.createElement("operation");
operationElement.setTextContent(operation.value);
dataChangeEventElement.appendChild(operationElement);
if (data != null) {
- Element dataElement = doc.createElement("data");
- Node dataAnyXml = translateToXml(path, data);
- Node adoptedNode = doc.adoptNode(dataAnyXml);
+ final Element dataElement = doc.createElement("data");
+ final Node dataAnyXml = translateToXml(path, data);
+ final Node adoptedNode = doc.adoptNode(dataAnyXml);
dataElement.appendChild(adoptedNode);
dataChangeEventElement.appendChild(dataElement);
}
* @return Data in XML format.
*/
private Node translateToXml(final YangInstanceIdentifier path, final CompositeNode data) {
- DataNodeContainer schemaNode = ControllerContext.getInstance().getDataNodeContainerFor(path);
+ final DataNodeContainer schemaNode = ControllerContext.getInstance().getDataNodeContainerFor(path);
if (schemaNode == null) {
LOG.info(
"Path '{}' contains node with unsupported type (supported type is Container or List) or some node was not found.",
return null;
}
try {
- Document xml = xmlMapper.write(data, schemaNode);
+ final Document xml = xmlMapper.write(data, schemaNode);
return xml.getFirstChild();
- } catch (UnsupportedDataTypeException e) {
+ } catch (final UnsupportedDataTypeException e) {
LOG.error("Error occured during translation of notification to XML.", e);
return null;
}
*/
private void addPathAsValueToElement(final YangInstanceIdentifier path, final Element element) {
// Map< key = namespace, value = prefix>
- Map<String, String> prefixes = new HashMap<>();
- YangInstanceIdentifier instanceIdentifier = path;
- StringBuilder textContent = new StringBuilder();
+ final Map<String, String> prefixes = new HashMap<>();
+ final YangInstanceIdentifier instanceIdentifier = path;
+ final StringBuilder textContent = new StringBuilder();
// FIXME: BUG-1281: this is duplicated code from yangtools (BUG-1275)
- for (PathArgument pathArgument : instanceIdentifier.getPathArguments()) {
+ for (final PathArgument pathArgument : instanceIdentifier.getPathArguments()) {
textContent.append("/");
writeIdentifierWithNamespacePrefix(element, textContent, pathArgument.getNodeType(), prefixes);
if (pathArgument instanceof NodeIdentifierWithPredicates) {
- Map<QName, Object> predicates = ((NodeIdentifierWithPredicates) pathArgument).getKeyValues();
- for (QName keyValue : predicates.keySet()) {
- String predicateValue = String.valueOf(predicates.get(keyValue));
+ final Map<QName, Object> predicates = ((NodeIdentifierWithPredicates) pathArgument).getKeyValues();
+ for (final QName keyValue : predicates.keySet()) {
+ final String predicateValue = String.valueOf(predicates.get(keyValue));
textContent.append("[");
writeIdentifierWithNamespacePrefix(element, textContent, keyValue, prefixes);
textContent.append("='");
*/
private static void writeIdentifierWithNamespacePrefix(final Element element, final StringBuilder textContent,
final QName qName, final Map<String, String> prefixes) {
- String namespace = qName.getNamespace().toString();
+ final String namespace = qName.getNamespace().toString();
String prefix = prefixes.get(namespace);
if (prefix == null) {
- prefix = qName.getPrefix();
- if (prefix == null || prefix.isEmpty() || prefixes.containsValue(prefix)) {
- prefix = generateNewPrefix(prefixes.values());
- }
+ prefix = generateNewPrefix(prefixes.values());
}
element.setAttribute("xmlns:" + prefix, namespace);
*/
private static String generateNewPrefix(final Collection<String> prefixes) {
StringBuilder result = null;
- Random random = new Random();
+ final Random random = new Random();
do {
result = new StringBuilder();
for (int i = 0; i < 4; i++) {
- int randomNumber = 0x61 + (Math.abs(random.nextInt()) % 26);
+ final int randomNumber = 0x61 + (Math.abs(random.nextInt()) % 26);
result.append(Character.toChars(randomNumber));
}
} while (prefixes.contains(result.toString()));
if (!subscriber.isActive()) {
LOG.debug("Channel is not active between websocket server and subscriber {}" + subscriber.remoteAddress());
}
- Event event = new Event(EventType.REGISTER);
+ final Event event = new Event(EventType.REGISTER);
event.setSubscriber(subscriber);
eventBus.post(event);
}
*/
public void removeSubscriber(final Channel subscriber) {
LOG.debug("Subscriber {} is removed.", subscriber.remoteAddress());
- Event event = new Event(EventType.DEREGISTER);
+ final Event event = new Event(EventType.DEREGISTER);
event.setSubscriber(subscriber);
eventBus.post(event);
}
@Test
public void snAsYangIdentityrefWithQNamePrefixToXMLTest() {
serializeToXml(prepareIdentityrefData("prefix", true),
- "<lf11 xmlns:prefix=\"referenced:module\">prefix:iden</lf11>");
+ "<lf11 xmlns","=\"referenced:module\">",":iden</lf11>");
}
@Test
@Test
public void snAsYangInt8ToXmlTest() {
- String elName = "lfInt8";
+ final String elName = "lfInt8";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Int8.getInstance()).deserialize("14"), elName),
"<" + elName + ">14</" + elName + ">");
@Test
public void snAsYangInt16ToXmlTest() {
- String elName = "lfInt16";
+ final String elName = "lfInt16";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Int16.getInstance()).deserialize("3000"),
elName), "<" + elName + ">3000</" + elName + ">");
@Test
public void snAsYangInt32ToXmlTest() {
- String elName = "lfInt32";
+ final String elName = "lfInt32";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Int32.getInstance()).deserialize("201234"),
elName), "<" + elName + ">201234</" + elName + ">");
@Test
public void snAsYangInt64ToXmlTest() {
- String elName = "lfInt64";
+ final String elName = "lfInt64";
serializeToXml(
prepareCnStructForYangData(
TypeDefinitionAwareCodec.from(Int64.getInstance()).deserialize("5123456789"), elName), "<"
@Test
public void snAsYangUint8ToXmlTest() {
- String elName = "lfUint8";
+ final String elName = "lfUint8";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Uint8.getInstance()).deserialize("200"),
elName), "<" + elName + ">200</" + elName + ">");
@Test
public void snAsYangUint16ToXmlTest() {
- String elName = "lfUint16";
+ final String elName = "lfUint16";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Uint16.getInstance()).deserialize("4000"),
elName), "<" + elName + ">4000</" + elName + ">");
@Test
public void snAsYangUint32ToXmlTest() {
- String elName = "lfUint32";
+ final String elName = "lfUint32";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Uint32.getInstance())
.deserialize("4123456789"), elName), "<" + elName + ">4123456789</" + elName + ">");
@Test
public void snAsYangUint64ToXmlTest() {
- String elName = "lfUint64";
+ final String elName = "lfUint64";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(Uint64.getInstance())
.deserialize("5123456789"), elName), "<" + elName + ">5123456789</" + elName + ">");
@Test
public void snAsYangBinaryToXmlTest() {
- String elName = "lfBinary";
+ final String elName = "lfBinary";
serializeToXml(
prepareCnStructForYangData(
TypeDefinitionAwareCodec.from(BinaryType.getInstance()).deserialize(
@Test
public void snAsYangBitsToXmlTest() {
- BitsTypeDefinition.Bit mockBit1 = mock(BitsTypeDefinition.Bit.class);
+ final BitsTypeDefinition.Bit mockBit1 = mock(BitsTypeDefinition.Bit.class);
when(mockBit1.getName()).thenReturn("one");
- BitsTypeDefinition.Bit mockBit2 = mock(BitsTypeDefinition.Bit.class);
+ final BitsTypeDefinition.Bit mockBit2 = mock(BitsTypeDefinition.Bit.class);
when(mockBit2.getName()).thenReturn("two");
- List<BitsTypeDefinition.Bit> bitList = Lists.newArrayList(mockBit1, mockBit2);
+ final List<BitsTypeDefinition.Bit> bitList = Lists.newArrayList(mockBit1, mockBit2);
- String elName = "lfBits";
+ final String elName = "lfBits";
serializeToXml(
prepareCnStructForYangData(
TypeDefinitionAwareCodec.from(BitsType.create(mock(SchemaPath.class), bitList)).deserialize(
@Test
public void snAsYangEnumerationToXmlTest() {
- EnumTypeDefinition.EnumPair mockEnum = mock(EnumTypeDefinition.EnumPair.class);
+ final EnumTypeDefinition.EnumPair mockEnum = mock(EnumTypeDefinition.EnumPair.class);
when(mockEnum.getName()).thenReturn("enum2");
- List<EnumPair> enumList = Lists.newArrayList(mockEnum);
+ final List<EnumPair> enumList = Lists.newArrayList(mockEnum);
- String elName = "lfEnumeration";
+ final String elName = "lfEnumeration";
serializeToXml(
prepareCnStructForYangData(
TypeDefinitionAwareCodec.from(
@Test
public void snAsYangEmptyToXmlTest() {
- String elName = "lfEmpty";
+ final String elName = "lfEmpty";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(EmptyType.getInstance()).deserialize(null),
elName), "<" + elName + "/>");
@Test
public void snAsYangBooleanToXmlTest() {
- String elName = "lfBoolean";
+ final String elName = "lfBoolean";
serializeToXml(
prepareCnStructForYangData(TypeDefinitionAwareCodec.from(BooleanType.getInstance()).deserialize("str"),
elName), "<" + elName + ">false</" + elName + ">");
@Test
public void snAsYangUnionToXmlTest() {
- BitsTypeDefinition.Bit mockBit1 = mock(BitsTypeDefinition.Bit.class);
+ final BitsTypeDefinition.Bit mockBit1 = mock(BitsTypeDefinition.Bit.class);
when(mockBit1.getName()).thenReturn("first");
- BitsTypeDefinition.Bit mockBit2 = mock(BitsTypeDefinition.Bit.class);
+ final BitsTypeDefinition.Bit mockBit2 = mock(BitsTypeDefinition.Bit.class);
when(mockBit2.getName()).thenReturn("second");
- List<BitsTypeDefinition.Bit> bitList = Lists.newArrayList(mockBit1, mockBit2);
+ final List<BitsTypeDefinition.Bit> bitList = Lists.newArrayList(mockBit1, mockBit2);
- List<TypeDefinition<?>> types = Lists.<TypeDefinition<?>> newArrayList(Int8.getInstance(),
+ final List<TypeDefinition<?>> types = Lists.<TypeDefinition<?>> newArrayList(Int8.getInstance(),
BitsType.create(mock(SchemaPath.class), bitList), BooleanType.getInstance());
- UnionType unionType = UnionType.create(types);
+ final UnionType unionType = UnionType.create(types);
- String elName = "lfUnion";
- String int8 = "15";
+ final String elName = "lfUnion";
+ final String int8 = "15";
serializeToXml(prepareCnStructForYangData(TypeDefinitionAwareCodec.from(unionType).deserialize(int8), elName),
"<" + elName + ">15</" + elName + ">");
- String bits = "first second";
+ final String bits = "first second";
serializeToXml(prepareCnStructForYangData(TypeDefinitionAwareCodec.from(unionType).deserialize(bits), elName),
"<" + elName + ">first second</" + elName + ">");
- String bool = "str";
+ final String bool = "str";
serializeToXml(prepareCnStructForYangData(TypeDefinitionAwareCodec.from(unionType).deserialize(bool), elName),
"<" + elName + ">str</" + elName + ">");
}
assertNotNull(xmlString);
boolean containSearchedStr = false;
String strRepresentation = "";
- for (String searchedStr : xmlRepresentation) {
+ for (final String searchedStr : xmlRepresentation) {
if (xmlString.contains(searchedStr)) {
containSearchedStr = true;
break;
}
private CompositeNode prepareIdentityrefData(final String prefix, final boolean valueAsQName) {
- MutableCompositeNode cont = NodeFactory.createMutableCompositeNode(
+ final MutableCompositeNode cont = NodeFactory.createMutableCompositeNode(
TestUtils.buildQName("cont", "basic:module", "2013-12-2"), null, null, ModifyAction.CREATE, null);
- MutableCompositeNode cont1 = NodeFactory.createMutableCompositeNode(
+ final MutableCompositeNode cont1 = NodeFactory.createMutableCompositeNode(
TestUtils.buildQName("cont1", "basic:module", "2013-12-2"), cont, null, ModifyAction.CREATE, null);
cont.getValue().add(cont1);
} else {
value = "no qname value";
}
- MutableSimpleNode<Object> lf11 = NodeFactory.createMutableSimpleNode(
+ final MutableSimpleNode<Object> lf11 = NodeFactory.createMutableSimpleNode(
TestUtils.buildQName("lf11", "basic:module", "2013-12-2"), cont1, value, ModifyAction.CREATE, null);
cont1.getValue().add(lf11);
cont1.init();
}
private CompositeNode prepareCnStructForYangData(final Object data, final String leafName) {
- MutableCompositeNode cont = NodeFactory.createMutableCompositeNode(
+ final MutableCompositeNode cont = NodeFactory.createMutableCompositeNode(
TestUtils.buildQName("cont", "basic:module", "2013-12-2"), null, null, ModifyAction.CREATE, null);
- MutableSimpleNode<Object> lf1 = NodeFactory.createMutableSimpleNode(
+ final MutableSimpleNode<Object> lf1 = NodeFactory.createMutableSimpleNode(
TestUtils.buildQName(leafName, "basic:module", "2013-12-2"), cont, data, ModifyAction.CREATE, null);
cont.getValue().add(lf1);
cont.init();
}
private CompositeNode prepareLeafrefData() {
- MutableCompositeNode cont = NodeFactory.createMutableCompositeNode(TestUtils.buildQName("cont"), null, null,
+ final MutableCompositeNode cont = NodeFactory.createMutableCompositeNode(TestUtils.buildQName("cont"), null, null,
ModifyAction.CREATE, null);
- MutableSimpleNode<Object> lfBoolean = NodeFactory.createMutableSimpleNode(TestUtils.buildQName("lfBoolean"),
+ final MutableSimpleNode<Object> lfBoolean = NodeFactory.createMutableSimpleNode(TestUtils.buildQName("lfBoolean"),
cont, Boolean.TRUE, ModifyAction.CREATE, null);
- MutableSimpleNode<Object> lfLfref = NodeFactory.createMutableSimpleNode(TestUtils.buildQName("lfLfref"), cont,
+ final MutableSimpleNode<Object> lfLfref = NodeFactory.createMutableSimpleNode(TestUtils.buildQName("lfLfref"), cont,
"true", ModifyAction.CREATE, null);
cont.getValue().add(lfBoolean);
cont.getValue().add(lfLfref);
@BeforeClass
public static void initialize() {
- dataLoad("/json-to-cnsn/leafref");
+ dataLoad("/json-to-cnsn/leafref",2,"leafref-module","cont");
}
/**
private final static YangContextParser parser = new YangParserImpl();
- private static Set<Module> loadModules(String resourceDirectory) throws FileNotFoundException {
+ private static Set<Module> loadModules(final String resourceDirectory) throws FileNotFoundException {
final File testDir = new File(resourceDirectory);
final String[] fileList = testDir.list();
final List<File> testFiles = new ArrayList<File>();
throw new FileNotFoundException(resourceDirectory);
}
for (int i = 0; i < fileList.length; i++) {
- String fileName = fileList[i];
+ final String fileName = fileList[i];
if (new File(testDir, fileName).isDirectory() == false) {
testFiles.add(new File(testDir, fileName));
}
return parser.parseYangModels(testFiles);
}
- public static Set<Module> loadModulesFrom(String yangPath) {
+ public static Set<Module> loadModulesFrom(final String yangPath) {
try {
return TestUtils.loadModules(TestUtils.class.getResource(yangPath).getPath());
- } catch (FileNotFoundException e) {
+ } catch (final FileNotFoundException e) {
LOG.error("Yang files at path: " + yangPath + " weren't loaded.");
}
return null;
}
- public static SchemaContext loadSchemaContext(Set<Module> modules) {
+ public static SchemaContext loadSchemaContext(final Set<Module> modules) {
return parser.resolveSchemaContext(modules);
}
- public static SchemaContext loadSchemaContext(String resourceDirectory) throws FileNotFoundException {
+ public static SchemaContext loadSchemaContext(final String resourceDirectory) throws FileNotFoundException {
return parser.resolveSchemaContext(loadModulesFrom(resourceDirectory));
}
- public static Module findModule(Set<Module> modules, String moduleName) {
- for (Module module : modules) {
+ public static Module findModule(final Set<Module> modules, final String moduleName) {
+ for (final Module module : modules) {
if (module.getName().equals(moduleName)) {
return module;
}
return null;
}
- public static Document loadDocumentFrom(InputStream inputStream) {
+ public static Document loadDocumentFrom(final InputStream inputStream) {
try {
- DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();
- DocumentBuilder docBuilder = dbfac.newDocumentBuilder();
+ final DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();
+ final DocumentBuilder docBuilder = dbfac.newDocumentBuilder();
return docBuilder.parse(inputStream);
} catch (SAXException | IOException | ParserConfigurationException e) {
LOG.error("Error during loading Document from XML", e);
}
}
- public static String getDocumentInPrintableForm(Document doc) {
+ public static String getDocumentInPrintableForm(final Document doc) {
Preconditions.checkNotNull(doc);
try {
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- TransformerFactory tf = TransformerFactory.newInstance();
- Transformer transformer = tf.newTransformer();
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ final TransformerFactory tf = TransformerFactory.newInstance();
+ final Transformer transformer = tf.newTransformer();
transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "no");
transformer.setOutputProperty(OutputKeys.METHOD, "xml");
transformer.setOutputProperty(OutputKeys.INDENT, "yes");
transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "4");
transformer.transform(new DOMSource(doc), new StreamResult(new OutputStreamWriter(out, "UTF-8")));
- byte[] charData = out.toByteArray();
+ final byte[] charData = out.toByteArray();
return new String(charData, "UTF-8");
} catch (IOException | TransformerException e) {
- String msg = "Error during transformation of Document into String";
+ final String msg = "Error during transformation of Document into String";
LOG.error(msg, e);
return msg;
}
* {@code dataSchemaNode}. The method {@link RestconfImpl#createConfigurationData createConfigurationData} is used
* because it contains calling of method {code normalizeNode}
*/
- public static void normalizeCompositeNode(Node<?> node, Set<Module> modules, String schemaNodePath) {
- RestconfImpl restconf = RestconfImpl.getInstance();
+ public static void normalizeCompositeNode(final Node<?> node, final Set<Module> modules, final String schemaNodePath) {
+ final RestconfImpl restconf = RestconfImpl.getInstance();
ControllerContext.getInstance().setSchemas(TestUtils.loadSchemaContext(modules));
prepareMocksForRestconf(modules, restconf);
* module set has only one element then this element is returned.
*
*/
- public static Module resolveModule(String searchedModuleName, Set<Module> modules) {
+ public static Module resolveModule(final String searchedModuleName, final Set<Module> modules) {
assertNotNull("Modules can't be null.", modules);
if (searchedModuleName != null) {
- for (Module m : modules) {
+ for (final Module m : modules) {
if (m.getName().equals(searchedModuleName)) {
return m;
}
return null;
}
- public static DataSchemaNode resolveDataSchemaNode(String searchedDataSchemaName, Module module) {
+ public static DataSchemaNode resolveDataSchemaNode(final String searchedDataSchemaName, final Module module) {
assertNotNull("Module can't be null", module);
if (searchedDataSchemaName != null) {
- for (DataSchemaNode dsn : module.getChildNodes()) {
+ for (final DataSchemaNode dsn : module.getChildNodes()) {
if (dsn.getQName().getLocalName().equals(searchedDataSchemaName)) {
return dsn;
}
return null;
}
- public static QName buildQName(String name, String uri, String date, String prefix) {
+ public static QName buildQName(final String name, final String uri, final String date, final String prefix) {
try {
- URI u = new URI(uri);
+ final URI u = new URI(uri);
Date dt = null;
if (date != null) {
dt = Date.valueOf(date);
}
- return new QName(u, dt, prefix, name);
- } catch (URISyntaxException e) {
+ return QName.create(u, dt, name);
+ } catch (final URISyntaxException e) {
return null;
}
}
- public static QName buildQName(String name, String uri, String date) {
+ public static QName buildQName(final String name, final String uri, final String date) {
return buildQName(name, uri, date, null);
}
- public static QName buildQName(String name) {
+ public static QName buildQName(final String name) {
return buildQName(name, "", null);
}
- private static void addDummyNamespaceToAllNodes(NodeWrapper<?> wrappedNode) throws URISyntaxException {
+ private static void addDummyNamespaceToAllNodes(final NodeWrapper<?> wrappedNode) throws URISyntaxException {
wrappedNode.setNamespace(new URI(""));
if (wrappedNode instanceof CompositeNodeWrapper) {
- for (NodeWrapper<?> childNodeWrapper : ((CompositeNodeWrapper) wrappedNode).getValues()) {
+ for (final NodeWrapper<?> childNodeWrapper : ((CompositeNodeWrapper) wrappedNode).getValues()) {
addDummyNamespaceToAllNodes(childNodeWrapper);
}
}
}
- private static void prepareMocksForRestconf(Set<Module> modules, RestconfImpl restconf) {
- ControllerContext controllerContext = ControllerContext.getInstance();
- BrokerFacade mockedBrokerFacade = mock(BrokerFacade.class);
+ private static void prepareMocksForRestconf(final Set<Module> modules, final RestconfImpl restconf) {
+ final ControllerContext controllerContext = ControllerContext.getInstance();
+ final BrokerFacade mockedBrokerFacade = mock(BrokerFacade.class);
controllerContext.setSchemas(TestUtils.loadSchemaContext(modules));
restconf.setBroker(mockedBrokerFacade);
}
- public static Node<?> readInputToCnSn(String path, boolean dummyNamespaces,
- MessageBodyReader<Node<?>> reader) throws WebApplicationException {
+ public static Node<?> readInputToCnSn(final String path, final boolean dummyNamespaces,
+ final MessageBodyReader<Node<?>> reader) throws WebApplicationException {
- InputStream inputStream = TestUtils.class.getResourceAsStream(path);
+ final InputStream inputStream = TestUtils.class.getResourceAsStream(path);
try {
final Node<?> node = reader.readFrom(null, null, null, null, null, inputStream);
assertTrue(node instanceof CompositeNodeWrapper);
try {
TestUtils.addDummyNamespaceToAllNodes((CompositeNodeWrapper) node);
return ((CompositeNodeWrapper) node).unwrap();
- } catch (URISyntaxException e) {
+ } catch (final URISyntaxException e) {
LOG.error(e.getMessage());
assertTrue(e.getMessage(), false);
}
}
return node;
- } catch (IOException e) {
+ } catch (final IOException e) {
LOG.error(e.getMessage());
assertTrue(e.getMessage(), false);
}
// return null;
// }
- public static Node<?> readInputToCnSn(String path, MessageBodyReader<Node<?>> reader) {
+ public static Node<?> readInputToCnSn(final String path, final MessageBodyReader<Node<?>> reader) {
return readInputToCnSn(path, false, reader);
}
- public static String writeCompNodeWithSchemaContextToOutput(Node<?> node, Set<Module> modules,
- DataSchemaNode dataSchemaNode, MessageBodyWriter<StructuredData> messageBodyWriter) throws IOException,
+ public static String writeCompNodeWithSchemaContextToOutput(final Node<?> node, final Set<Module> modules,
+ final DataSchemaNode dataSchemaNode, final MessageBodyWriter<StructuredData> messageBodyWriter) throws IOException,
WebApplicationException {
assertNotNull(dataSchemaNode);
assertNotNull("Composite node can't be null", node);
- ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream();
+ final ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream();
ControllerContext.getInstance().setSchemas(loadSchemaContext(modules));
return byteArrayOS.toString();
}
- public static String loadTextFile(String filePath) throws IOException {
- FileReader fileReader = new FileReader(filePath);
- BufferedReader bufReader = new BufferedReader(fileReader);
+ public static String loadTextFile(final String filePath) throws IOException {
+ final FileReader fileReader = new FileReader(filePath);
+ final BufferedReader bufReader = new BufferedReader(fileReader);
String line = null;
- StringBuilder result = new StringBuilder();
+ final StringBuilder result = new StringBuilder();
while ((line = bufReader.readLine()) != null) {
result.append(line);
}
return result.toString();
}
- private static Pattern patternForStringsSeparatedByWhiteChars(String... substrings) {
- StringBuilder pattern = new StringBuilder();
+ private static Pattern patternForStringsSeparatedByWhiteChars(final String... substrings) {
+ final StringBuilder pattern = new StringBuilder();
pattern.append(".*");
- for (String substring : substrings) {
+ for (final String substring : substrings) {
pattern.append(substring);
pattern.append("\\s*");
}
return Pattern.compile(pattern.toString(), Pattern.DOTALL);
}
- public static boolean containsStringData(String jsonOutput, String... substrings) {
- Pattern pattern = patternForStringsSeparatedByWhiteChars(substrings);
- Matcher matcher = pattern.matcher(jsonOutput);
+ public static boolean containsStringData(final String jsonOutput, final String... substrings) {
+ final Pattern pattern = patternForStringsSeparatedByWhiteChars(substrings);
+ final Matcher matcher = pattern.matcher(jsonOutput);
return matcher.matches();
}
public static NormalizedNode compositeNodeToDatastoreNormalizedNode(final CompositeNode compositeNode,
final DataSchemaNode schema) {
- List<Node<?>> lst = new ArrayList<Node<?>>();
+ final List<Node<?>> lst = new ArrayList<Node<?>>();
lst.add(compositeNode);
if (schema instanceof ContainerSchemaNode) {
return CnSnToNormalizedNodeParserFactory.getInstance().getContainerNodeParser()
"It wasn't possible to translate specified data to datastore readable form."));
}
- public static YangInstanceIdentifier.NodeIdentifier getNodeIdentifier(String localName, String namespace,
- String revision) throws ParseException {
+ public static YangInstanceIdentifier.NodeIdentifier getNodeIdentifier(final String localName, final String namespace,
+ final String revision) throws ParseException {
return new YangInstanceIdentifier.NodeIdentifier(QName.create(namespace, revision, localName));
}
- public static YangInstanceIdentifier.NodeIdentifierWithPredicates getNodeIdentifierPredicate(String localName,
- String namespace, String revision, Map<String, Object> keys) throws ParseException {
- Map<QName, Object> predicate = new HashMap<>();
- for (String key : keys.keySet()) {
+ public static YangInstanceIdentifier.NodeIdentifierWithPredicates getNodeIdentifierPredicate(final String localName,
+ final String namespace, final String revision, final Map<String, Object> keys) throws ParseException {
+ final Map<QName, Object> predicate = new HashMap<>();
+ for (final String key : keys.keySet()) {
predicate.put(QName.create(namespace, revision, key), keys.get(key));
}
QName.create(namespace, revision, localName), predicate);
}
- public static YangInstanceIdentifier.NodeIdentifierWithPredicates getNodeIdentifierPredicate(String localName,
- String namespace, String revision, String... keysAndValues) throws ParseException {
- java.util.Date date = new SimpleDateFormat("yyyy-MM-dd").parse(revision);
+ public static YangInstanceIdentifier.NodeIdentifierWithPredicates getNodeIdentifierPredicate(final String localName,
+ final String namespace, final String revision, final String... keysAndValues) throws ParseException {
+ final java.util.Date date = new SimpleDateFormat("yyyy-MM-dd").parse(revision);
if (keysAndValues.length % 2 != 0) {
new IllegalArgumentException("number of keys argument have to be divisible by 2 (map)");
}
- Map<QName, Object> predicate = new HashMap<>();
+ final Map<QName, Object> predicate = new HashMap<>();
int i = 0;
while (i < keysAndValues.length) {
}
public static CompositeNode prepareCompositeNodeWithIetfInterfacesInterfacesData() {
- CompositeNodeBuilder<ImmutableCompositeNode> interfaceBuilder = ImmutableCompositeNode.builder();
+ final CompositeNodeBuilder<ImmutableCompositeNode> interfaceBuilder = ImmutableCompositeNode.builder();
interfaceBuilder.addLeaf(buildQName("name", "dummy", "2014-07-29"), "eth0");
interfaceBuilder.addLeaf(buildQName("type", "dummy", "2014-07-29"), "ethernetCsmacd");
interfaceBuilder.addLeaf(buildQName("enabled", "dummy", "2014-07-29"), "false");
}
static NormalizedNode<?,?> prepareNormalizedNodeWithIetfInterfacesInterfacesData() throws ParseException {
- String ietfInterfacesDate = "2013-07-04";
- String namespace = "urn:ietf:params:xml:ns:yang:ietf-interfaces";
- DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode> mapEntryNode = ImmutableMapEntryNodeBuilder.create();
+ final String ietfInterfacesDate = "2013-07-04";
+ final String namespace = "urn:ietf:params:xml:ns:yang:ietf-interfaces";
+ final DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode> mapEntryNode = ImmutableMapEntryNodeBuilder.create();
- Map<String, Object> predicates = new HashMap<>();
+ final Map<String, Object> predicates = new HashMap<>();
predicates.put("name", "eth0");
mapEntryNode.withNodeIdentifier(getNodeIdentifierPredicate("interface", namespace, ietfInterfacesDate,
--- /dev/null
+module augment-leafref-module {
+ namespace "augment:leafref:module";
+
+ prefix "auglfrfmo";
+ revision 2014-12-16 {
+ }
+
+
+ typedef leafreftype {
+ type leafref {
+ path "/cont/lf3";
+
+ }
+ }
+
+ container cont {
+ leaf lf3 {
+ type string;
+ }
+ }
+}
\ No newline at end of file
{
"cont":{
"lf1":121,
- "lf2":121
+ "lf2":121,
+ "lf4":"pcc://39.39.39.39"
}
}
\ No newline at end of file
namespace "leafref:module";
prefix "lfrfmo";
+
+ import augment-leafref-module {prefix augleafref; revision-date 2014-12-16;}
revision 2013-11-18 {
}
path "/cont/lf1";
}
}
+ leaf lf4 {
+ type augleafref:leafreftype;
+ }
}
}
\ No newline at end of file
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
* Internal {@link TransactionChainListener} joining all DS commits
* to Set of chained changes for prevent often DataStore touches.
*/
- public interface StatDataStoreOperation {
+ public abstract class StatDataStoreOperation {
+ public enum StatsManagerOperationType {
+ /**
+ * Operation will carry out work related to new node addition /
+ * update
+ */
+ NODE_UPDATE,
+ /**
+ * Operation will carry out work related to node removal
+ */
+ NODE_REMOVAL,
+ /**
+ * Operation will commit data to the operational data store
+ */
+ DATA_COMMIT_OPER_DS
+ }
+
+ private NodeId nodeId;
+ private StatsManagerOperationType operationType = StatsManagerOperationType.DATA_COMMIT_OPER_DS;
+
+ public StatDataStoreOperation(final StatsManagerOperationType operType, final NodeId id){
+ if(operType != null){
+ operationType = operType;
+ }
+ nodeId = id;
+ }
+
+ public final StatsManagerOperationType getType() {
+ return operationType;
+ }
+
+ public final NodeId getNodeId(){
+ return nodeId;
+ }
/**
- * Apply all read / write (put|merge) operation
- * for DataStore
+ * Apply all read / write (put|merge) operation for DataStore
+ *
* @param {@link ReadWriteTransaction} tx
*/
- void applyOperation(ReadWriteTransaction tx);
+ public abstract void applyOperation(ReadWriteTransaction tx);
}
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.md.statistics.manager.impl.helper.FlowComparator;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
return;
}
/* check flow Capable Node and write statistics */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
return;
}
/* add flow's statistics */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
/* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent, transId);
}
+
});
}
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
/* Get and Validate TransactionCacheContainer */
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
/* Get and Validate TransactionCacheContainer */
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FeatureCapability;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
Preconditions.checkNotNull(data, "SwitchFeatures data for {} can not be null!", keyIdent);
Preconditions.checkArgument(( ! keyIdent.isWildcarded()), "InstanceIdentifier is WildCarded!");
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.NODE_UPDATE,nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId()) {
+
@Override
public void applyOperation(final ReadWriteTransaction tx) {
Preconditions.checkArgument(nodeIdent != null, "InstanceIdentifier can not be NULL!");
Preconditions.checkArgument(( ! nodeIdent.isWildcarded()),
"InstanceIdentifier {} is WildCarded!", nodeIdent);
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.NODE_REMOVAL,nodeIdent.firstKeyOf(Node.class, NodeKey.class).getId()) {
+
@Override
public void applyOperation(final ReadWriteTransaction tx) {
manager.disconnectedNodeUnregistration(nodeIdent);
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
.child(Node.class, new NodeKey(nodeId));
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction trans) {
final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
return;
}
/* Don't block RPC Notification thread */
- manager.enqueue(new StatDataStoreOperation() {
+ manager.enqueue(new StatDataStoreOperation(StatsManagerOperationType.DATA_COMMIT_OPER_DS,nodeId) {
@Override
public void applyOperation(final ReadWriteTransaction trans) {
final List<FlowTableAndStatisticsMap> tableStats = new ArrayList<FlowTableAndStatisticsMap>(10);
package org.opendaylight.controller.md.statistics.manager.impl;
+import java.util.Arrays;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArrayList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
@Override
public void onSuccess(final RpcResult<? extends TransactionAware> result) {
final TransactionId id = result.getResult().getTransactionId();
+ final NodeKey nodeKey = nodeRef.getValue().firstKeyOf(Node.class, NodeKey.class);
if (id == null) {
- LOG.warn("No protocol support");
+ String[] multipartRequestName = result.getResult().getClass().getSimpleName().split("(?=\\p{Upper})");
+ LOG.warn("Node [{}] does not support statistics request type : {}",
+ nodeKey.getId(),Joiner.on(" ").join(Arrays.copyOfRange(multipartRequestName, 2, multipartRequestName.length-2)));
} else {
if (resultTransId != null) {
resultTransId.set(id);
}
- final NodeKey nodeKey = nodeRef.getValue().firstKeyOf(Node.class, NodeKey.class);
final String cacheKey = buildCacheKey(id, nodeKey.getId());
final TransactionCacheContainer<? super TransactionAware> container =
new TransactionCacheContainerImpl<>(id, inputObj, nodeKey.getId());
import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation.StatsManagerOperationType;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
private synchronized void cleanDataStoreOperQueue() {
    // Drain all events, making sure any blocked threads are unblocked
    while (! dataStoreOperQueue.isEmpty()) {
-        dataStoreOperQueue.poll();
+        StatDataStoreOperation op = dataStoreOperQueue.poll();
+
+        // Execute the node removal clean up operation if queued in the
+        // operational queue.
+        if (op.getType() == StatsManagerOperationType.NODE_REMOVAL) {
+            try {
+                LOG.debug("Node {} disconnected. Cleaning internal data.",op.getNodeId());
+                // NOTE(review): a null transaction is passed here; this assumes
+                // NODE_REMOVAL operations never dereference the tx — confirm in
+                // their applyOperation implementations.
+                op.applyOperation(null);
+            } catch (final Exception ex) {
+                // Pass the exception as the last argument so SLF4J attaches the
+                // stack trace; previously the failure cause was silently dropped.
+                LOG.warn("Unhandled exception while cleaning up internal data of node [{}]",op.getNodeId(), ex);
+            }
+        }
    }
}
this.configServiceRefRegistry = configServiceRefRegistry;
}
+    /**
+     * Resolves the JMX ObjectName of a service instance addressed by
+     * (namespace, service type, ref name). Each lookup level fails with an
+     * IllegalArgumentException whose message lists the valid keys at that
+     * level, so callers can see exactly which coordinate was wrong.
+     */
-    public ObjectName getByServiceAndRefName(String namespace, String serviceName, String refName) {
-        Map<String, Map<String, String>> serviceNameToRefNameToInstance = getMappedServices().get(namespace);
+    public ObjectName getByServiceAndRefName(String namespace, String serviceType, String refName) {
+        Map<String, Map<String, Map<String, String>>> mappedServices = getMappedServices();
+        Map<String, Map<String, String>> serviceNameToRefNameToInstance = mappedServices.get(namespace);
+        // checkArgument (not checkNotNull) => IllegalArgumentException with a
+        // %s-templated, actionable message instead of a bare NPE.
-        Preconditions.checkNotNull(serviceNameToRefNameToInstance, "No serviceInstances mapped to " + namespace);
+        Preconditions.checkArgument(serviceNameToRefNameToInstance != null,
+                "No service mapped to %s:%s:%s. Wrong namespace, available namespaces: %s",
+                namespace, serviceType, refName, mappedServices.keySet());
-        Map<String, String> refNameToInstance = serviceNameToRefNameToInstance.get(serviceName);
-        Preconditions.checkNotNull(refNameToInstance, "No serviceInstances mapped to " + serviceName + " , "
-                + serviceNameToRefNameToInstance.keySet());
+        Map<String, String> refNameToInstance = serviceNameToRefNameToInstance.get(serviceType);
+        Preconditions.checkArgument(refNameToInstance != null,
+                "No service mapped to %s:%s:%s. Wrong service type, available service types: %s"
+                , namespace, serviceType, refName, serviceNameToRefNameToInstance.keySet());
        String instanceId = refNameToInstance.get(refName);
-        Preconditions.checkArgument(instanceId != null, "No serviceInstances mapped to " + serviceName + ":"
-                + refName + ", " + serviceNameToRefNameToInstance.keySet());
+        Preconditions.checkArgument(instanceId != null,
+                "No service mapped to %s:%s:%s. Wrong ref name, available ref names: %s"
+                ,namespace, serviceType, refName, refNameToInstance.keySet());
        Services.ServiceInstance serviceInstance = Services.ServiceInstance.fromString(instanceId);
-        Preconditions.checkArgument(serviceInstance != null, "No serviceInstance mapped to " + refName
-                + " under service name " + serviceName + " , " + refNameToInstance.keySet());
+        Preconditions.checkArgument(serviceInstance != null,
+                "No service mapped to %s:%s:%s. Wrong ref name, available ref names: %s"
+                ,namespace, serviceType, refName, refNameToInstance.keySet());
-        String qNameOfService = configServiceRefRegistry.getServiceInterfaceName(namespace, serviceName);
+        String qNameOfService = configServiceRefRegistry.getServiceInterfaceName(namespace, serviceType);
        try {
            /*
               Remove transaction name as this is redundant - will be stripped in DynamicWritableWrapper,
            configServiceRefRegistry.getServiceReference(qNameOfService, refName));
        } catch (InstanceNotFoundException e) {
            throw new IllegalArgumentException("No serviceInstance mapped to " + refName
-                    + " under service name " + serviceName + " , " + refNameToInstance.keySet(), e);
+                    + " under service name " + serviceType + " , " + refNameToInstance.keySet(), e);
        }
    }
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.confignetconfconnector.operations;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Simple Lock implementation that pretends to lock candidate datastore.
+ * Candidate datastore is allocated per session and is private so no real locking is needed (JMX is the only possible interference)
+ */
+public class Lock extends AbstractLastNetconfOperation {
+
+    private static final Logger LOG = LoggerFactory.getLogger(Lock.class);
+
+    private static final String LOCK = "lock";
+    private static final String TARGET_KEY = "target";
+
+    public Lock(final String netconfSessionIdForReporting) {
+        super(netconfSessionIdForReporting);
+    }
+
+    @Override
+    protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+        final Datastore targetDatastore = extractTargetParameter(operationElement);
+        if(targetDatastore == Datastore.candidate) {
+            // Since candidate datastore instances are allocated per session and not accessible anywhere else, no need to lock
+            LOG.debug("Locking {} datastore on session: {}", targetDatastore, getNetconfSessionIdForReporting());
+            // TODO should this fail if we are already locked ?
+            return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+        }
+
+        // Not supported running lock
+        throw new NetconfDocumentedException("Unable to lock " + Datastore.running + " datastore", NetconfDocumentedException.ErrorType.application,
+                NetconfDocumentedException.ErrorTag.operation_not_supported, NetconfDocumentedException.ErrorSeverity.error);
+    }
+
+    /**
+     * Extracts the datastore addressed by the operation's &lt;target&gt; element.
+     *
+     * @throws NetconfDocumentedException if the target element is missing or
+     *         ambiguous, or if it names an unknown datastore.
+     */
+    static Datastore extractTargetParameter(final XmlElement operationElement) throws NetconfDocumentedException {
+        final XmlElement targetChildNode;
+        try {
+            final XmlElement targetElement = operationElement.getOnlyChildElementWithSameNamespace(TARGET_KEY);
+            targetChildNode = targetElement.getOnlyChildElementWithSameNamespace();
+        } catch (final MissingNameSpaceException | UnexpectedNamespaceException e) {
+            LOG.trace("Can't get only child element with same namespace", e);
+            throw NetconfDocumentedException.wrap(e);
+        }
+
+        // Report an unknown datastore name as a proper NETCONF error instead of
+        // letting Datastore.valueOf() leak a raw IllegalArgumentException to the
+        // session layer.
+        try {
+            return Datastore.valueOf(targetChildNode.getName());
+        } catch (final IllegalArgumentException e) {
+            throw new NetconfDocumentedException("Unknown target " + targetChildNode.getName(),
+                    NetconfDocumentedException.ErrorType.application,
+                    NetconfDocumentedException.ErrorTag.invalid_value,
+                    NetconfDocumentedException.ErrorSeverity.error);
+        }
+    }
+
+    @Override
+    protected String getOperationName() {
+        return LOCK;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.confignetconfconnector.operations;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+/**
+ * Simple unlock implementation that pretends to unlock candidate datastore.
+ * Candidate datastore is allocated per session and is private so no real locking is needed (JMX is the only possible interference)
+ */
+public class UnLock extends AbstractLastNetconfOperation {
+
+    private static final Logger LOG = LoggerFactory.getLogger(UnLock.class);
+
+    private static final String UNLOCK = "unlock";
+
+    public UnLock(final String netconfSessionIdForReporting) {
+        super(netconfSessionIdForReporting);
+    }
+
+    @Override
+    protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+        // Target extraction is shared with the Lock operation.
+        final Datastore targetDatastore = Lock.extractTargetParameter(operationElement);
+        if(targetDatastore == Datastore.candidate) {
+            // Since candidate datastore instances are allocated per session and not accessible anywhere else, no real unlock is needed
+            LOG.debug("Unlocking {} datastore on session: {}", targetDatastore, getNetconfSessionIdForReporting());
+            // TODO this should fail if we are not locked
+            return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+        }
+
+        // Unlocking the running datastore is not supported
+        throw new NetconfDocumentedException("Unable to unlock " + Datastore.running + " datastore", NetconfDocumentedException.ErrorType.application,
+                NetconfDocumentedException.ErrorTag.operation_not_supported, NetconfDocumentedException.ErrorSeverity.error);
+    }
+
+    @Override
+    protected String getOperationName() {
+        return UNLOCK;
+    }
+}
import org.opendaylight.controller.config.util.ConfigRegistryClient;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Commit;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.DiscardChanges;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.Lock;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.UnLock;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Validate;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.get.Get;
ops.add(new EditConfig(yangStoreSnapshot, transactionProvider, configRegistryClient,
netconfSessionIdForReporting));
ops.add(new Commit(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
+ ops.add(new Lock(netconfSessionIdForReporting));
+ ops.add(new UnLock(netconfSessionIdForReporting));
ops.add(new Get(yangStoreSnapshot, configRegistryClient, netconfSessionIdForReporting));
ops.add(new DiscardChanges(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
ops.add(new Validate(transactionProvider, configRegistryClient, netconfSessionIdForReporting));
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Commit;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.DiscardChanges;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.Lock;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.UnLock;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfig;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.get.Get;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.getconfig.GetConfig;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.test.types.rev131127.TestIdentity1;
}
+    @Test
+    public void testUnLock() throws Exception {
+        // Lock then unlock the candidate datastore; each operation must answer <ok/>.
+        assertTrue(NetconfMessageUtil.isOKMessage(lockCandidate()));
+        assertTrue(NetconfMessageUtil.isOKMessage(unlockCandidate()));
+    }
+
private void assertCorrectRefNamesForDependencies(Document config) throws NodeTestException {
NodeList modulesList = config.getElementsByTagName("modules");
assertEquals(1, modulesList.getLength());
executeOp(commitOp, "netconfMessages/commit.xml");
}
+ private Document lockCandidate() throws ParserConfigurationException, SAXException, IOException, NetconfDocumentedException {
+ Lock commitOp = new Lock(NETCONF_SESSION_ID);
+ return executeOp(commitOp, "netconfMessages/lock.xml");
+ }
+
+ private Document unlockCandidate() throws ParserConfigurationException, SAXException, IOException, NetconfDocumentedException {
+ UnLock commitOp = new UnLock(NETCONF_SESSION_ID);
+ return executeOp(commitOp, "netconfMessages/unlock.xml");
+ }
+
private Document getConfigCandidate() throws ParserConfigurationException, SAXException, IOException,
NetconfDocumentedException {
GetConfig getConfigOp = new GetConfig(yangStoreSnapshot, Optional.<String> absent(), transactionProvider,
import com.google.common.collect.Maps;
import java.util.Map;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.netconf.cli.commands.CommandConstants;
import org.opendaylight.yangtools.yang.common.QName;
Mockito.doReturn("").when(channelHandler).toString();
NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps);
- final NetconfMessageToEXIEncoder exiEncoder = new NetconfMessageToEXIEncoder(codec);
- final NetconfEXIToMessageDecoder exiDecoder = new NetconfEXIToMessageDecoder(codec);
+ final NetconfMessageToEXIEncoder exiEncoder = NetconfMessageToEXIEncoder.create(codec);
+ final NetconfEXIToMessageDecoder exiDecoder = NetconfEXIToMessageDecoder.create(codec);
session.addExiHandlers(exiDecoder, exiEncoder);
session.stopExiCommunication();
package org.opendaylight.controller.netconf.impl.osgi;
-import java.util.Collections;
-import java.util.HashSet;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSet.Builder;
import java.util.Set;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
private final Set<NetconfOperationService> services;
private final String netconfSessionIdForReporting;
+    // Builds an immutable snapshot holding one NetconfOperationService per
+    // factory. ImmutableSet.Builder replaces the HashSet +
+    // Collections.unmodifiableSet pair: one object instead of two wrappers.
-    public NetconfOperationServiceSnapshotImpl(Set<NetconfOperationServiceFactory> factories, String sessionIdForReporting) {
-        Set<NetconfOperationService> services = new HashSet<>();
+    public NetconfOperationServiceSnapshotImpl(final Set<NetconfOperationServiceFactory> factories, final String sessionIdForReporting) {
+        final Builder<NetconfOperationService> b = ImmutableSet.builder();
        netconfSessionIdForReporting = sessionIdForReporting;
        for (NetconfOperationService factory : factories) {
-            services.add(factory.createService(netconfSessionIdForReporting));
+            b.add(factory.createService(netconfSessionIdForReporting));
        }
-        this.services = Collections.unmodifiableSet(services);
+        this.services = b.build();
    }
-
-
@Override
public String getNetconfSessionIdForReporting() {
return netconfSessionIdForReporting;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.io.IOUtils;
import org.junit.After;
import org.junit.Before;
public static final String LOOPBACK_ADDRESS = "127.0.0.1";
public static final int SERVER_CONNECTION_TIMEOUT_MILLIS = 5000;
+ private static final int RESOURCE_TIMEOUT_MINUTES = 2;
static ModuleFactory[] FACTORIES = {new TestImplModuleFactory(),
new DepTestImplModuleFactory(),
} else {
s = dispatch.createServer(((InetSocketAddress) getTcpServerAddress()));
}
- s.await();
+ s.await(RESOURCE_TIMEOUT_MINUTES, TimeUnit.MINUTES);
return s.channel();
}
*/
@After
public void cleanUpNetconf() throws Exception {
+    // Bounded awaits so a stuck channel/thread-group shutdown fails the test
+    // run instead of hanging it forever.
-        serverTcpChannel.close().await();
+        serverTcpChannel.close().await(RESOURCE_TIMEOUT_MINUTES, TimeUnit.MINUTES);
        hashedWheelTimer.stop();
-        nettyThreadgroup.shutdownGracefully().await();
+        nettyThreadgroup.shutdownGracefully().await(RESOURCE_TIMEOUT_MINUTES, TimeUnit.MINUTES);
    }
public NetconfClientConfiguration getClientConfiguration(final InetSocketAddress tcpAddress, final int timeout) {
@After
public void tearDown() throws Exception {
    sshProxyServer.close();
+    // NOTE(review): the await() was deliberately dropped here — shutdown is now
+    // fire-and-forget, presumably to avoid teardown hangs. Confirm no test
+    // depends on the client group being fully terminated before the next test.
-        clientGroup.shutdownGracefully().await();
+        clientGroup.shutdownGracefully();
    minaTimerEx.shutdownNow();
    nioExec.shutdownNow();
}
- @Test
+ @Test(timeout = 2*60*1000)
public void testSecure() throws Exception {
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(new SimpleNetconfClientSessionListener(), TLS_ADDRESS))) {
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.protocol.framework.AbstractProtocolSession;
import org.openexi.proc.common.EXIOptionsException;
+import org.openexi.sax.TransmogrifierException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
final NetconfEXICodec exiCodec = new NetconfEXICodec(exiParams.getOptions());
- final NetconfMessageToEXIEncoder exiEncoder = new NetconfMessageToEXIEncoder(exiCodec);
- final NetconfEXIToMessageDecoder exiDecoder = new NetconfEXIToMessageDecoder(exiCodec);
- addExiHandlers(exiDecoder, exiEncoder);
+ final NetconfMessageToEXIEncoder exiEncoder;
+ try {
+ exiEncoder = NetconfMessageToEXIEncoder.create(exiCodec);
+ } catch (EXIOptionsException | TransmogrifierException e) {
+ LOG.warn("Failed to instantiate EXI encoder for {} on session {}", exiCodec, this, e);
+ throw new IllegalStateException("Cannot instantiate encoder for options", e);
+ }
+ final NetconfEXIToMessageDecoder exiDecoder;
+ try {
+ exiDecoder = NetconfEXIToMessageDecoder.create(exiCodec);
+ } catch (EXIOptionsException e) {
+ LOG.warn("Failed to instantiate EXI decodeer for {} on session {}", exiCodec, this, e);
+ throw new IllegalStateException("Cannot instantiate encoder for options", e);
+ }
+
+ addExiHandlers(exiDecoder, exiEncoder);
LOG.debug("Session {} EXI handlers added to pipeline", this);
}
private static final Logger LOG = LoggerFactory.getLogger(NetconfEXIToMessageDecoder.class);
private static final SAXTransformerFactory FACTORY = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
+ /**
+ * This class is not marked as shared, so it can be attached to only a single channel,
+ * which means that {@link #decode(ChannelHandlerContext, ByteBuf, List)}
+ * cannot be invoked concurrently. Hence we can reuse the reader.
+ */
+ private final EXIReader reader;
- private final NetconfEXICodec codec;
+ private NetconfEXIToMessageDecoder(final EXIReader reader) {
+ this.reader = Preconditions.checkNotNull(reader);
+ }
- public NetconfEXIToMessageDecoder(final NetconfEXICodec codec) {
- this.codec = Preconditions.checkNotNull(codec);
+ public static NetconfEXIToMessageDecoder create(final NetconfEXICodec codec) throws EXIOptionsException {
+ return new NetconfEXIToMessageDecoder(codec.getReader());
}
@Override
LOG.trace("Received to decode: {}", ByteBufUtil.hexDump(in));
}
- final EXIReader r = codec.getReader();
final TransformerHandler handler = FACTORY.newTransformerHandler();
- r.setContentHandler(handler);
+ reader.setContentHandler(handler);
final DOMResult domResult = new DOMResult();
handler.setResult(domResult);
try (final InputStream is = new ByteBufInputStream(in)) {
- r.parse(new InputSource(is));
+ // Performs internal reset before doing anything
+ reader.parse(new InputSource(is));
}
out.add(new NetconfMessage((Document) domResult.getNode()));
import io.netty.handler.codec.MessageToByteEncoder;
import java.io.IOException;
import java.io.OutputStream;
+import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.sax.SAXResult;
import org.openexi.sax.TransmogrifierException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.xml.sax.ContentHandler;
public final class NetconfMessageToEXIEncoder extends MessageToByteEncoder<NetconfMessage> {
private static final Logger LOG = LoggerFactory.getLogger(NetconfMessageToEXIEncoder.class);
- private final NetconfEXICodec codec;
+ /**
+ * This class is not marked as shared, so it can be attached to only a single channel,
+ * which means that {@link #encode(ChannelHandlerContext, NetconfMessage, ByteBuf)}
+ * cannot be invoked concurrently. Hence we can reuse the transmogrifier.
+ */
+ private final Transmogrifier transmogrifier;
- public NetconfMessageToEXIEncoder(final NetconfEXICodec codec) {
- this.codec = Preconditions.checkNotNull(codec);
+ private NetconfMessageToEXIEncoder(final Transmogrifier transmogrifier) {
+ this.transmogrifier = Preconditions.checkNotNull(transmogrifier);
+ }
+
+ public static NetconfMessageToEXIEncoder create(final NetconfEXICodec codec) throws EXIOptionsException, TransmogrifierException {
+ return new NetconfMessageToEXIEncoder(codec.getTransmogrifier());
}
@Override
LOG.trace("Sent to encode : {}", msg);
try (final OutputStream os = new ByteBufOutputStream(out)) {
- final Transmogrifier transmogrifier = codec.getTransmogrifier();
transmogrifier.setOutputStream(os);
-
- ThreadLocalTransformers.getDefaultTransformer().transform(new DOMSource(msg.getDocument()), new SAXResult(transmogrifier.getSAXTransmogrifier()));
+ final ContentHandler handler = transmogrifier.getSAXTransmogrifier();
+ final Transformer transformer = ThreadLocalTransformers.getDefaultTransformer();
+ transformer.transform(new DOMSource(msg.getDocument()), new SAXResult(handler));
+ } finally {
+ // Make sure we do not retain any reference to state by removing
+ // the output stream reference and resetting internal state.
+ transmogrifier.setOutputStream(null);
+ transmogrifier.getSAXTransmogrifier();
}
}
}
if (future.isSuccess()) {
handleSshAuthenticated(session, ctx);
} else {
- handleSshSetupFailure(ctx, future.getException());
+ // Exception does not have to be set in the future, add simple exception in such case
+ final Throwable exception = future.getException() == null ?
+ new IllegalStateException("Authentication failed") :
+ future.getException();
+ handleSshSetupFailure(ctx, exception);
}
}
});
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
-
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
@Before
public void setUp() throws Exception {
final NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
- netconfMessageToEXIEncoder = new NetconfMessageToEXIEncoder(codec);
- netconfEXIToMessageDecoder = new NetconfEXIToMessageDecoder(codec);
+ netconfMessageToEXIEncoder = NetconfMessageToEXIEncoder.create(codec);
+ netconfEXIToMessageDecoder = NetconfEXIToMessageDecoder.create(codec);
msg = new NetconfMessage(XmlUtil.readXmlToDocument(msgAsString));
this.msgAsExi = msgToExi(msgAsString, codec);
--- /dev/null
+<!--
+ ~ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="101"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <lock>
+ <target>
+ <candidate/>
+ </target>
+ </lock>
+</rpc>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc message-id="101"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <unlock>
+ <target>
+ <candidate/>
+ </target>
+ </unlock>
+</rpc>
\ No newline at end of file
<version>1.1.0-SNAPSHOT</version>
<relativePath>opendaylight/commons/parent</relativePath>
</parent>
+
<artifactId>releasepom</artifactId>
<version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
+ <name>controller</name> <!-- Used by Sonar to set project name -->
+
<prerequisites>
<maven>3.0</maven>
</prerequisites>
+
<modules>
<!-- md-sal -->
<module>opendaylight/commons/liblldp</module>
<!-- Karaf Distribution -->
- <module>opendaylight/karaf-branding</module>
- <module>opendaylight/distribution/opendaylight-karaf-empty</module>
- <module>opendaylight/distribution/opendaylight-karaf</module>
- <module>opendaylight/distribution/opendaylight-karaf-resources</module>
+ <module>karaf</module>
<module>features</module>
<!-- archetypes -->