<properties>
<features.file>features.xml</features.file>
+ <org.json.version>20131018</org.json.version>
</properties>
<dependencies>
<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-provider</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
<feature version='${mdsal.version}'>odl-mdsal-broker</feature>
<feature>war</feature>
+ <!-- presently we need sal-remote to be listed BEFORE sal-rest-connector because sal-rest-connector
+ has a yang file which augments a yang file in sal-remote, and order seems to matter -->
+ <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
<bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
<bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
</feature>
<feature name='odl-toaster' version='${project.version}' description="OpenDaylight :: Toaster">
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
</feature>
+
+ <feature name='odl-clustering-test-app' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+ <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
+ <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
+ </feature>
</features>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>net.sf.jung2</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.antlr</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.moxy</artifactId>
+ </dependency>
</dependencies>
<build>
<resources>
<bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.neutron.northbound/${networkconfig.neutron.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
<distributionManagement>
<repository>
<id>opendaylight-release</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+ <url>${nexusproxy}/repositories/opendaylight.release/</url>
</repository>
<snapshotRepository>
<id>opendaylight-snapshot</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
</snapshotRepository>
<site>
<id>website</id>
- <url>dav:http://nexus.opendaylight.org/content/sites/site/sal-parent</url>
+ <url>dav:${nexusproxy}/sites/site/sal-parent</url>
</site>
</distributionManagement>
</project>
<ignorePermissions>false</ignorePermissions>
</configuration>
</execution>
+ <execution>
+ <id>copy-dependencies</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/assembly/system</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>true</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ <useRepositoryLayout>true</useRepositoryLayout>
+ <addParentPoms>true</addParentPoms>
+ <copyPom>true</copyPom>
+ </configuration>
+ </execution>
</executions>
</plugin>
<plugin>
<sonar.language>java</sonar.language>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
- <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
+ <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+ <sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
<yang-ext.version>2013.09.07.4-SNAPSHOT</yang-ext.version>
<yang-jmx-generator.version>1.0.0-SNAPSHOT</yang-jmx-generator.version>
<yangtools.version>0.6.2-SNAPSHOT</yangtools.version>
- <sshd-core.version>0.12.0</sshd-core.version>
+ <sshd-core.version>0.12.0</sshd-core.version>
+ <jmh.version>0.9.7</jmh.version>
</properties>
<dependencyManagement>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <!-- JMH Benchmark dependencies -->
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
</dependencies>
</dependencyManagement>
<repositories>
<!-- OpenDayLight Repo Mirror -->
+ <!-- NOTE: URLs need to be hardcoded in the repository section because we have
+ parent poms that do NOT exist in this project and thus need to be pulled
+ down from the repository. To override these URLs you should use the
+ mirror section in your local settings.xml file. -->
<repository>
<releases>
<enabled>true</enabled>
<url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
</pluginRepository>
</pluginRepositories>
+
+ <!-- distribution management only runs when you run mvn deploy
+ which applies when you are deploying compiled artifacts to a
+ maven repository. In that case logic dictates that you already
+ compiled and thus already have the necessary parent pom files
+ that do not exist in this project pulled down to your local
+ .m2. That way the variables can be resolved and artifacts can
+ be uploaded when running mvn deploy. -->
<distributionManagement>
<!-- OpenDayLight Released artifact -->
<repository>
<id>opendaylight-release</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+ <url>${nexusproxy}/repositories/opendaylight.release/</url>
</repository>
<!-- OpenDayLight Snapshot artifact -->
<snapshotRepository>
<id>opendaylight-snapshot</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
</snapshotRepository>
<!-- Site deployment -->
<site>
--- /dev/null
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+# script; for example karaf, start, stop, admin, client, ...
+#
+# if [ "$KARAF_SCRIPT" == "SCRIPT_NAME" ]; then
+# Actions go here...
+# fi
+
+#
+# general settings which should be applied for all scripts go here; please keep
+# in mind that it is possible that scripts might be executed more than once, e.g.
+# in the case of the start script, where the start script is executed first and the
+# karaf script afterwards.
+#
+
+#
+# The following section shows the possible configuration options for the default
+# karaf scripts
+#
+# export JAVA_HOME # Location of Java installation
+# export JAVA_MIN_MEM # Minimum memory for the JVM
+# export JAVA_MAX_MEM # Maximum memory for the JVM
+# export JAVA_PERM_MEM # Minimum perm memory for the JVM
+# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM
+# export KARAF_HOME # Karaf home folder
+# export KARAF_DATA # Karaf data folder
+# export KARAF_BASE # Karaf base folder
+# export KARAF_ETC # Karaf etc folder
+# export KARAF_OPTS # Additional available Karaf options
+# export KARAF_DEBUG # Enable debug mode
+if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
+ export JAVA_MAX_PERM_MEM="512m"
+fi
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
+ export JAVA_MAX_MEM="2048m"
+fi
+
#Hosttracker hostsdb key scheme setting
hosttracker.keyscheme=IP
+# LISP Flow Mapping configuration
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+lisp.mappingOverwrite = true
+# Enable the Solicit-Map-Request (SMR) mechanism
+lisp.smr = false
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>benchmark-data-store</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-parser-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-inmemory-datastore</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <configuration>
+ <classpathScope>test</classpathScope>
+ <executable>java</executable>
+ <arguments>
+ <argument>-classpath</argument>
+ <classpath/>
+ <argument>org.openjdk.jmh.Main</argument>
+ <argument>.*</argument>
+ </arguments>
+ </configuration>
+ <executions>
+ <execution>
+ <id>run-benchmarks</id>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Warmup;
+
+/**
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int WARMUP_ITERATIONS = 20;
+ private static final int MEASUREMENT_ITERATIONS = 20;
+
+ private static final int OUTER_LIST_100K = 100000;
+ private static final int OUTER_LIST_50K = 50000;
+ private static final int OUTER_LIST_10K = 10000;
+
+ private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
+ private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
+ private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
+
+ private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
+ final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
+
+ for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
+ paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
+ .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .build();
+ }
+ return paths;
+ }
+
+ private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
+ private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
+ private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
+
+ private static MapNode initInnerListItems(final int count) {
+ final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
+ .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
+
+ for (int i = 1; i <= count; ++i) {
+ mapEntryBuilder
+ .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
+ }
+ return mapEntryBuilder.build();
+ }
+
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
+
+ private static NormalizedNode<?,?>[] initOuterListItems(int outerListItemsCount, MapNode innerList) {
+ final NormalizedNode<?,?>[] outerListItems = new NormalizedNode[outerListItemsCount];
+
+ for (int i = 0; i < outerListItemsCount; ++i) {
+ int outerListKey = i;
+ outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .withChild(innerList).build();
+ }
+ return outerListItems;
+ }
+
+ protected SchemaContext schemaContext;
+ protected InMemoryDOMDataStore domStore;
+
+ abstract public void setUp() throws Exception;
+
+ abstract public void tearDown();
+
+ protected void initTestNode() throws Exception {
+ final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
+ .build();
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(testPath, provideOuterListNode());
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ private DataContainerChild<?, ?> provideOuterListNode() {
+ return ImmutableContainerNodeBuilder
+ .create()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
+ .withChild(
+ ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
+ .build()).build();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+/**
+ * Benchmark Model class loads the odl-datastore-test.yang model from resources.
+ * <br>
+ * This class serves as facilitator class which holds several references to initialized yang model as static final
+ * members.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public final class BenchmarkModel {
+
+ public static final QName TEST_QNAME = QName
+ .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test");
+ public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+ public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+ public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+ public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+ private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+ public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+
+ public static final InputStream getDatastoreBenchmarkInputStream() {
+ return getInputStream(DATASTORE_TEST_YANG);
+ }
+
+ private static InputStream getInputStream(final String resourceName) {
+ return BenchmarkModel.class.getResourceAsStream(resourceName);
+ }
+
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(
+ getDatastoreBenchmarkInputStream()));
+ return parser.resolveSchemaContext(modules);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Blocking Bounded Fast Thread Pool
+ * and DOM Store Executor Service as Blocking Bounded Fast Thread Pool.
+ *
+ * @see org.opendaylight.yangtools.util.concurrent.SpecialExecutors
+ * @see org.opendaylight.controller.md.sal.dom.store.benchmark.AbstractInMemoryDatastoreWriteTransactionBenchmark
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+ private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
+
+ @Override
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ final ListeningExecutorService domStoreExecutor = MoreExecutors.listeningDecorator(SpecialExecutors.newBoundedSingleThreadExecutor(
+ MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name ));
+
+ domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @Override
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Blocking Bounded Fast Thread Pool
+ * and DOM Store Executor Service as Same Thread Executor.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithSameThreadedExecutorBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Same Thread Executor
+ * and DOM Store Executor Service as Same Thread Executor.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+ MoreExecutors.sameThreadExecutor());
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+module odl-datastore-test {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
+ prefix "store-test";
+
+ revision "2014-03-13" {
+ description "Initial revision.";
+ }
+
+ container test {
+ list outer-list {
+ key id;
+ leaf id {
+ type int32;
+ }
+ choice outer-choice {
+ case one {
+ leaf one {
+ type string;
+ }
+ }
+ case two-three {
+ leaf two {
+ type string;
+ }
+ leaf three {
+ type string;
+ }
+ }
+ }
+ list inner-list {
+ key name;
+ leaf name {
+ type int32;
+ }
+ leaf value {
+ type string;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
import com.google.common.net.InetAddresses;
-public class FromSalConversionsUtils {
+/**
+ * MD-SAL to AD-SAL conversions collection
+ */
+public final class FromSalConversionsUtils {
- private FromSalConversionsUtils() {
+ /** http://en.wikipedia.org/wiki/IPv4#Packet_structure (end of octet number 1, bit 14.+15.) */
+ public static final int ENC_FIELD_BIT_SIZE = 2;
+ private FromSalConversionsUtils() {
+ throw new IllegalAccessError("forcing no instance for factory");
}
@SuppressWarnings("unused")
return true;
}
+ /**
+ * Shifts a DSCP value into TOS-byte position, leaving the two ECN bits empty.
+ *
+ * @param nwDscp NW-DSCP (valid range 0..63; out-of-range values are shifted as-is)
+ * @return shifted to NW-TOS (with empty ECN part)
+ */
+ public static int dscpToTos(int nwDscp) {
+ // The short cast is a deliberate narrowing; it does not affect valid DSCP
+ // input (0..63 -> 0..252) — see FromSalConversionsUtilsTest.
+ return (short) (nwDscp << ENC_FIELD_BIT_SIZE);
+ }
}
private static SetNwTosActionCase _toAction(final SetNwTos sourceAction) {
return new SetNwTosActionCaseBuilder()
- .setSetNwTosAction(new SetNwTosActionBuilder().setTos(sourceAction.getNwTos()).build())
+ .setSetNwTosAction(new SetNwTosActionBuilder().setTos(FromSalConversionsUtils.dscpToTos(sourceAction.getNwTos())).build())
.build();
}
private static final Logger LOG = LoggerFactory.getLogger(ToSalConversionsUtils.class);
private ToSalConversionsUtils() {
-
+ throw new IllegalAccessError("forcing no instance for factory");
}
public static Flow toFlow(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow source, Node node) {
} else if (sourceAction instanceof SetNwTosActionCase) {
Integer tos = ((SetNwTosActionCase) sourceAction).getSetNwTosAction().getTos();
if (tos != null) {
- targetAction.add(new SetNwTos(tos));
+ targetAction.add(new SetNwTos(ToSalConversionsUtils.tosToNwDscp(tos)));
}
} else if (sourceAction instanceof SetTpDstActionCase) {
PortNumber port = ((SetTpDstActionCase) sourceAction).getSetTpDstAction().getPort();
return mac;
}
+
+ /**
+ * Strips the two low-order ECN bits from a TOS byte, yielding the DSCP value.
+ *
+ * @param nwTos NW-TOS (valid range 0..255)
+ * @return shifted to NW-DSCP
+ */
+ public static int tosToNwDscp(int nwTos) {
+ // Unsigned shift discards the ECN bits; the short cast restores the sign
+ // for negative out-of-range input — see ToSalConversionsUtilsTest.
+ return (short) (nwTos >>> FromSalConversionsUtils.ENC_FIELD_BIT_SIZE);
+ }
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils;
+
+/**
+ * Unit test of {@link FromSalConversionsUtils}.
+ */
+public class FromSalConversionsUtilsTest {
+
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils#dscpToTos(int)}.
+ */
+ @Test
+ public void testDscpToTos() {
+ // Valid DSCP values (0..63) map to TOS by a left shift of 2 (ECN bits empty).
+ Assert.assertEquals(0, FromSalConversionsUtils.dscpToTos(0));
+ Assert.assertEquals(4, FromSalConversionsUtils.dscpToTos(1));
+ Assert.assertEquals(252, FromSalConversionsUtils.dscpToTos(63));
+ // Out-of-range input is not rejected; the shift is applied as-is.
+ Assert.assertEquals(256, FromSalConversionsUtils.dscpToTos(64));
+ Assert.assertEquals(-4, FromSalConversionsUtils.dscpToTos(-1));
+ }
+
+}
}
assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
} else if (innerAction instanceof SetNwTosActionCase) {
- assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 63, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
+ assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 252, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
} else if (innerAction instanceof SetNwDstActionCase) {
Address address = ((SetNwDstActionCase) innerAction).getSetNwDstAction().getAddress();
boolean ipv4AddressFound = false;
private void prepareActionSetNwTos(SetNwTosActionCaseBuilder wrapper) {
SetNwTosActionBuilder setNwTosActionBuilder = new SetNwTosActionBuilder();
- setNwTosActionBuilder.setTos(63);
+ setNwTosActionBuilder.setTos(252);
wrapper.setSetNwTosAction(setNwTosActionBuilder.build());
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils;
+
+/**
+ * Unit test of {@link ToSalConversionsUtils}.
+ */
+public class ToSalConversionsUtilsTest {
+
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils#tosToNwDscp(int)}.
+ */
+ @Test
+ public void testTosToNwDscp() {
+ // DSCP is the upper six bits of the TOS byte: tos >>> 2.
+ Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(0));
+ // The two low-order ECN bits are discarded.
+ Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(1));
+ Assert.assertEquals(1, ToSalConversionsUtils.tosToNwDscp(4));
+ Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(252));
+ Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(253));
+ // Negative input keeps its sign via the implementation's short cast.
+ Assert.assertEquals(-1, ToSalConversionsUtils.tosToNwDscp(-1));
+ }
+}
if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj);
builder.setFlowRef(new FlowRef(identifier));
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalFlowService().removeFlow(builder.build());
if (tableIdValidationPrecondition(tableKey, update)) {
final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
if (tableIdValidationPrecondition(tableKey, addDataObj)) {
final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
final Group group = (removeDataObj);
final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().removeGroup(builder.build());
final Group updatedGroup = (update);
final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
final Group group = (addDataObj);
final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().addGroup(builder.build());
final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().removeMeter(builder.build());
final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().addMeter(builder.build());
<module>sal-binding-dom-it</module>
</modules>
</profile>
+ <profile>
+ <!-- Opt-in profile (not active by default): builds the JMH datastore
+ benchmarks in the benchmark-data-store module. -->
+ <id>benchmarks</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ </activation>
+ <modules>
+ <module>benchmark-data-store</module>
+ </modules>
+ </profile>
</profiles>
</project>
\ No newline at end of file
<Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
<Export-package>org.opendaylight.cluster.raft</Export-package>
<Import-Package>*</Import-Package>
+ <DynamicImport-Package>*</DynamicImport-Package>
</instructions>
</configuration>
</plugin>
}
} else if (message instanceof PrintState) {
- LOG.debug("State of the node:{} has entries={}, {}",
- getId(), state.size(), getReplicatedLogState());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("State of the node:{} has entries={}, {}",
+ getId(), state.size(), getReplicatedLogState());
+ }
} else if (message instanceof PrintRole) {
- LOG.debug("{} = {}, Peers={}", getId(), getRaftState(),getPeers());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers());
+ }
} else {
super.onReceiveCommand(message);
} catch (Exception e) {
LOG.error("Exception in applying snapshot", e);
}
- LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+ }
}
private ByteString fromObject(Object snapshot) throws Exception {
import org.opendaylight.controller.cluster.example.messages.PrintState;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
import java.io.BufferedReader;
import java.io.InputStreamReader;
actorSystem.stop(actorRef);
actorRefs.remove(actorName);
-
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new RemoveRaftPeer(actorName), null);
- }
-
allPeers.remove(actorName);
}
allPeers.put(actorName, address);
ActorRef exampleActor = createExampleActor(actorName);
-
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new AddRaftPeer(actorName, address), null);
- }
-
actorRefs.put(actorName, exampleActor);
addClientsToNode(actorName, 1);
*/
public class DefaultConfigParamsImpl implements ConfigParams {
- private static final int SNAPSHOT_BATCH_COUNT = 100000;
+ private static final int SNAPSHOT_BATCH_COUNT = 20000;
/**
* The maximum election time variance
* This context should NOT be passed directly to any other actor it is
* only to be consumed by the RaftActorBehaviors
*/
- private RaftActorContext context;
+ protected RaftActorContext context;
/**
* The in-memory journal
@Override public void onReceiveRecover(Object message) {
if (message instanceof SnapshotOffer) {
- LOG.debug("SnapshotOffer called..");
+ LOG.info("SnapshotOffer called..");
SnapshotOffer offer = (SnapshotOffer) message;
Snapshot snapshot = (Snapshot) offer.snapshot();
context.setReplicatedLog(replicatedLog);
context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
- LOG.debug("Applied snapshot to replicatedLog. " +
- "snapshotIndex={}, snapshotTerm={}, journal-size={}",
+ LOG.info("Applied snapshot to replicatedLog. " +
+ "snapshotIndex={}, snapshotTerm={}, journal-size={}",
replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
- replicatedLog.size());
+ replicatedLog.size()
+ );
// Apply the snapshot to the actors state
applySnapshot(ByteString.copyFrom(snapshot.getState()));
applyState(null, "recovery", logEntry.getData());
context.setLastApplied(logEntry.getIndex());
context.setCommitIndex(logEntry.getIndex());
+
} else if (message instanceof DeleteEntries) {
replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+
} else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor());
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+
} else if (message instanceof RecoveryCompleted) {
- LOG.debug(
+ LOG.info(
"RecoveryCompleted - Switching actor to Follower - " +
"Persistence Id = " + persistenceId() +
" Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
- LOG.debug("Applying state for log index {} data {}",
- applyState.getReplicatedLogEntry().getIndex(),
- applyState.getReplicatedLogEntry().getData());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Applying state for log index {} data {}",
+ applyState.getReplicatedLogEntry().getIndex(),
+ applyState.getReplicatedLogEntry().getData());
+ }
applyState(applyState.getClientActor(), applyState.getIdentifier(),
applyState.getReplicatedLogEntry().getData());
} else if(message instanceof ApplySnapshot ) {
Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
- LOG.debug("ApplySnapshot called on Follower Actor " +
- "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
- snapshot.getLastAppliedTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("ApplySnapshot called on Follower Actor " +
+ "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
+ snapshot.getLastAppliedTerm()
+ );
+ }
applySnapshot(ByteString.copyFrom(snapshot.getState()));
//clears the followers log, sets the snapshot index to ensure adjusted-index works
context.removePeer(rrp.getName());
} else if (message instanceof CaptureSnapshot) {
- LOG.debug("CaptureSnapshot received by actor");
+ LOG.info("CaptureSnapshot received by actor");
CaptureSnapshot cs = (CaptureSnapshot)message;
captureSnapshot = cs;
createSnapshot();
} else if (message instanceof CaptureSnapshotReply){
- LOG.debug("CaptureSnapshotReply received by actor");
+ LOG.info("CaptureSnapshotReply received by actor");
CaptureSnapshotReply csr = (CaptureSnapshotReply) message;
ByteString stateInBytes = csr.getSnapshot();
- LOG.debug("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
+ LOG.info("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
handleCaptureSnapshotReply(stateInBytes);
} else {
if (!(message instanceof AppendEntriesMessages.AppendEntries)
&& !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
- LOG.debug("onReceiveCommand: message:" + message.getClass());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("onReceiveCommand: message:" + message.getClass());
+ }
}
RaftState state =
if(oldBehavior != currentBehavior){
onStateChanged();
}
+
+ onLeaderChanged(oldBehavior.getLeaderId(), currentBehavior.getLeaderId());
}
}
context.getReplicatedLog().lastIndex() + 1,
context.getTermInformation().getCurrentTerm(), data);
- LOG.debug("Persist data {}", replicatedLogEntry);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Persist data {}", replicatedLogEntry);
+ }
replicatedLog
.appendAndPersist(clientActor, identifier, replicatedLogEntry);
*/
protected abstract void onStateChanged();
+ protected void onLeaderChanged(String oldLeader, String newLeader){};
+
private RaftActorBehavior switchBehavior(RaftState state) {
if (currentBehavior != null) {
if (currentBehavior.state() == state) {
return null;
}
String peerAddress = context.getPeerAddress(leaderId);
- LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
- + peerAddress);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
+ + peerAddress);
+ }
return peerAddress;
}
lastAppliedTerm = lastAppliedEntry.getTerm();
}
- LOG.debug("Snapshot Capture logSize: {}", journal.size());
- LOG.debug("Snapshot Capture lastApplied:{} ", context.getLastApplied());
- LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
- LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot Capture logSize: {}", journal.size());
+ LOG.debug("Snapshot Capture lastApplied:{} ",
+ context.getLastApplied());
+ LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
+ LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+ }
// send a CaptureSnapshot to self to make the expensive operation async.
getSelf().tell(new CaptureSnapshot(
}
@Override public void update(long currentTerm, String votedFor) {
- LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+ }
this.currentTerm = currentTerm;
this.votedFor = votedFor;
}
return null;
}
+ /**
+ * Finds and removes the client request tracker registered for a specific
+ * log index, if any.
+ *
+ * @param logIndex the index of the log entry whose tracker is sought
+ * @return the removed tracker, or null when no tracker exists for the index
+ */
+ protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+ return null;
+ }
+
+
/**
* Find the log index from the previous to last entry in the log
*
i < index + 1; i++) {
ActorRef clientActor = null;
String identifier = null;
- ClientRequestTracker tracker = findClientRequestTracker(i);
+ ClientRequestTracker tracker = removeClientRequestTracker(i);
if (tracker != null) {
clientActor = tracker.getClientActor();
context.getReplicatedLog().get(i);
if (replicatedLogEntry != null) {
+ // Send a local message to the local RaftActor (its derived class, to be
+ // specific) so it applies the log entry at this index
actor().tell(new ApplyState(clientActor, identifier,
replicatedLogEntry), actor());
newLastApplied = i;
} else {
//if one index is not present in the log, no point in looping
// around as the rest wont be present either
- context.getLogger().error(
+ context.getLogger().warning(
"Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index );
break;
}
}
- // Send a local message to the local RaftActor (it's derived class to be
- // specific to apply the log to it's index)
context.getLogger().debug("Setting last applied to {}", newLastApplied);
context.setLastApplied(newLastApplied);
}
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
+import akka.event.LoggingAdapter;
import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
public class Follower extends AbstractRaftActorBehavior {
private ByteString snapshotChunksCollected = ByteString.EMPTY;
+ private final LoggingAdapter LOG;
+
public Follower(RaftActorContext context) {
super(context);
+ LOG = context.getLogger();
+
scheduleElection(electionDuration());
}
AppendEntries appendEntries) {
if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
- context.getLogger()
- .debug(appendEntries.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntries.toString());
+ }
}
// TODO : Refactor this method into a bunch of smaller methods
// an entry at prevLogIndex and this follower has no entries in
// it's log.
- context.getLogger().debug(
- "The followers log is empty and the senders prevLogIndex is {}",
- appendEntries.getPrevLogIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("The followers log is empty and the senders prevLogIndex is {}",
+ appendEntries.getPrevLogIndex());
+ }
} else if (lastIndex() > -1
&& appendEntries.getPrevLogIndex() != -1
// The follower's log is out of sync because the Leader's
// prevLogIndex entry was not found in it's log
- context.getLogger().debug(
- "The log is not empty but the prevLogIndex {} was not found in it",
- appendEntries.getPrevLogIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("The log is not empty but the prevLogIndex {} was not found in it",
+ appendEntries.getPrevLogIndex());
+ }
} else if (lastIndex() > -1
&& previousEntry != null
// prevLogIndex entry does exist in the follower's log but it has
// a different term in it
- context.getLogger().debug(
- "Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}"
- , previousEntry.getTerm()
- , appendEntries.getPrevLogTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}"
+ , previousEntry.getTerm()
+ , appendEntries.getPrevLogTerm());
+ }
} else {
outOfSync = false;
}
if (outOfSync) {
// We found that the log was out of sync so just send a negative
// reply and return
- context.getLogger().debug("Follower is out-of-sync, " +
- "so sending negative reply, lastIndex():{}, lastTerm():{}",
- lastIndex(), lastTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Follower is out-of-sync, " +
+ "so sending negative reply, lastIndex():{}, lastTerm():{}",
+ lastIndex(), lastTerm()
+ );
+ }
sender.tell(
new AppendEntriesReply(context.getId(), currentTerm(), false,
lastIndex(), lastTerm()), actor()
if (appendEntries.getEntries() != null
&& appendEntries.getEntries().size() > 0) {
- context.getLogger().debug(
- "Number of entries to be appended = " + appendEntries
- .getEntries().size()
- );
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Number of entries to be appended = " + appendEntries
+ .getEntries().size()
+ );
+ }
// 3. If an existing entry conflicts with a new one (same index
// but different terms), delete the existing entry and all that
continue;
}
- context.getLogger().debug(
- "Removing entries from log starting at "
- + matchEntry.getIndex()
- );
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Removing entries from log starting at "
+ + matchEntry.getIndex()
+ );
+ }
// Entries do not match so remove all subsequent entries
context.getReplicatedLog()
}
}
- context.getLogger().debug(
- "After cleanup entries to be added from = " + (addEntriesFrom
- + lastIndex())
- );
+ if(LOG.isDebugEnabled()) {
+ context.getLogger().debug(
+ "After cleanup entries to be added from = " + (addEntriesFrom
+ + lastIndex())
+ );
+ }
// 4. Append any new entries not already in the log
for (int i = addEntriesFrom;
.appendAndPersist(appendEntries.getEntries().get(i));
}
- context.getLogger().debug(
- "Log size is now " + context.getReplicatedLog().size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Log size is now " + context.getReplicatedLog().size());
+ }
}
context.getReplicatedLog().lastIndex()));
if (prevCommitIndex != context.getCommitIndex()) {
- context.getLogger()
- .debug("Commit index set to " + context.getCommitIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Commit index set to " + context.getCommitIndex());
+ }
}
// If commitIndex > lastApplied: increment lastApplied, apply
// check if there are any entries to be applied. last-applied can be equal to last-index
if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
context.getLastApplied() < lastIndex()) {
- context.getLogger().debug("applyLogToStateMachine, " +
- "appendEntries.getLeaderCommit():{}," +
- "context.getLastApplied():{}, lastIndex():{}",
- appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("applyLogToStateMachine, " +
+ "appendEntries.getLeaderCommit():{}," +
+ "context.getLastApplied():{}, lastIndex():{}",
+ appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()
+ );
+ }
+
applyLogToStateMachine(appendEntries.getLeaderCommit());
}
}
private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
- context.getLogger().debug("InstallSnapshot received by follower " +
- "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
- installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("InstallSnapshot received by follower " +
+ "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
+ installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()
+ );
+ }
try {
if (installSnapshot.getChunkIndex() == installSnapshot.getTotalChunks()) {
} else {
// we have more to go
snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
- context.getLogger().debug("Chunk={},snapshotChunksCollected.size:{}",
- installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Chunk={},snapshotChunksCollected.size:{}",
+ installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+ }
}
sender.tell(new InstallSnapshotReply(
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
+import akka.event.LoggingAdapter;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
private final int minReplicationCount;
+ private final LoggingAdapter LOG;
+
public Leader(RaftActorContext context) {
super(context);
+ LOG = context.getLogger();
+
if (lastIndex() >= 0) {
context.setCommitIndex(lastIndex());
}
followerToLog.put(followerId, followerLogInformation);
}
- context.getLogger().debug("Election:Leader has following peers:"+ followers);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Election:Leader has following peers:" + followers);
+ }
if (followers.size() > 0) {
minReplicationCount = (followers.size() + 1) / 2 + 1;
@Override protected RaftState handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- context.getLogger().debug(appendEntries.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntries.toString());
+ }
return state();
}
AppendEntriesReply appendEntriesReply) {
if(! appendEntriesReply.isSuccess()) {
- context.getLogger()
- .debug(appendEntriesReply.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntriesReply.toString());
+ }
}
// Update the FollowerLogInformation
followerToLog.get(followerId);
if(followerLogInformation == null){
- context.getLogger().error("Unknown follower {}", followerId);
+ LOG.error("Unknown follower {}", followerId);
return state();
}
return state();
}
+ protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+
+ ClientRequestTracker toRemove = findClientRequestTracker(logIndex);
+ if(toRemove != null) {
+ trackerList.remove(toRemove);
+ }
+
+ return toRemove;
+ }
+
protected ClientRequestTracker findClientRequestTracker(long logIndex) {
for (ClientRequestTracker tracker : trackerList) {
if (tracker.getIndex() == logIndex) {
if (reply.isSuccess()) {
if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
//this was the last chunk reply
- context.getLogger().debug("InstallSnapshotReply received, " +
- "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
- reply.getChunkIndex(), followerId,
- context.getReplicatedLog().getSnapshotIndex() + 1);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("InstallSnapshotReply received, " +
+ "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
+ reply.getChunkIndex(), followerId,
+ context.getReplicatedLog().getSnapshotIndex() + 1
+ );
+ }
FollowerLogInformation followerLogInformation =
followerToLog.get(followerId);
followerLogInformation.setNextIndex(
context.getReplicatedLog().getSnapshotIndex() + 1);
mapFollowerToSnapshot.remove(followerId);
- context.getLogger().debug("followerToLog.get(followerId).getNextIndex().get()=" +
- followerToLog.get(followerId).getNextIndex().get());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" +
+ followerToLog.get(followerId).getNextIndex().get());
+ }
} else {
followerToSnapshot.markSendStatus(true);
}
} else {
- context.getLogger().info("InstallSnapshotReply received, " +
- "sending snapshot chunk failed, Will retry, Chunk:{}",
- reply.getChunkIndex());
+ LOG.info("InstallSnapshotReply received, " +
+ "sending snapshot chunk failed, Will retry, Chunk:{}",
+ reply.getChunkIndex()
+ );
followerToSnapshot.markSendStatus(false);
}
} else {
- context.getLogger().error("ERROR!!" +
- "FollowerId in InstallSnapshotReply not known to Leader" +
- " or Chunk Index in InstallSnapshotReply not matching {} != {}",
- followerToSnapshot.getChunkIndex(), reply.getChunkIndex() );
+ LOG.error("ERROR!!" +
+ "FollowerId in InstallSnapshotReply not known to Leader" +
+ " or Chunk Index in InstallSnapshotReply not matching {} != {}",
+ followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
+ );
}
}
private void replicate(Replicate replicate) {
long logIndex = replicate.getReplicatedLogEntry().getIndex();
- context.getLogger().debug("Replicate message " + logIndex);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Replicate message " + logIndex);
+ }
// Create a tracker entry we will use this later to notify the
// client actor
if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) {
// if the follower is just not starting and leader's index
// is more than followers index
- context.getLogger().debug("SendInstallSnapshot to follower:{}," +
- "follower-nextIndex:{}, leader-snapshot-index:{}, " +
- "leader-last-index:{}", followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("SendInstallSnapshot to follower:{}," +
+ "follower-nextIndex:{}, leader-snapshot-index:{}, " +
+ "leader-last-index:{}", followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+ );
+ }
actor().tell(new SendInstallSnapshot(), actor());
} else {
).toSerializable(),
actor()
);
- context.getLogger().info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
+ LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
mapFollowerToSnapshot.get(followerId).getTotalChunks());
} catch (IOException e) {
- context.getLogger().error("InstallSnapshot failed for Leader.", e);
+ LOG.error("InstallSnapshot failed for Leader.", e);
}
}
mapFollowerToSnapshot.put(followerId, followerToSnapshot);
}
ByteString nextChunk = followerToSnapshot.getNextChunk();
- context.getLogger().debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+ }
return nextChunk;
}
int size = snapshotBytes.size();
totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
- context.getLogger().debug("Snapshot {} bytes, total chunks to send:{}",
- size, totalChunks);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot {} bytes, total chunks to send:{}",
+ size, totalChunks);
+ }
}
public ByteString getSnapshotBytes() {
}
}
- context.getLogger().debug("length={}, offset={},size={}",
- snapshotLength, start, size);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("length={}, offset={},size={}",
+ snapshotLength, start, size);
+ }
return getSnapshotBytes().substring(start, start + size);
}
try {
if(leProtoBuff.getData() != null && leProtoBuff.getData().getClientPayloadClassName() != null) {
String clientPayloadClassName = leProtoBuff.getData().getClientPayloadClassName();
- payload = (Payload)Class.forName(clientPayloadClassName).newInstance();
+ payload = (Payload) Class.forName(clientPayloadClassName).newInstance();
payload = payload.decode(leProtoBuff.getData());
payload.setClientPayloadClassName(clientPayloadClassName);
} else {
package org.opendaylight.controller.cluster.raft.messages;
import com.google.protobuf.ByteString;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
public class InstallSnapshot extends AbstractRaftRPC {
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
import com.google.protobuf.ByteString;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
+import java.util.ArrayList;
import java.util.Collections;
+import java.util.List;
import java.util.Map;
+import static junit.framework.Assert.assertTrue;
import static junit.framework.TestCase.assertEquals;
public class RaftActorTest extends AbstractActorTest {
public static class MockRaftActor extends RaftActor {
+ boolean applySnapshotCalled = false;
+
public MockRaftActor(String id,
Map<String, String> peerAddresses) {
super(id, peerAddresses);
}
+ public RaftActorContext getRaftActorContext() {
+ return context;
+ }
+
+ public boolean isApplySnapshotCalled() {
+ return applySnapshotCalled;
+ }
+
public static Props props(final String id, final Map<String, String> peerAddresses){
return Props.create(new Creator<MockRaftActor>(){
}
@Override protected void applySnapshot(ByteString snapshot) {
- throw new UnsupportedOperationException("applySnapshot");
+ applySnapshotCalled = true;
}
@Override protected void onStateChanged() {
kit.findLeader(kit.getRaftActor().path().toString());
}
+ @Test
+ public void testActorRecovery() {
+ new JavaTestKit(getSystem()) {{
+ new Within(duration("1 seconds")) {
+ protected void run() {
+
+ String persistenceId = "follower10";
+
+ ActorRef followerActor = getSystem().actorOf(
+ MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId);
+
+
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E"));
+ ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F"));
+ entries.add(entry1);
+ entries.add(entry2);
+
+ int lastApplied = 3;
+ int lastIndex = 5;
+ Snapshot snapshot = Snapshot.create("A B C D".getBytes(), entries, lastIndex, 1 , lastApplied, 1);
+ MockSnapshotStore.setMockSnapshot(snapshot);
+ MockSnapshotStore.setPersistenceId(persistenceId);
+
+ followerActor.tell(PoisonPill.getInstance(), null);
+ try {
+ // give some time for actor to die
+ Thread.sleep(200);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId, Collections.EMPTY_MAP));
+ try {
+ // give some time for the snapshot offer to get called.
+ Thread.sleep(200);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+ assertEquals(entries.size(), context.getReplicatedLog().size());
+ assertEquals(lastApplied, context.getLastApplied());
+ assertEquals(lastApplied, context.getCommitIndex());
+ assertTrue(ref.underlyingActor().isApplySnapshotCalled());
+ }
+
+ };
+ }};
+
+ }
+
}
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.protobuf.ByteString;
import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import static akka.pattern.Patterns.ask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
public class FollowerTest extends AbstractRaftActorBehaviorTest {
return new Follower(actorContext);
}
- @Override protected RaftActorContext createActorContext() {
- return new MockRaftActorContext("test", getSystem(), followerActor);
+ @Override protected RaftActorContext createActorContext() {
+ return createActorContext(followerActor);
+ }
+
+ protected RaftActorContext createActorContext(ActorRef actorRef){
+ return new MockRaftActorContext("test", getSystem(), actorRef);
}
@Test
createActorContext();
context.setLastApplied(100);
- setLastLogEntry((MockRaftActorContext) context, 1, 100, new MockRaftActorContext.MockPayload(""));
+ setLastLogEntry((MockRaftActorContext) context, 1, 100,
+ new MockRaftActorContext.MockPayload(""));
((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
List<ReplicatedLogEntry> entries =
Arrays.asList(
- (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
- new MockRaftActorContext.MockPayload("foo"))
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
+ new MockRaftActorContext.MockPayload("foo"))
);
// The new commitIndex is 101
}};
}
+
+ /**
+ * This test verifies that when InstallSnapshot is received by
+ * the follower it is applied correctly.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testHandleInstallSnapshot() throws Exception {
+ JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(
+ MessageCollectorActor.class));
+
+ MockRaftActorContext context = (MockRaftActorContext)
+ createActorContext(getRef());
+
+ Follower follower = (Follower)createBehavior(context);
+
+ HashMap<String, String> followerSnapshot = new HashMap<>();
+ followerSnapshot.put("1", "A");
+ followerSnapshot.put("2", "B");
+ followerSnapshot.put("3", "C");
+
+ ByteString bsSnapshot = toByteString(followerSnapshot);
+ ByteString chunkData = ByteString.EMPTY;
+ int offset = 0;
+ int snapshotLength = bsSnapshot.size();
+ int i = 1;
+
+ do {
+ chunkData = getNextChunk(bsSnapshot, offset);
+ final InstallSnapshot installSnapshot =
+ new InstallSnapshot(1, "leader-1", i, 1,
+ chunkData, i, 3);
+ follower.handleMessage(leaderActor, installSnapshot);
+ offset = offset + 50;
+ i++;
+ } while ((offset+50) < snapshotLength);
+
+ final InstallSnapshot installSnapshot3 = new InstallSnapshot(1, "leader-1", 3, 1, chunkData, 3, 3);
+ follower.handleMessage(leaderActor, installSnapshot3);
+
+ String[] matches = new ReceiveWhile<String>(String.class, duration("2 seconds")) {
+ @Override
+ protected String match(Object o) throws Exception {
+ if (o instanceof ApplySnapshot) {
+ ApplySnapshot as = (ApplySnapshot)o;
+ if (as.getSnapshot().getLastIndex() != installSnapshot3.getLastIncludedIndex()) {
+ return "applySnapshot-lastIndex-mismatch";
+ }
+ if (as.getSnapshot().getLastAppliedTerm() != installSnapshot3.getLastIncludedTerm()) {
+ return "applySnapshot-lastAppliedTerm-mismatch";
+ }
+ if (as.getSnapshot().getLastAppliedIndex() != installSnapshot3.getLastIncludedIndex()) {
+ return "applySnapshot-lastAppliedIndex-mismatch";
+ }
+ if (as.getSnapshot().getLastTerm() != installSnapshot3.getLastIncludedTerm()) {
+ return "applySnapshot-lastTerm-mismatch";
+ }
+ return "applySnapshot";
+ }
+
+ return "ignoreCase";
+ }
+ }.get();
+
+ String applySnapshotMatch = "";
+ for (String reply: matches) {
+ if (reply.startsWith("applySnapshot")) {
+ applySnapshotMatch = reply;
+ }
+ }
+
+ assertEquals("applySnapshot", applySnapshotMatch);
+
+ Object messages = executeLocalOperation(leaderActor, "get-all-messages");
+
+ assertNotNull(messages);
+ assertTrue(messages instanceof List);
+ List<Object> listMessages = (List<Object>) messages;
+
+ int installSnapshotReplyReceivedCount = 0;
+ for (Object message: listMessages) {
+ if (message instanceof InstallSnapshotReply) {
+ ++installSnapshotReplyReceivedCount;
+ }
+ }
+
+ assertEquals(3, installSnapshotReplyReceivedCount);
+
+ }};
+ }
+
+ public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
+ FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+ Timeout operationTimeout = new Timeout(operationDuration);
+ Future<Object> future = ask(actor, message, operationTimeout);
+
+ try {
+ return Await.result(future, operationDuration);
+ } catch (Exception e) {
+ throw e;
+ }
+ }
+
+ public ByteString getNextChunk (ByteString bs, int offset){
+ int snapshotLength = bs.size();
+ int start = offset;
+ int size = 50;
+ if (50 > snapshotLength) {
+ size = snapshotLength;
+ } else {
+ if ((start + 50) > snapshotLength) {
+ size = snapshotLength - start;
+ }
+ }
+ return bs.substring(start, start + size);
+ }
+
+ private ByteString toByteString(Map<String, String> state) {
+ ByteArrayOutputStream b = null;
+ ObjectOutputStream o = null;
+ try {
+ try {
+ b = new ByteArrayOutputStream();
+ o = new ObjectOutputStream(b);
+ o.writeObject(state);
+ byte[] snapshotBytes = b.toByteArray();
+ return ByteString.copyFrom(snapshotBytes);
+ } finally {
+ if (o != null) {
+ o.flush();
+ o.close();
+ }
+ if (b != null) {
+ b.close();
+ }
+ }
+ } catch (IOException e) {
+ org.junit.Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
+ }
+ return null;
+ }
}
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.UntypedActor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+public class MessageCollectorActor extends UntypedActor {
+ private List<Object> messages = new ArrayList<>();
+
+ @Override public void onReceive(Object message) throws Exception {
+ if(message instanceof String){
+ if("get-all-messages".equals(message)){
+ getSender().tell(messages, getSelf());
+ }
+ } else {
+ messages.add(message);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import scala.concurrent.Future;
+
+
+public class MockSnapshotStore extends SnapshotStore {
+
+ private static Snapshot mockSnapshot;
+ private static String persistenceId;
+
+ public static void setMockSnapshot(Snapshot s) {
+ mockSnapshot = s;
+ }
+
+ public static void setPersistenceId(String pId) {
+ persistenceId = pId;
+ }
+
+ @Override
+ public Future<Option<SelectedSnapshot>> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) {
+ if (mockSnapshot == null) {
+ return Futures.successful(Option.<SelectedSnapshot>none());
+ }
+
+ SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345);
+ SelectedSnapshot selectedSnapshot =
+ new SelectedSnapshot(smd, mockSnapshot);
+ return Futures.successful(Option.some(selectedSnapshot));
+ }
+
+ @Override
+ public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ return null;
+ }
+
+ @Override
+ public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+
+ }
+
+ @Override
+ public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+
+ }
+
+ @Override
+ public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception {
+
+ }
+}
akka {
+ persistence.snapshot-store.plugin = "mock-snapshot-store"
+
loglevel = "DEBUG"
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
}
}
}
+
+mock-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
Logging.getLogger(getContext().system(), this);
public AbstractUntypedActor() {
- LOG.debug("Actor created {}", getSelf());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor created {}", getSelf());
+ }
getContext().
system().
actorSelection("user/termination-monitor").
@Override public void onReceive(Object message) throws Exception {
final String messageType = message.getClass().getSimpleName();
- LOG.debug("Received message {}", messageType);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received message {}", messageType);
+ }
handleReceive(message);
-
- LOG.debug("Done handling message {}", messageType);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Done handling message {}", messageType);
+ }
}
protected abstract void handleReceive(Object message) throws Exception;
}
protected void unknownMessage(Object message) throws Exception {
- LOG.debug("Received unhandled message {}", message);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received unhandled message {}", message);
+ }
unhandled(message);
}
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore;
+package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
import com.google.common.base.Preconditions;
import com.google.protobuf.GeneratedMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.UnknownFieldSet;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: InstallSnapshot.proto
-package org.opendaylight.controller.cluster.raft.protobuff.messages;
+package org.opendaylight.controller.protobuff.messages.cluster.raft;
public final class InstallSnapshotMessages {
private InstallSnapshotMessages() {}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
}
public static com.google.protobuf.Parser<InstallSnapshot> PARSER =
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
- com.google.protobuf.ByteString bs =
+ com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
getLeaderIdBytes() {
java.lang.Object ref = leaderId_;
if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
leaderId_ = b;
return super.writeReplace();
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot prototype) {
+ public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshotOrBuilder {
+ implements org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshotOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
}
- // Construct using org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.newBuilder()
+ // Construct using org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot build() {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot build() {
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot buildPartial() {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot(this);
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot buildPartial() {
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) {
- return mergeFrom((org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot)other);
+ if (other instanceof org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) {
+ return mergeFrom((org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot other) {
- if (other == org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot other) {
+ if (other == org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
if (other.hasTerm()) {
setTerm(other.getTerm());
}
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
+ parsedMessage = (org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
getLeaderIdBytes() {
java.lang.Object ref = leaderId_;
if (ref instanceof String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
leaderId_ = b;
"\021lastIncludedIndex\030\003 \001(\003\022\030\n\020lastIncluded" +
"Term\030\004 \001(\003\022\014\n\004data\030\005 \001(\014\022\022\n\nchunkIndex\030\006" +
" \001(\005\022\023\n\013totalChunks\030\007 \001(\005BX\n;org.openday" +
- "light.controller.cluster.raft.protobuff." +
- "messagesB\027InstallSnapshotMessagesH\001"
+ "light.controller.protobuff.messages.clus" +
+ "ter.raftB\027InstallSnapshotMessagesH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
for (Entry<URI, String> e: prefixes.getPrefixes()) {
writer.writeNamespace(e.getValue(), e.getKey().toString());
}
- LOG.debug("Instance identifier with Random prefix is now {}", str);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Instance identifier with Random prefix is now {}", str);
+ }
writer.writeCharacters(str);
}
DataSchemaNode childSchema = null;
if (schema instanceof DataNodeContainer) {
childSchema = SchemaUtils.findFirstSchema(child.getNodeType(), ((DataNodeContainer) schema).getChildNodes()).orNull();
- if (childSchema == null) {
+ if (childSchema == null && LOG.isDebugEnabled()) {
LOG.debug("Probably the data node \"{}\" does not conform to schema", child == null ? "" : child.getNodeType().getLocalName());
}
}
*/
public void writeValue(final @Nonnull XMLStreamWriter writer, final @Nonnull TypeDefinition<?> type, final Object value) throws XMLStreamException {
if (value == null) {
- LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+ if(LOG.isDebugEnabled()){
+ LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+ }
return;
}
writer.writeNamespace(prefix, qname.getNamespace().toString());
writer.writeCharacters(prefix + ':' + qname.getLocalName());
} else {
- LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+ }
writer.writeCharacters(String.valueOf(value));
}
}
private static void write(final @Nonnull XMLStreamWriter writer, final @Nonnull InstanceIdentifierTypeDefinition type, final @Nonnull Object value) throws XMLStreamException {
if (value instanceof YangInstanceIdentifier) {
- LOG.debug("Writing InstanceIdentifier object {}", value);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Writing InstanceIdentifier object {}", value);
+ }
write(writer, (YangInstanceIdentifier)value);
} else {
- LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
- writer.writeCharacters(String.valueOf(value));
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+ }
+ writer.writeCharacters(String.valueOf(value));
}
}
}
* @return xml String
*/
public static String inputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
- LOG.debug("Converting input composite node to xml {}", cNode);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converting input composite node to xml {}", cNode);
+ }
if (cNode == null) {
return BLANK;
}
Set<RpcDefinition> rpcs = schemaContext.getOperations();
for(RpcDefinition rpc : rpcs) {
if(rpc.getQName().equals(cNode.getNodeType())){
- LOG.debug("Found the rpc definition from schema context matching with input composite node {}", rpc.getQName());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Found the rpc definition from schema context matching with input composite node {}", rpc.getQName());
+ }
CompositeNode inputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "input"));
domTree = XmlDocumentUtils.toDocument(inputContainer, rpc.getInput(), XmlDocumentUtils.defaultValueCodecProvider());
-
- LOG.debug("input composite node to document conversion complete, document is {}", domTree);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("input composite node to document conversion complete, document is {}", domTree);
+ }
break;
}
}
* @return xml string
*/
public static String outputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
- LOG.debug("Converting output composite node to xml {}", cNode);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converting output composite node to xml {}", cNode);
+ }
if (cNode == null) {
return BLANK;
}
Set<RpcDefinition> rpcs = schemaContext.getOperations();
for(RpcDefinition rpc : rpcs) {
if(rpc.getQName().equals(cNode.getNodeType())){
- LOG.debug("Found the rpc definition from schema context matching with output composite node {}", rpc.getQName());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Found the rpc definition from schema context matching with output composite node {}", rpc.getQName());
+ }
CompositeNode outputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "output"));
domTree = XmlDocumentUtils.toDocument(outputContainer, rpc.getOutput(), XmlDocumentUtils.defaultValueCodecProvider());
-
- LOG.debug("output composite node to document conversion complete, document is {}", domTree);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("output composite node to document conversion complete, document is {}", domTree);
+ }
break;
}
}
LOG.error("Error during translation of Document to OutputStream", e);
}
- LOG.debug("Document to string conversion complete, xml string is {} ", writer.toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Document to string conversion complete, xml string is {} ", writer.toString());
+ }
return writer.toString();
}
* @return CompositeNode object based on the input, if any of the input parameter is null, a null object is returned
*/
public static CompositeNode inputXmlToCompositeNode(QName rpc, String xml, SchemaContext schemaContext){
- LOG.debug("Converting input xml to composite node {}", xml);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converting input xml to composite node {}", xml);
+ }
if (xml==null || xml.length()==0) {
return null;
}
Set<RpcDefinition> rpcs = schemaContext.getOperations();
for(RpcDefinition rpcDef : rpcs) {
if(rpcDef.getQName().equals(rpc)){
- LOG.debug("found the rpc definition from schema context matching rpc {}", rpc);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("found the rpc definition from schema context matching rpc {}", rpc);
+ }
if(rpcDef.getInput() == null) {
LOG.warn("found rpc definition's input is null");
return null;
List<Node<?>> dataNodes = XmlDocumentUtils.toDomNodes(xmlData,
Optional.of(rpcDef.getInput().getChildNodes()), schemaContext);
-
- LOG.debug("Converted xml input to list of nodes {}", dataNodes);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converted xml input to list of nodes {}", dataNodes);
+ }
final CompositeNodeBuilder<ImmutableCompositeNode> it = ImmutableCompositeNode.builder();
it.setQName(rpc);
it.add(ImmutableCompositeNode.create(input, dataNodes));
} catch (IOException e) {
LOG.error("Error during building data tree from XML", e);
}
-
- LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+ }
return compositeNode;
}
package org.opendaylight.controller.cluster.raft;
-option java_package = "org.opendaylight.controller.cluster.raft.protobuff.messages";
+option java_package = "org.opendaylight.controller.protobuff.messages.cluster.raft";
option java_outer_classname = "InstallSnapshotMessages";
option optimize_for = SPEED;
metric-capture-enabled = true
akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+
actor {
+
provider = "akka.cluster.ClusterActorRefProvider"
serializers {
java = "akka.serialization.JavaSerializer"
metric-capture-enabled = true
akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+
actor {
provider = "akka.cluster.ClusterActorRefProvider"
package org.opendaylight.controller.md.sal.common.api.data;
+import com.google.common.base.Supplier;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-
-import com.google.common.base.Function;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
/**
* A type of TransactionCommitFailedException that indicates a situation that would result in a
* @author Thomas Pantelis
*/
public class TransactionCommitDeadlockException extends TransactionCommitFailedException {
-
private static final long serialVersionUID = 1L;
-
private static final String DEADLOCK_MESSAGE =
"An attempt to block on a ListenableFuture via a get method from a write " +
"transaction submit was detected that would result in deadlock. The commit " +
"result must be obtained asynchronously, e.g. via Futures#addCallback, to avoid deadlock.";
+ private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE);
- public static Function<Void, Exception> DEADLOCK_EXECUTOR_FUNCTION = new Function<Void, Exception>() {
+ public static final Supplier<Exception> DEADLOCK_EXCEPTION_SUPPLIER = new Supplier<Exception>() {
@Override
- public Exception apply(Void notUsed) {
- return new TransactionCommitDeadlockException( DEADLOCK_MESSAGE,
- RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE));
+ public Exception get() {
+ return new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR);
}
};
- public TransactionCommitDeadlockException(String message, final RpcError... errors) {
+ public TransactionCommitDeadlockException(final String message, final RpcError... errors) {
super(message, errors);
}
}
*/
package org.opendaylight.controller.md.sal.common.impl.service;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.AbstractDataModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-
public abstract class AbstractDataTransaction<P extends Path<P>, D extends Object> extends
AbstractDataModification<P, D> {
- private final static Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final ListenableFuture<RpcResult<TransactionStatus>> SUCCESS_FUTURE =
+ Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
private final Object identifier;
private final long allocationTime;
@Override
public Future<RpcResult<TransactionStatus>> commit() {
readyTime = System.nanoTime();
- LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ }
changeStatus(TransactionStatus.SUBMITED);
-
return this.broker.commit(this);
}
}
@Override
- public boolean equals(Object obj) {
+ public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
public void succeeded() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.COMMITED);
}
public void failed() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.FAILED);
}
this.onStatusChange(status);
}
- public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(
- CheckedFuture<Void,TransactionCommitFailedException> from ) {
+ public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(final CheckedFuture<Void,TransactionCommitFailedException> from) {
return Futures.transform(from, new AsyncFunction<Void, RpcResult<TransactionStatus>>() {
@Override
- public ListenableFuture<RpcResult<TransactionStatus>> apply(Void input) throws Exception {
- return Futures.immediateFuture(RpcResultBuilder.<TransactionStatus>
- success(TransactionStatus.COMMITED).build());
+ public ListenableFuture<RpcResult<TransactionStatus>> apply(final Void input) {
+ return SUCCESS_FUTURE;
}
- } );
+ });
}
}
import javax.annotation.Nullable;
import org.opendaylight.yangtools.util.concurrent.CountingRejectedExecutionHandler;
import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* MXBean implementation of the ThreadExecutorStatsMXBean interface that retrieves statistics
*/
public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
implements ThreadExecutorStatsMXBean {
-
+ private static final Logger LOG = LoggerFactory.getLogger(ThreadExecutorStatsMXBeanImpl.class);
private final ThreadPoolExecutor executor;
/**
* @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
* @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
*/
- public ThreadExecutorStatsMXBeanImpl(Executor executor, String mBeanName,
- String mBeanType, @Nullable String mBeanCategory) {
+ public ThreadExecutorStatsMXBeanImpl(final ThreadPoolExecutor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
super(mBeanName, mBeanType, mBeanCategory);
+ this.executor = Preconditions.checkNotNull(executor);
+ }
+
+ private static ThreadExecutorStatsMXBeanImpl createInternal(final Executor executor,
+ final String mBeanName, final String mBeanType, final String mBeanCategory) {
+ if (executor instanceof ThreadPoolExecutor) {
+ final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl(
+ (ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+ return ret;
+ }
+
+ LOG.info("Executor {} is not supported", executor);
+ return null;
+ }
+
+ /**
+ * Creates a new bean if the backing executor is a ThreadPoolExecutor and registers it.
+ *
+ * @param executor the backing {@link Executor}
+ * @param mBeanName Used as the <code>name</code> property in the bean's ObjectName.
+ * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
+ * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
+ * @return a registered ThreadExecutorStatsMXBeanImpl instance if the backing executor
+ * is a ThreadPoolExecutor, otherwise null.
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
+ ThreadExecutorStatsMXBeanImpl ret = createInternal(executor, mBeanName, mBeanType, mBeanCategory);
+ if(ret != null) {
+ ret.registerMBean();
+ }
- Preconditions.checkArgument(executor instanceof ThreadPoolExecutor,
- "The ExecutorService of type {} is not an instanceof ThreadPoolExecutor",
- executor.getClass());
- this.executor = (ThreadPoolExecutor)executor;
+ return ret;
+ }
+
+ /**
+ * Creates a new bean if the backing executor is a ThreadPoolExecutor.
+ *
+ * @param executor the backing {@link Executor}
+ * @return a ThreadExecutorStatsMXBeanImpl instance if the backing executor
+ * is a ThreadPoolExecutor, otherwise null.
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor) {
+ return createInternal(executor, "", "", null);
}
@Override
Preconditions.checkNotNull(path, "path should not be null");
Preconditions.checkNotNull(listener, "listener should not be null");
-
- LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+ }
ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
DataChangeListener.props(listener ));
}, actorContext.getActorSystem().dispatcher());
return listenerRegistrationProxy;
}
-
- LOG.debug(
- "No local shard for shardName {} was found so returning a noop registration",
- shardName);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "No local shard for shardName {} was found so returning a noop registration",
+ shardName);
+ }
return new NoOpDataChangeListenerRegistration(listener);
}
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
}
@Override public void onReceiveRecover(Object message) {
- LOG.debug("onReceiveRecover: Received message {} from {}", message.getClass().toString(),
- getSender());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("onReceiveRecover: Received message {} from {}",
+ message.getClass().toString(),
+ getSender());
+ }
if (message instanceof RecoveryFailure){
LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
}
@Override public void onReceiveCommand(Object message) {
- LOG.debug("onReceiveCommand: Received message {} from {}", message.getClass().toString(),
- getSender());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("onReceiveCommand: Received message {} from {}",
+ message.getClass().toString(),
+ getSender());
+ }
if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
// This must be for install snapshot. Don't want to open this up and trigger
.tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
self());
+ createSnapshotTransaction = null;
// Send a PoisonPill instead of sending close transaction because we do not really need
// a response
getSender().tell(PoisonPill.getInstance(), self());
ShardTransactionIdentifier.builder()
.remoteTransactionId(remoteTransactionId)
.build();
- LOG.debug("Creating transaction : {} ", transactionId);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Creating transaction : {} ", transactionId);
+ }
ActorRef transactionActor =
createTypedTransactionActor(transactionType, transactionId, transactionChainId);
DOMStoreThreePhaseCommitCohort cohort =
modificationToCohort.remove(serialized);
if (cohort == null) {
- LOG.debug(
- "Could not find cohort for modification : {}. Writing modification using a new transaction",
- modification);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Could not find cohort for modification : {}. Writing modification using a new transaction",
+ modification);
+ }
+
DOMStoreWriteTransaction transaction =
store.newWriteOnlyTransaction();
- LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
+ }
modification.apply(transaction);
try {
return;
}
- final ListenableFuture<Void> future = cohort.commit();
- final ActorRef self = getSelf();
+ ListenableFuture<Void> future = cohort.commit();
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onSuccess(Void v) {
- sender.tell(new CommitTransactionReply().toSerializable(), self);
+ sender.tell(new CommitTransactionReply().toSerializable(), getSelf());
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
}
public void onFailure(Throwable t) {
LOG.error(t, "An exception happened during commit");
shardMBean.incrementFailedTransactionsCount();
- sender.tell(new akka.actor.Status.Failure(t), self);
+ sender.tell(new akka.actor.Status.Failure(t), getSelf());
}
});
private void registerChangeListener(
RegisterChangeListener registerChangeListener) {
- LOG.debug("registerDataChangeListener for {}", registerChangeListener
- .getPath());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("registerDataChangeListener for {}", registerChangeListener
+ .getPath());
+ }
ActorSelection dataChangeListenerPath = getContext()
getContext().actorOf(
DataChangeListenerRegistration.props(registration));
- LOG.debug(
- "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
- , listenerRegistration.path().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
+ , listenerRegistration.path().toString());
+ }
getSender()
.tell(new RegisterChangeListenerReply(listenerRegistration.path()),
getSelf());
}
- private void createTransactionChain() {
- DOMStoreTransactionChain chain = store.createTransactionChain();
- ActorRef transactionChain = getContext().actorOf(
- ShardTransactionChain.props(chain, schemaContext, datastoreContext, shardMBean));
- getSender().tell(new CreateTransactionChainReply(transactionChain.path()).toSerializable(),
- getSelf());
- }
-
private boolean isMetricsCaptureEnabled(){
CommonConfig config = new CommonConfig(getContext().system().settings().config());
return config.isMetricCaptureEnabled();
}
} else {
- LOG.error("Unknown state received {}", data);
+ LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader());
}
// Update stats
// Since this will be done only on Recovery or when this actor is a Follower
// we can safely commit everything in here. We not need to worry about event notifications
// as they would have already been disabled on the follower
+
+ LOG.info("Applying snapshot");
try {
DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
syncCommitTransaction(transaction);
} catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
LOG.error(e, "An exception occurred when applying snapshot");
+ } finally {
+ LOG.info("Done applying snapshot");
}
}
.tell(new EnableNotification(isLeader()), getSelf());
}
- if (getLeaderId() != null) {
- shardMBean.setLeader(getLeaderId());
- }
-
shardMBean.setRaftState(getRaftState().name());
shardMBean.setCurrentTerm(getCurrentTerm());
// If this actor is no longer the leader close all the transaction chains
if(!isLeader()){
for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
- LOG.debug("onStateChanged: Closing transaction chain {} because shard {} is no longer the leader", entry.getKey(), getId());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
+ entry.getKey(), getId());
+ }
entry.getValue().close();
}
}
}
+ @Override protected void onLeaderChanged(String oldLeader, String newLeader) {
+ shardMBean.setLeader(newLeader);
+ }
+
@Override public String persistenceId() {
return this.name.toString();
}
peerAddress);
if(peerAddresses.containsKey(peerId)){
peerAddresses.put(peerId, peerAddress);
-
- LOG.debug(
- "Sending PeerAddressResolved for peer {} with address {} to {}",
- peerId, peerAddress, actor.path());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Sending PeerAddressResolved for peer {} with address {} to {}",
+ peerId, peerAddress, actor.path());
+ }
actor
.tell(new PeerAddressResolved(peerId, peerAddress),
getSelf());
getSender().tell(new GetCompositeModificationReply(
new ImmutableCompositeModification(modification)), getSelf());
} else if (message instanceof ReceiveTimeout) {
- LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+ }
closeTransaction(false);
} else {
throw new UnknownMessageException(message);
protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
modification.addModification(
new WriteModification(message.getPath(), message.getData(),schemaContext));
- LOG.debug("writeData at path : " + message.getPath().toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("writeData at path : " + message.getPath().toString());
+ }
try {
transaction.write(message.getPath(), message.getData());
getSender().tell(new WriteDataReply().toSerializable(), getSelf());
protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
modification.addModification(
new MergeModification(message.getPath(), message.getData(), schemaContext));
- LOG.debug("mergeData at path : " + message.getPath().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("mergeData at path : " + message.getPath().toString());
+ }
try {
transaction.merge(message.getPath(), message.getData());
getSender().tell(new MergeDataReply().toSerializable(), getSelf());
}
protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
- LOG.debug("deleteData at path : " + message.getPath().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("deleteData at path : " + message.getPath().toString());
+ }
modification.addModification(new DeleteModification(message.getPath()));
try {
transaction.delete(message.getPath());
@Override public void onReceive(Object message) throws Exception {
if(message instanceof Terminated){
Terminated terminated = (Terminated) message;
- LOG.debug("Actor terminated : {}", terminated.actor());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor terminated : {}", terminated.actor());
+ }
} else if(message instanceof Monitor){
Monitor monitor = (Monitor) message;
getContext().watch(monitor.getActorRef());
private void commit(CommitTransaction message) {
// Forward the commit to the shard
- log.debug("Forward commit transaction to Shard {} ", shardActor);
+ if(log.isDebugEnabled()) {
+ log.debug("Forward commit transaction to Shard {} ", shardActor);
+ }
shardActor.forward(new ForwardedCommitTransaction(cohort, modification),
getContext());
@Override
public Void apply(Iterable<ActorPath> paths) {
cohortPaths = Lists.newArrayList(paths);
-
- LOG.debug("Tx {} successfully built cohort path list: {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} successfully built cohort path list: {}",
transactionId, cohortPaths);
+ }
return null;
}
}, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
@Override
public ListenableFuture<Boolean> canCommit() {
- LOG.debug("Tx {} canCommit", transactionId);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} canCommit", transactionId);
+ }
final SettableFuture<Boolean> returnFuture = SettableFuture.create();
// The first phase of canCommit is to gather the list of cohort actor paths that will
@Override
public void onComplete(Throwable failure, Void notUsed) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+ }
returnFuture.setException(failure);
} else {
finishCanCommit(returnFuture);
}
private void finishCanCommit(final SettableFuture<Boolean> returnFuture) {
-
- LOG.debug("Tx {} finishCanCommit", transactionId);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finishCanCommit", transactionId);
+ }
// The last phase of canCommit is to invoke all the cohort actors asynchronously to perform
// their canCommit processing. If any one fails then we'll fail canCommit.
@Override
public void onComplete(Throwable failure, Iterable<Object> responses) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+ }
returnFuture.setException(failure);
return;
}
return;
}
}
-
- LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
+ }
returnFuture.set(Boolean.valueOf(result));
}
}, actorContext.getActorSystem().dispatcher());
private Future<Iterable<Object>> invokeCohorts(Object message) {
List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohortPaths.size());
for(ActorPath actorPath : cohortPaths) {
-
- LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
+ }
ActorSelection cohort = actorContext.actorSelection(actorPath);
futureList.add(actorContext.executeRemoteOperationAsync(cohort, message));
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
final Class<?> expectedResponseClass, final boolean propagateException) {
- LOG.debug("Tx {} {}", transactionId, operationName);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} {}", transactionId, operationName);
+ }
final SettableFuture<Void> returnFuture = SettableFuture.create();
// The cohort actor list should already be built at this point by the canCommit phase but,
@Override
public void onComplete(Throwable failure, Void notUsed) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
operationName, failure);
-
+ }
if(propagateException) {
returnFuture.setException(failure);
} else {
private void finishVoidOperation(final String operationName, final Object message,
final Class<?> expectedResponseClass, final boolean propagateException,
final SettableFuture<Void> returnFuture) {
-
- LOG.debug("Tx {} finish {}", transactionId, operationName);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finish {}", transactionId, operationName);
+ }
Future<Iterable<Object>> combinedFuture = invokeCohorts(message);
combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
}
if(exceptionToPropagate != null) {
- LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
operationName, exceptionToPropagate);
-
+ }
if(propagateException) {
// We don't log the exception here to avoid redundant logging since we're
// propagating to the caller in MD-SAL core who will log it.
// Since the caller doesn't want us to propagate the exception we'll also
// not log it normally. But it's usually not good to totally silence
// exceptions so we'll log it to debug level.
- LOG.debug(String.format("%s failed", message.getClass().getSimpleName()),
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(String.format("%s failed", message.getClass().getSimpleName()),
exceptionToPropagate);
+ }
returnFuture.set(null);
}
} else {
- LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+ }
returnFuture.set(null);
}
}
new TransactionProxyCleanupPhantomReference(this);
phantomReferenceCache.put(cleanup, cleanup);
}
-
- LOG.debug("Created txn {} of type {}", identifier, transactionType);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Created txn {} of type {}", identifier, transactionType);
+ }
}
@Override
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
- LOG.debug("Tx {} read {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} read {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
return transactionContext(path).readData(path);
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Exists operation on write-only transaction is not allowed");
- LOG.debug("Tx {} exists {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} exists {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
return transactionContext(path).dataExists(path);
checkModificationState();
- LOG.debug("Tx {} write {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} write {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
transactionContext(path).writeData(path, data);
checkModificationState();
- LOG.debug("Tx {} merge {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} merge {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
transactionContext(path).mergeData(path, data);
public void delete(YangInstanceIdentifier path) {
checkModificationState();
-
- LOG.debug("Tx {} delete {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} delete {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
transactionContext(path).deleteData(path);
inReadyState = true;
- LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
remoteTransactionPaths.size());
-
+ }
List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- LOG.debug("Tx {} Readying transaction for shard {}", identifier,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Readying transaction for shard {}", identifier,
transactionContext.getShardName());
-
+ }
cohortPathFutures.add(transactionContext.readyTransaction());
}
String transactionPath = reply.getTransactionPath();
- LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
+ }
ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
if (transactionType == TransactionType.READ_ONLY) {
"Invalid reply type {} for CreateTransaction", response.getClass()));
}
} catch (Exception e) {
- LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+ }
remoteTransactionPaths
.put(shardName, new NoOpTransactionContext(shardName, e, identifier));
}
@Override
public void closeTransaction() {
- LOG.debug("Tx {} closeTransaction called", identifier);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} closeTransaction called", identifier);
+ }
actorContext.sendRemoteOperationAsync(getActor(), new CloseTransaction().toSerializable());
}
@Override
public Future<ActorPath> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
identifier, recordedOperationFutures.size());
-
+ }
// Send the ReadyTransaction message to the Tx actor.
final Future<Object> replyFuture = actorContext.executeRemoteOperationAsync(getActor(),
return combinedFutures.transform(new AbstractFunction1<Iterable<Object>, ActorPath>() {
@Override
public ActorPath apply(Iterable<Object> notUsed) {
-
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
identifier);
-
+ }
// At this point all the Futures succeeded and we need to extract the cohort
// actor path from the ReadyTransactionReply. For the recorded operations, they
// don't return any data so we're only interested that they completed
String resolvedCohortPath = getResolvedCohortPath(
reply.getCohortPath().toString());
- LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
identifier, resolvedCohortPath);
-
+ }
return actorContext.actorFor(resolvedCohortPath);
} else {
// Throwing an exception here will fail the Future.
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ }
recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
new DeleteData(path).toSerializable() ));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ }
recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
new MergeData(path, data, schemaContext).toSerializable()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ }
recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
new WriteData(path, data, schemaContext).toSerializable()));
}
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
final YangInstanceIdentifier path) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
+ }
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishReadData(path, returnFuture);
} else {
- LOG.debug("Tx {} readData: verifying {} previous recorded operations",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
-
+ }
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} readData: a recorded operation failed: {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData: a recorded operation failed: {}",
identifier, failure);
-
+ }
returnFuture.setException(new ReadFailedException(
"The read could not be performed because a previous put, merge,"
+ "or delete operation failed", failure));
private void finishReadData(final YangInstanceIdentifier path,
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
- LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
+ }
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object readResponse) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} read operation failed: {}", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+ }
returnFuture.setException(new ReadFailedException(
"Error reading data for path " + path, failure));
} else {
- LOG.debug("Tx {} read operation succeeded", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} read operation succeeded", identifier, failure);
+ }
if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,
path, readResponse);
public CheckedFuture<Boolean, ReadFailedException> dataExists(
final YangInstanceIdentifier path) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ }
final SettableFuture<Boolean> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishDataExists(path, returnFuture);
} else {
- LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
-
+ }
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
identifier, failure);
-
+ }
returnFuture.setException(new ReadFailedException(
"The data exists could not be performed because a previous "
+ "put, merge, or delete operation failed", failure));
private void finishDataExists(final YangInstanceIdentifier path,
final SettableFuture<Boolean> returnFuture) {
- LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
+ }
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object response) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+ }
returnFuture.setException(new ReadFailedException(
"Error checking data exists for path " + path, failure));
} else {
- LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+ }
if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
returnFuture.set(Boolean.valueOf(DataExistsReply.
fromSerializable(response).exists()));
@Override
public void closeTransaction() {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+ }
}
@Override
public Future<ActorPath> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called", identifier);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction called", identifier);
+ }
return akka.dispatch.Futures.failed(failure);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ }
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ }
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ }
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
YangInstanceIdentifier path) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
+ }
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error reading data for path " + path, failure));
}
@Override
public CheckedFuture<Boolean, ReadFailedException> dataExists(
YangInstanceIdentifier path) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ }
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error checking exists for path " + path, failure));
}
}
public void setDataStoreExecutor(ExecutorService dsExecutor) {
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dsExecutor,
- "notification-executor", getMBeanType(), getMBeanCategory());
+ this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor);
}
public void setNotificationManager(QueuedNotificationManager<?, ?> manager) {
this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", getMBeanType(), getMBeanCategory());
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
- "data-store-executor", getMBeanType(), getMBeanCategory());
+ this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor());
}
@Override
@Override
public ThreadExecutorStats getDataStoreExecutorStats() {
- return dataStoreExecutorStatsBean.toThreadExecutorStats();
+ return dataStoreExecutorStatsBean == null ? null :
+ dataStoreExecutorStatsBean.toThreadExecutorStats();
}
@Override
if (result instanceof LocalShardFound) {
LocalShardFound found = (LocalShardFound) result;
- LOG.debug("Local shard found {}", found.getPath());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Local shard found {}", found.getPath());
+ }
return found.getPath();
}
if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
PrimaryFound found = PrimaryFound.fromSerializable(result);
- LOG.debug("Primary found {}", found.getPrimaryPath());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Primary found {}", found.getPrimaryPath());
+ }
return found.getPrimaryPath();
}
throw new PrimaryNotFoundException("Could not find primary for shardName " + shardName);
*/
public Object executeRemoteOperation(ActorSelection actor, Object message) {
- LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
- actor.toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
+ actor.toString());
+ }
Future<Object> future = ask(actor, message, operationTimeout);
try {
*/
public Future<Object> executeRemoteOperationAsync(ActorSelection actor, Object message) {
- LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
+ }
return ask(actor, message, operationTimeout);
}
System.setProperty("shard.persistent", "false");
system = ActorSystem.create("test");
+
+ deletePersistenceFiles();
}
@AfterClass
public static void tearDownClass() throws IOException {
JavaTestKit.shutdownActorSystem(system);
system = null;
+
+ deletePersistenceFiles();
}
protected static void deletePersistenceFiles() throws IOException {
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import java.io.File;
subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
getRef());
- waitForLogMessage(Logging.Debug.class, subject, "CaptureSnapshotReply received by actor");
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
+ subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
+ getRef());
+
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
}
};
- Thread.sleep(2000);
deletePersistenceFiles();
}};
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import com.google.common.collect.Iterables;
+import scala.concurrent.Future;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class InMemorySnapshotStore extends SnapshotStore {
+
+ Map<String, List<Snapshot>> snapshots = new HashMap<>();
+
+ @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+ SnapshotSelectionCriteria snapshotSelectionCriteria) {
+ List<Snapshot> snapshotList = snapshots.get(s);
+ if(snapshotList == null){
+ return Futures.successful(Option.<SelectedSnapshot>none());
+ }
+
+ Snapshot snapshot = Iterables.getLast(snapshotList);
+ SelectedSnapshot selectedSnapshot =
+ new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
+ return Futures.successful(Option.some(selectedSnapshot));
+ }
+
+ @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+ if(snapshotList == null){
+ snapshotList = new ArrayList<>();
+ snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
+ }
+ snapshotList.add(new Snapshot(snapshotMetadata, o));
+
+ return Futures.successful(null);
+ }
+
+ @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+ }
+
+ @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+ List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+ if(snapshotList == null){
+ return;
+ }
+
+ int deleteIndex = -1;
+
+ for(int i=0;i<snapshotList.size(); i++){
+ Snapshot snapshot = snapshotList.get(i);
+ if(snapshotMetadata.equals(snapshot.getMetadata())){
+ deleteIndex = i;
+ break;
+ }
+ }
+
+ if(deleteIndex != -1){
+ snapshotList.remove(deleteIndex);
+ }
+
+ }
+
+ @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+ throws Exception {
+ List<Snapshot> snapshotList = snapshots.get(s);
+
+ if(snapshotList == null){
+ return;
+ }
+
+ // TODO : This is a quick and dirty implementation. Do actual match later.
+ snapshotList.clear();
+ snapshots.remove(s);
+ }
+
+ private static class Snapshot {
+ private final SnapshotMetadata metadata;
+ private final Object data;
+
+ private Snapshot(SnapshotMetadata metadata, Object data) {
+ this.metadata = metadata;
+ this.data = data;
+ }
+
+ public SnapshotMetadata getMetadata() {
+ return metadata;
+ }
+
+ public Object getData() {
+ return data;
+ }
+ }
+}
import akka.actor.Props;
import akka.actor.UntypedActor;
import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import akka.actor.Props;
import akka.actor.UntypedActor;
import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
actor {
}
}
}
+
+in-memory-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
bounded-mailbox {
mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
*/
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import com.google.common.collect.ImmutableMap;
/**
*
//we will default to InMemoryDOMDataStore creation
configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
}
- ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
- .<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
- .put(LogicalDatastoreType.CONFIGURATION, configStore).build();
+
+ final Map<LogicalDatastoreType, DOMStore> datastores = new EnumMap<>(LogicalDatastoreType.class);
+ datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
+ datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
/*
* We use a single-threaded executor for commits with a bounded queue capacity. If the
DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
listenableFutureExecutor));
final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE);
commitStatsMXBean.registerMBean();
- final ThreadExecutorStatsMXBeanImpl commitExecutorStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(commitExecutor, "CommitExecutorStats",
+ final AbstractMXBean commitExecutorStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
JMX_BEAN_TYPE, null);
- commitExecutorStatsMXBean.registerMBean();
-
- final ThreadExecutorStatsMXBeanImpl commitFutureStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(listenableFutureExecutor,
+ final AbstractMXBean commitFutureStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
"CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
- commitFutureStatsMXBean.registerMBean();
newDataBroker.setCloseable(new AutoCloseable() {
@Override
public void close() {
commitStatsMXBean.unregisterMBean();
- commitExecutorStatsMXBean.unregisterMBean();
- commitFutureStatsMXBean.unregisterMBean();
+ if (commitExecutorStatsMXBean != null) {
+ commitExecutorStatsMXBean.unregisterMBean();
+ }
+ if (commitFutureStatsMXBean != null) {
+ commitFutureStatsMXBean.unregisterMBean();
+ }
}
});
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
* Composite DOM Transaction backed by {@link DOMStoreTransaction}.
*
abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTransaction> implements
AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- private final ImmutableMap<K, T> backingTxs;
+ private final Map<K, T> backingTxs;
private final Object identifier;
/**
* @param backingTxs
* Key,value map of backing transactions.
*/
- protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final ImmutableMap<K, T> backingTxs) {
+ protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map<K, T> backingTxs) {
this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null");
}
*/
protected final T getSubtransaction(final K key) {
Preconditions.checkNotNull(key, "key must not be null.");
- Preconditions.checkArgument(backingTxs.containsKey(key), "No subtransaction associated with %s", key);
- return backingTxs.get(key);
+
+ final T ret = backingTxs.get(key);
+ Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+ return ret;
}
/**
* Returns immutable Iterable of all subtransactions.
*
*/
- protected Iterable<T> getSubtransactions() {
+ protected Collection<T> getSubtransactions() {
return backingTxs.values();
}
protected void closeSubtransactions() {
/*
- * We share one exception for all failures, which are added
- * as supressedExceptions to it.
- *
+ * We share one exception for all failures, which are added
+ * as suppressed exceptions to it.
*/
IllegalStateException failure = null;
for (T subtransaction : backingTxs.values()) {
subtransaction.close();
} catch (Exception e) {
// If we did not allocated failure we allocate it
- if(failure == null) {
- failure = new IllegalStateException("Uncaught exception occured during closing transaction.", e);
+ if (failure == null) {
+ failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
} else {
- // We update it with addotional exceptions, which occured during error.
+ // We update it with additional exceptions, which occurred during error.
failure.addSuppressed(e);
}
}
}
// If we have failure, we throw it at after all attempts to close.
- if(failure != null) {
+ if (failure != null) {
throw failure;
}
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.EnumMap;
import java.util.Map;
import java.util.Map.Entry;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
*
* Abstract composite transaction factory.
* @param <T>
* Type of {@link DOMStoreTransactionFactory} factory.
*/
-public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
-
- private final ImmutableMap<LogicalDatastoreType, T> storeTxFactories;
-
- private boolean closed;
+abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicIntegerFieldUpdater<AbstractDOMForwardedTransactionFactory> UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed");
+ private final Map<LogicalDatastoreType, T> storeTxFactories;
+ private volatile int closed = 0;
protected AbstractDOMForwardedTransactionFactory(final Map<LogicalDatastoreType, ? extends T> txFactories) {
- this.storeTxFactories = ImmutableMap.copyOf(txFactories);
+ this.storeTxFactories = new EnumMap<>(txFactories);
}
/**
*
* @return New composite read-only transaction.
*/
- public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newReadOnlyTransaction());
}
- return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), builder.build());
+ return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns);
}
-
-
/**
* Creates a new composite write-only transaction
*
* @return New composite write-only transaction associated with this
* factory.
*/
- public DOMDataWriteTransaction newWriteOnlyTransaction() {
+ public final DOMDataWriteTransaction newWriteOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
}
- return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), builder.build(),
- this);
+ return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), txns, this);
}
/**
*
* @return New composite read-write transaction associated with this
* factory.
- *
*/
- public DOMDataReadWriteTransaction newReadWriteTransaction() {
+ public final DOMDataReadWriteTransaction newReadWriteTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadWriteTransaction());
+ txns.put(store.getKey(), store.getValue().newReadWriteTransaction());
}
- return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), builder.build(), this);
+ return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this);
}
/**
}
/**
- *
* Checks if instance is not closed.
*
* @throws IllegalStateException If instance of this class was closed.
*
*/
- @GuardedBy("this")
- protected synchronized void checkNotClosed() {
- Preconditions.checkState(!closed,"Transaction factory was closed. No further operations allowed.");
+ protected final void checkNotClosed() {
+ Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
}
@Override
- @GuardedBy("this")
- public synchronized void close() {
- closed = true;
+ public void close() {
+ final boolean success = UPDATER.compareAndSet(this, 0, 1);
+ Preconditions.checkState(success, "Transaction factory was already closed");
}
-
}
+
package org.opendaylight.controller.md.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkState;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DOMStore> implements DOMDataBroker,
AutoCloseable {
private final AtomicLong chainNum = new AtomicLong();
private volatile AutoCloseable closeable;
- public DOMDataBrokerImpl(final ImmutableMap<LogicalDatastoreType, DOMStore> datastores,
+ public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
final ListeningExecutorService executor) {
super(datastores);
this.coordinator = new DOMDataCommitCoordinatorImpl(executor);
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
@Override
public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreTransactionChain> backingChainsBuilder = ImmutableMap
- .builder();
+ checkNotClosed();
+
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
- backingChainsBuilder.put(entry.getKey(), entry.getValue().createTransactionChain());
+ backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
}
- long chainId = chainNum.getAndIncrement();
- ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = backingChainsBuilder.build();
+
+ final long chainId = chainNum.getAndIncrement();
 LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener,
 backingChains);
return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, coordinator, listener);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
* NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which is backed
* by several {@link DOMStoreTransactionChain} differentiated by provided
implements DOMTransactionChain, DOMDataCommitErrorListener {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
+ private final AtomicLong txNum = new AtomicLong();
private final DOMDataCommitExecutor coordinator;
private final TransactionChainListener listener;
private final long chainId;
- private final AtomicLong txNum = new AtomicLong();
- @GuardedBy("this")
- private boolean failed = false;
+
+ private volatile boolean failed = false;
/**
*
* If any of arguments is null.
*/
public DOMDataBrokerTransactionChainImpl(final long chainId,
- final ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
final DOMDataCommitExecutor coordinator, final TransactionChainListener listener) {
super(chains);
this.chainId = chainId;
}
@Override
- public synchronized CheckedFuture<Void,TransactionCommitFailedException> submit(
+ public CheckedFuture<Void,TransactionCommitFailedException> submit(
final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotClosed();
+
return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
}
@Override
- public synchronized void close() {
+ public void close() {
super.close();
+
for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
subChain.close();
}
if (!failed) {
 LOG.debug("Transaction chain {} successfully finished.", this);
+ // FIXME: this event should be emitted once all operations complete
listener.onTransactionChainSuccessful(this);
}
}
@Override
- public synchronized void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
failed = true;
 LOG.debug("Transaction chain {} failed.", this, cause);
listener.onTransactionChainFailed(this, tx, cause);
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
-import javax.annotation.concurrent.GuardedBy;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
}
/**
- *
* Implementation of blocking three-phase commit-coordination tasks without
- * support of cancelation.
- *
+ * support of cancellation.
*/
- private static class CommitCoordinationTask implements Callable<Void> {
-
+ private static final class CommitCoordinationTask implements Callable<Void> {
+ private static final AtomicReferenceFieldUpdater<CommitCoordinationTask, CommitPhase> PHASE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase");
private final DOMDataWriteTransaction tx;
private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
private final DurationStatsTracker commitStatTracker;
private final int cohortSize;
-
- @GuardedBy("this")
- private CommitPhase currentPhase;
+ private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
final DurationStatsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
- this.currentPhase = CommitPhase.SUBMITTED;
this.commitStatTracker = commitStatTracker;
this.cohortSize = Iterables.size(cohorts);
}
@Override
public Void call() throws TransactionCommitFailedException {
+ final long startTime = commitStatTracker != null ? System.nanoTime() : 0;
- long startTime = System.nanoTime();
try {
canCommitBlocking();
preCommitBlocking();
commitBlocking();
return null;
} catch (TransactionCommitFailedException e) {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, e);
- abortBlocking(e);
+ final CommitPhase phase = currentPhase;
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e);
+ abortBlocking(e, phase);
throw e;
} finally {
- if(commitStatTracker != null) {
+ if (commitStatTracker != null) {
commitStatTracker.addDuration(System.nanoTime() - startTime);
}
}
* Exception which should be used to fail transaction for
* consumers of transaction
* future and listeners of transaction failure.
+ * @param phase phase in which the problem ensued
* @throws TransactionCommitFailedException
* on invocation of this method.
* originalCa
* @throws IllegalStateException
* if abort failed.
*/
- private void abortBlocking(final TransactionCommitFailedException originalCause)
+ private void abortBlocking(final TransactionCommitFailedException originalCause, final CommitPhase phase)
throws TransactionCommitFailedException {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, originalCause);
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, originalCause);
Exception cause = originalCause;
try {
- abortAsyncAll().get();
+ abortAsyncAll(phase).get();
} catch (InterruptedException | ExecutionException e) {
LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
cause = new IllegalStateException("Abort failed.", e);
}
/**
- *
* Invokes abort on underlying cohorts and returns future which
- * completes
- * once all abort on cohorts are completed.
+ * completes once all abort on cohorts are completed.
*
+ * @param phase phase in which the problem ensued
* @return Future which will complete once all cohorts completed
* abort.
- *
*/
- private ListenableFuture<Void> abortAsyncAll() {
- changeStateFrom(currentPhase, CommitPhase.ABORT);
+ private ListenableFuture<Void> abortAsyncAll(final CommitPhase phase) {
+ changeStateFrom(phase, CommitPhase.ABORT);
final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
int i = 0;
}
/*
- * We are returing all futures as list, not only succeeded ones in
+ * We are returning all futures as list, not only succeeded ones in
* order to fail composite future if any of them failed.
* See Futures.allAsList for this description.
*/
* @throws IllegalStateException
* If currentState of task does not match expected state
*/
- private synchronized void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
- Preconditions.checkState(currentPhase.equals(currentExpected),
- "Invalid state transition: Tx: %s current state: %s new state: %s", tx.getIdentifier(),
- currentPhase, newState);
- LOG.debug("Transaction {}: Phase {} Started ", tx.getIdentifier(), newState);
- currentPhase = newState;
- };
+ private void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
+ final boolean success = PHASE_UPDATER.compareAndSet(this, currentExpected, newState);
+ Preconditions.checkState(success, "Invalid state transition: Tx: %s expected: %s current: %s target: %s",
+ tx.getIdentifier(), currentExpected, currentPhase, newState);
+ LOG.debug("Transaction {}: Phase {} Started", tx.getIdentifier(), newState);
+ };
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
- *
* Read Only Transaction, which is composed of several
* {@link DOMStoreReadTransaction} transactions. Subtransaction is selected by
* {@link LogicalDatastoreType} type parameter in
DOMDataReadOnlyTransaction {
protected DOMForwardedReadOnlyTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
super(identifier, backingTxs);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
public void close() {
closeSubtransactions();
}
-
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
*
* Read-Write Transaction, which is composed of several
* transactions.
*
*/
-
-class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements
- DOMDataReadWriteTransaction {
-
+final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
protected DOMForwardedReadWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs, commitImpl);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import static com.google.common.base.Preconditions.checkState;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- *
- *
* Read-Write Transaction, which is composed of several
- * {@link DOMStoreWriteTransaction} transactions. Subtransaction is selected by
+ * {@link DOMStoreWriteTransaction} transactions. A sub-transaction is selected by
* {@link LogicalDatastoreType} type parameter in:
*
* <ul>
* invocation with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} for underlying
* transactions.
*
- * @param <T>
- * Subtype of {@link DOMStoreWriteTransaction} which is used as
+ * @param <T> Subtype of {@link DOMStoreWriteTransaction} which is used as
* subtransaction.
*/
class DOMForwardedWriteTransaction<T extends DOMStoreWriteTransaction> extends
AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, DOMDataCommitImplementation> IMPL_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, DOMDataCommitImplementation.class, "commitImpl");
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, Future> FUTURE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class);
+ private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
/**
- * Implementation of real commit.
- *
- * Transaction can not be commited if commitImpl is null,
- * so this seting this property to null is also used to
- * prevent write to
- * already commited / canceled transaction {@link #checkNotCanceled()
- *
- *
+ * Implementation of real commit. It also acts as an indication that
+ * the transaction is running -- which we flip atomically using
+ * {@link #IMPL_UPDATER}.
*/
- @GuardedBy("this")
private volatile DOMDataCommitImplementation commitImpl;
/**
+ * Future task of transaction commit. It starts off as null, but is
+ * set appropriately on {@link #submit()} and {@link #cancel()} via
+ * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
*
- * Future task of transaction commit.
- *
- * This value is initially null, and is once updated if transaction
- * is commited {@link #commit()}.
- * If this future exists, transaction MUST not be commited again
- * and all modifications should fail. See {@link #checkNotCommited()}.
- *
+ * Lazy set is safe for use because it is only referenced to in the
+ * {@link #cancel()} slow path, where we will busy-wait for it. The
+ * fast path gets the benefit of a store-store barrier instead of the
+ * usual store-load barrier.
*/
- @GuardedBy("this")
- private volatile CheckedFuture<Void, TransactionCommitFailedException> commitFuture;
+ private volatile Future<?> commitFuture;
protected DOMForwardedWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
+ final Map<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs);
this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
}
@Override
public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).write(path, data);
}
@Override
public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).delete(path);
}
@Override
public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).merge(path, data);
}
@Override
- public synchronized boolean cancel() {
- // Transaction is already canceled, we are safe to return true
- final boolean cancelationResult;
- if (commitImpl == null && commitFuture != null) {
- // Transaction is submitted, we try to cancel future.
- cancelationResult = commitFuture.cancel(false);
- } else if(commitImpl == null) {
+ public boolean cancel() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ if (impl != null) {
+ LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+ FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
return true;
- } else {
- cancelationResult = true;
- commitImpl = null;
}
- return cancelationResult;
+ // The transaction is in process of being submitted or cancelled. Busy-wait
+ // for the corresponding future.
+ Future<?> future;
+ do {
+ future = commitFuture;
+ } while (future == null);
+
+ return future.cancel(false);
}
@Override
- public synchronized ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
}
@Override
- public CheckedFuture<Void,TransactionCommitFailedException> submit() {
- checkNotReady();
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ checkRunning(impl);
- ImmutableList.Builder<DOMStoreThreePhaseCommitCohort> cohortsBuilder = ImmutableList.builder();
- for (DOMStoreWriteTransaction subTx : getSubtransactions()) {
- cohortsBuilder.add(subTx.ready());
- }
- ImmutableList<DOMStoreThreePhaseCommitCohort> cohorts = cohortsBuilder.build();
- commitFuture = commitImpl.submit(this, cohorts);
-
- /*
- *We remove reference to Commit Implementation in order
- *to prevent memory leak
- */
- commitImpl = null;
- return commitFuture;
- }
+ final Collection<T> txns = getSubtransactions();
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
- private void checkNotReady() {
- checkNotCommited();
- checkNotCanceled();
- }
+ // FIXME: deal with errors thrown by backed (ready and submit can fail in theory)
+ for (DOMStoreWriteTransaction txn : txns) {
+ cohorts.add(txn.ready());
+ }
- private void checkNotCanceled() {
- Preconditions.checkState(commitImpl != null, "Transaction was canceled.");
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+ FUTURE_UPDATER.lazySet(this, ret);
+ return ret;
}
- private void checkNotCommited() {
- checkState(commitFuture == null, "Transaction was already submited.");
+ private void checkRunning(final DOMDataCommitImplementation impl) {
+ Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
}
-}
\ No newline at end of file
+}
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
public class DOMBrokerTest {
private SchemaContext schemaContext;
commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
domBroker = new DOMDataBrokerImpl(stores, executor);
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
Futures.addCallback( writeTx.submit(), new FutureCallback<Void>() {
@Override
- public void onSuccess( Void result ) {
+ public void onSuccess( final Void result ) {
commitCompletedLatch.countDown();
}
@Override
- public void onFailure( Throwable t ) {
+ public void onFailure( final Throwable t ) {
caughtCommitEx.set( t );
commitCompletedLatch.countDown();
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
private final CountDownLatch latch = new CountDownLatch( 1 );
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
this.change = change;
latch.countDown();
}
ExecutorService delegate;
- public CommitExecutorService( ExecutorService delegate ) {
+ public CommitExecutorService( final ExecutorService delegate ) {
this.delegate = delegate;
}
return this.bluePrint;
}
- public List<Object> collectModuleRoots(XSQLBluePrintNode table) {
+ public List<Object> collectModuleRoots(XSQLBluePrintNode table,LogicalDatastoreType type) {
if (table.getParent().isModule()) {
try {
List<Object> result = new LinkedList<Object>();
.toInstance();
DOMDataReadTransaction t = this.domDataBroker
.newReadOnlyTransaction();
- Object node = t.read(LogicalDatastoreType.OPERATIONAL,
+ Object node = t.read(type,
instanceIdentifier).get();
+
node = XSQLODLUtils.get(node, "reference");
if (node == null) {
return result;
XSQLAdapter.log(err);
}
} else {
- return collectModuleRoots(table.getParent());
+ return collectModuleRoots(table.getParent(),type);
}
return null;
}
public void execute(JDBCResultSet rs) {
List<XSQLBluePrintNode> tables = rs.getTables();
- List<Object> roots = collectModuleRoots(tables.get(0));
+ List<Object> roots = collectModuleRoots(tables.get(0),LogicalDatastoreType.OPERATIONAL);
+ roots.addAll(collectModuleRoots(tables.get(0),LogicalDatastoreType.CONFIGURATION));
+ if(roots.isEmpty()){
+ rs.setFinished(true);
+ }
XSQLBluePrintNode main = rs.getMainTable();
List<NETask> tasks = new LinkedList<XSQLAdapter.NETask>();
out.print(prompt);
char c = 0;
byte data[] = new byte[1];
- while (c != '\n') {
+ while (!socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && c != '\n') {
try {
in.read(data);
c = (char) data[0];
inputString.append(c);
} catch (Exception err) {
err.printStackTrace(out);
+ stopped = true;
+ break;
}
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
* to implement {@link DOMStore} contract.
*
*/
-public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
- TransactionReadyPrototype,AutoCloseable {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
private final ListenerTree listenerTree = ListenerTree.create();
private final AtomicLong txCounter = new AtomicLong(0);
- private final ListeningExecutorService listeningExecutor;
private final QueuedNotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> dataChangeListenerNotificationManager;
private final ExecutorService dataChangeListenerExecutor;
-
- private final ExecutorService domStoreExecutor;
+ private final ListeningExecutorService commitExecutor;
private final boolean debugTransactions;
private final String name;
private volatile AutoCloseable closeable;
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor) {
- this(name, domStoreExecutor, dataChangeListenerExecutor,
+ this(name, commitExecutor, dataChangeListenerExecutor,
InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false);
}
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor, final int maxDataChangeListenerQueueSize,
final boolean debugTransactions) {
this.name = Preconditions.checkNotNull(name);
- this.domStoreExecutor = Preconditions.checkNotNull(domStoreExecutor);
- this.listeningExecutor = MoreExecutors.listeningDecorator(this.domStoreExecutor);
+ this.commitExecutor = Preconditions.checkNotNull(commitExecutor);
this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
this.debugTransactions = debugTransactions;
"DataChangeListenerQueueMgr");
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
}
public ExecutorService getDomStoreExecutor() {
- return domStoreExecutor;
+ return commitExecutor;
}
@Override
@Override
public void close() {
- ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+ ExecutorServiceUtil.tryGracefulShutdown(commitExecutor, 30, TimeUnit.SECONDS);
ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
if(closeable != null) {
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction writeTx) {
- LOG.debug("Tx: {} is submitted. Modifications: {}", writeTx.getIdentifier(), writeTx.getMutatedView());
- return new ThreePhaseCommitImpl(writeTx);
+ protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ LOG.debug("Tx: {} is closed.", tx.getIdentifier());
+ }
+
+ @Override
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
+ return new ThreePhaseCommitImpl(tx, tree);
}
private Object nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private class DOMStoreTransactionChainImpl implements DOMStoreTransactionChain, TransactionReadyPrototype {
-
+ private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
+ @GuardedBy("this")
+ private SnapshotBackedWriteTransaction allocatedTransaction;
+ @GuardedBy("this")
+ private DataTreeSnapshot readySnapshot;
@GuardedBy("this")
- private SnapshotBackedWriteTransaction latestOutstandingTx;
-
private boolean chainFailed = false;
+ @GuardedBy("this")
private void checkFailed() {
Preconditions.checkState(!chainFailed, "Transaction chain is failed.");
}
- @Override
- public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
- final DataTreeSnapshot snapshot;
+ @GuardedBy("this")
+ private DataTreeSnapshot getSnapshot() {
checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
+
+ if (allocatedTransaction != null) {
+ Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier());
+ return readySnapshot;
} else {
- snapshot = dataTree.takeSnapshot();
+ return dataTree.takeSnapshot();
}
+ }
+
+ @GuardedBy("this")
+ private <T extends SnapshotBackedWriteTransaction> T recordTransaction(final T transaction) {
+ allocatedTransaction = transaction;
+ readySnapshot = null;
+ return transaction;
+ }
+
+ @Override
+ public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
+ final DataTreeSnapshot snapshot = getSnapshot();
return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot);
}
@Override
public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
- }
- final SnapshotBackedReadWriteTransaction ret = new SnapshotBackedReadWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
}
@Override
public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
+ }
+
+ @Override
+ protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ if (tx.equals(allocatedTransaction)) {
+ Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot);
+ allocatedTransaction = null;
}
- final SnapshotBackedWriteTransaction ret = new SnapshotBackedWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction tx) {
- DOMStoreThreePhaseCommitCohort storeCohort = InMemoryDOMDataStore.this.ready(tx);
- return new ChainedTransactionCommitImpl(tx, storeCohort, this);
+ protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction);
+ if (readySnapshot != null) {
+            // The ready snapshot should have been cleared by a prior commit or abort
+ LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree);
+ }
+
+ final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree);
+ readySnapshot = tree;
+ return new ChainedTransactionCommitImpl(tx, cohort, this);
}
@Override
public void close() {
-
// FIXME: this call doesn't look right here - listeningExecutor is shared and owned
// by the outer class.
//listeningExecutor.shutdownNow();
protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
final Throwable t) {
chainFailed = true;
-
}
public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If committed transaction is latestOutstandingTx we clear
- // latestOutstandingTx
- // field in order to base new transactions on Datastore Data Tree
- // directly.
- if (transaction.equals(latestOutstandingTx)) {
- latestOutstandingTx = null;
+ // If the committed transaction was the one we allocated last,
+ // we clear it and the ready snapshot, so the next transaction
+ // allocated refers to the data tree directly.
+ if (transaction.equals(allocatedTransaction)) {
+ if (readySnapshot == null) {
+ LOG.warn("Transaction {} committed while no ready snapshot present", transaction);
+ }
+
+ allocatedTransaction = null;
+ readySnapshot = null;
}
}
-
}
private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DOMStoreThreePhaseCommitCohort delegate;
-
private final DOMStoreTransactionChainImpl txChain;
protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- super();
this.transaction = transaction;
this.delegate = delegate;
this.txChain = txChain;
public void onSuccess(final Void result) {
txChain.onTransactionCommited(transaction);
}
-
});
return commitFuture;
}
-
}
private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DataTreeModification modification;
private ResolveDataChangeEventsTask listenerResolver;
private DataTreeCandidate candidate;
- public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction) {
+ public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
this.transaction = writeTransaction;
- this.modification = transaction.getMutatedView();
+ this.modification = modification;
}
@Override
public ListenableFuture<Boolean> canCommit() {
- return listeningExecutor.submit(new Callable<Boolean>() {
+ return commitExecutor.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws TransactionCommitFailedException {
try {
@Override
public ListenableFuture<Void> preCommit() {
- return listeningExecutor.submit(new Callable<Void>() {
+ return commitExecutor.submit(new Callable<Void>() {
@Override
public Void call() {
candidate = dataTree.prepare(modification);
* The commit has to occur atomically with regard to listener
* registrations.
*/
- synchronized (this) {
+ synchronized (InMemoryDOMDataStore.this) {
dataTree.commit(candidate);
listenerResolver.resolve(dataChangeListenerNotificationManager);
}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
@Nullable final InMemoryDOMDataStoreConfigProperties properties) {
InMemoryDOMDataStoreConfigProperties actualProperties = properties;
- if(actualProperties == null) {
+ if (actualProperties == null) {
actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault();
}
// task execution time to get higher throughput as DataChangeListeners typically provide
// much of the business logic for a data model. If the executor queue size limit is reached,
// subsequent submitted notifications will block the calling thread.
-
int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize();
int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize();
ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
- ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- actualProperties.getMaxDataStoreExecutorQueueSize(), "DOMStore-" + name );
-
- InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
- domStoreExecutor, dataChangeListenerExecutor,
+ final ListeningExecutorService commitExecutor = MoreExecutors.sameThreadExecutor();
+ final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+ commitExecutor, dataChangeListenerExecutor,
actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions);
- if(schemaService != null) {
+ if (schemaService != null) {
schemaService.registerSchemaContextListener(dataStore);
}
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
-
import java.util.Collection;
import java.util.Map.Entry;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
Preconditions.checkArgument(node.getDataAfter().isPresent(),
"Modification at {} has type {} but no after-data", state.getPath(), node.getModificationType());
if (!node.getDataBefore().isPresent()) {
- resolveCreateEvent(state, node.getDataAfter().get());
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ final NormalizedNode<PathArgument, ?> afterNode = (NormalizedNode)node.getDataAfter().get();
+ resolveSameEventRecursivelly(state, afterNode, DOMImmutableDataChangeEvent.getCreateEventFactory());
return true;
}
case DELETE:
Preconditions.checkArgument(node.getDataBefore().isPresent(),
"Modification at {} has type {} but no before-data", state.getPath(), node.getModificationType());
- resolveDeleteEvent(state, node.getDataBefore().get());
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ final NormalizedNode<PathArgument, ?> beforeNode = (NormalizedNode)node.getDataBefore().get();
+ resolveSameEventRecursivelly(state, beforeNode, DOMImmutableDataChangeEvent.getRemoveEventFactory());
return true;
case UNMODIFIED:
return false;
return true;
}
- /**
- * Resolves create events deep down the interest listener tree.
- *
- * @param path
- * @param listeners
- * @param afterState
- * @return
- */
- private void resolveCreateEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> afterState) {
- @SuppressWarnings({ "unchecked", "rawtypes" })
- final NormalizedNode<PathArgument, ?> node = (NormalizedNode) afterState;
- resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getCreateEventFactory());
- }
-
- private void resolveDeleteEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> beforeState) {
- @SuppressWarnings({ "unchecked", "rawtypes" })
- final NormalizedNode<PathArgument, ?> node = (NormalizedNode) beforeState;
- resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getRemoveEventFactory());
- }
-
private void resolveSameEventRecursivelly(final ResolveDataChangeState state,
final NormalizedNode<PathArgument, ?> node, final SimpleEventFactory eventFactory) {
if (!state.needsProcessing()) {
Preconditions.checkArgument(modification.getDataBefore().isPresent(), "Subtree change with before-data not present at path %s", state.getPath());
Preconditions.checkArgument(modification.getDataAfter().isPresent(), "Subtree change with after-data not present at path %s", state.getPath());
+ if (!state.needsProcessing()) {
+ LOG.trace("Not processing modified subtree {}", state.getPath());
+ return true;
+ }
+
DataChangeScope scope = null;
for (DataTreeCandidateNode childMod : modification.getChildNodes()) {
final ResolveDataChangeState childState = state.child(childMod.getIdentifier());
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkNotNull;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* and executed according to {@link TransactionReadyPrototype}.
*
*/
-class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
- implements DOMStoreReadWriteTransaction {
-
+final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
/**
LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
checkNotNull(path, "Path must not be null.");
- DataTreeModification dataView = getMutatedView();
- if(dataView == null) {
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
- }
-
+ final Optional<NormalizedNode<?, ?>> result;
try {
- return Futures.immediateCheckedFuture(dataView.readNode(path));
+ result = readSnapshotNode(path);
} catch (Exception e) {
LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e));
+ }
+
+ if (result == null) {
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
+ } else {
+ return Futures.immediateCheckedFuture(result);
}
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*
*/
class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
-
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
- private DataTreeModification mutableTree;
- private boolean ready = false;
- private TransactionReadyPrototype readyImpl;
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
+
+ // non-null when not ready
+ private volatile TransactionReadyPrototype readyImpl;
+ // non-null when not committed/closed
+ private volatile DataTreeModification mutableTree;
/**
* Creates new write-only transaction.
public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
super(identifier, debug);
- mutableTree = snapshot.newModification();
this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
+ mutableTree = snapshot.newModification();
LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot);
}
- @Override
- public void close() {
- LOG.debug("Store transaction: {} : Closed", getIdentifier());
- this.mutableTree = null;
- this.readyImpl = null;
- }
-
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
- mutableTree.write(path, data);
+ tree.write(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
- mutableTree.merge(path, data);
+ tree.merge(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void delete(final YangInstanceIdentifier path) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
+
try {
- LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
- mutableTree.delete(path);
+ tree.delete(path);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, mutableTree, e);
+ LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
}
}
- protected final boolean isReady() {
- return ready;
+ /**
+ * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does
+ * not allow data access after the transaction has been closed or readied.
+ *
+ * @param path Path to read
+     * @return null if the transaction has been closed, otherwise the result of reading the path.
+ */
+ protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+ return readyImpl == null ? null : mutableTree.readNode(path);
}
- protected final void checkNotReady() {
- checkState(!ready, "Transaction %s is ready. No further modifications allowed.", getIdentifier());
+ private final void checkNotReady() {
+ checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.", getIdentifier());
}
@Override
- public synchronized DOMStoreThreePhaseCommitCohort ready() {
- checkState(!ready, "Transaction %s is already ready.", getIdentifier());
- ready = true;
+ public DOMStoreThreePhaseCommitCohort ready() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
+
LOG.debug("Store transaction: {} : Ready", getIdentifier());
- mutableTree.ready();
- return readyImpl.ready(this);
+
+ final DataTreeModification tree = mutableTree;
+ TREE_UPDATER.lazySet(this, null);
+ tree.ready();
+ return wasReady.transactionReady(this, tree);
}
- protected DataTreeModification getMutatedView() {
- return mutableTree;
+ @Override
+ public void close() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ if (wasReady != null) {
+ LOG.debug("Store transaction: {} : Closed", getIdentifier());
+ TREE_UPDATER.lazySet(this, null);
+ wasReady.transactionAborted(this);
+ } else {
+ LOG.debug("Store transaction: {} : Closed after submit", getIdentifier());
+ }
}
@Override
protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("ready", isReady());
+ return toStringHelper.add("ready", readyImpl == null);
}
/**
* providing underlying logic for applying implementation.
*
*/
- public static interface TransactionReadyPrototype {
+ abstract static class TransactionReadyPrototype {
+ /**
+ * Called when a transaction is closed without being readied. This is not invoked for
+ * transactions which are ready.
+ *
+ * @param tx Transaction which got aborted.
+ */
+ protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
/**
* Returns a commit coordinator associated with supplied transactions.
*
* @param tx
* Transaction on which ready was invoked.
+ * @param tree
+ * Modified data tree which has been constructed.
* @return DOMStoreThreePhaseCommitCohort associated with transaction
*/
- DOMStoreThreePhaseCommitCohort ready(SnapshotBackedWriteTransaction tx);
+ protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
}
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl.jmx;
import java.util.concurrent.ExecutorService;
-
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
*/
public class InMemoryDataStoreStats implements AutoCloseable {
- private final ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean;
- private final ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean;
+ private final AbstractMXBean notificationExecutorStatsBean;
+ private final AbstractMXBean dataStoreExecutorStatsBean;
private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
- public InMemoryDataStoreStats(String mBeanType, QueuedNotificationManager<?, ?> manager,
- ExecutorService dataStoreExecutor) {
+ public InMemoryDataStoreStats(final String mBeanType, final QueuedNotificationManager<?, ?> manager,
+ final ExecutorService dataStoreExecutor) {
- this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
+ notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", mBeanType, null);
notificationManagerStatsBean.registerMBean();
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
"notification-executor", mBeanType, null);
- this.notificationExecutorStatsBean.registerMBean();
+ if (notificationExecutorStatsBean != null) {
+ notificationExecutorStatsBean.registerMBean();
+ }
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dataStoreExecutor,
+ dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dataStoreExecutor,
"data-store-executor", mBeanType, null);
- this.dataStoreExecutorStatsBean.registerMBean();
+ if (dataStoreExecutorStatsBean != null) {
+ dataStoreExecutorStatsBean.registerMBean();
+ }
}
@Override
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
-
+import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
-
import org.opendaylight.controller.netconf.client.NetconfClientSession;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
return fromStrings(session.getServerCapabilities());
}
- private static final QName cachedQName(String namespace, String revision, String moduleName) {
+ private static QName cachedQName(final String namespace, final String revision, final String moduleName) {
return QName.cachedReference(QName.create(namespace, revision, moduleName));
}
+ private static QName cachedQName(final String namespace, final String moduleName) {
+ return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
+ }
+
public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
final Set<QName> moduleBasedCaps = new HashSet<>();
final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
String revision = REVISION_PARAM.from(queryParams);
if (revision != null) {
- moduleBasedCaps.add(cachedQName(namespace, revision, moduleName));
- nonModuleCaps.remove(capability);
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
continue;
}
* We have seen devices which mis-escape revision, but the revision may not
* even be there. First check if there is a substring that matches revision.
*/
- if (!Iterables.any(queryParams, CONTAINS_REVISION)) {
+ if (Iterables.any(queryParams, CONTAINS_REVISION)) {
+
+ LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
+ revision = BROKEN_REVISON_PARAM.from(queryParams);
+ if (revision == null) {
+ LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
+ } else {
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
+ }
continue;
}
- LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
- revision = BROKEN_REVISON_PARAM.from(queryParams);
- if (revision == null) {
- LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
- }
-
- // FIXME: do we really want to continue here?
- moduleBasedCaps.add(cachedQName(namespace, revision, moduleName));
- nonModuleCaps.remove(capability);
+ // Fallback, no revision provided for module
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
}
return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
}
+
+
+ private static void addModuleQName(final Set<QName> moduleBasedCaps, final Set<String> nonModuleCaps, final String capability, final QName qName) {
+ moduleBasedCaps.add(qName);
+ nonModuleCaps.remove(capability);
+ }
}
assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
}
+ @Test
+ public void testCapabilityNoRevision() throws Exception {
+ final List<String> caps1 = Lists.newArrayList(
+ "namespace:2?module=module2",
+ "namespace:2?module=module2&revision=2012-12-12",
+ "namespace:2?module=module1&RANDOMSTRING;revision=2013-12-12",
+            "namespace:2?module=module2&RANDOMSTRING;revision=2013-12-12" // This one should be ignored (same as first), since revision is in wrong format
+ );
+
+ final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ assertCaps(sessionCaps1, 0, 3);
+ }
+
private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
Thread.currentThread().getContextClassLoader());
Config actorSystemConfig = config.get();
- LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
+ }
if (config.isMetricCaptureEnabled()) {
LOG.info("Instrumentation is enabled in actor system {}. Metrics can be viewed in JMX console.",
config.getActorSystemName());
* @param announcements
*/
private void announce(Set<RpcRouter.RouteIdentifier<?, ?, ?>> announcements) {
- LOG.debug("Announcing [{}]", announcements);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Announcing [{}]", announcements);
+ }
RpcRegistry.Messages.AddOrUpdateRoutes addRpcMsg = new RpcRegistry.Messages.AddOrUpdateRoutes(new ArrayList<>(announcements));
rpcRegistry.tell(addRpcMsg, ActorRef.noSender());
}
* @param removals
*/
private void remove(Set<RpcRouter.RouteIdentifier<?, ?, ?>> removals){
- LOG.debug("Removing [{}]", removals);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Removing [{}]", removals);
+ }
RpcRegistry.Messages.RemoveRoutes removeRpcMsg = new RpcRegistry.Messages.RemoveRoutes(new ArrayList<>(removals));
rpcRegistry.tell(removeRpcMsg, ActorRef.noSender());
}
}
private void invokeRemoteRpc(final InvokeRpc msg) {
- LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
+ }
RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(
null, msg.getRpc(), msg.getIdentifier());
RpcRegistry.Messages.FindRouters findMsg = new RpcRegistry.Messages.FindRouters(routeId);
}
private void executeRpc(final ExecuteRpc msg) {
- LOG.debug("Executing rpc {}", msg.getRpc());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Executing rpc {}", msg.getRpc());
+ }
Future<RpcResult<CompositeNode>> future = brokerSession.rpc(msg.getRpc(),
XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(),
schemaContext));
@Override
public void onRpcImplementationAdded(QName rpc) {
- LOG.debug("Adding registration for [{}]", rpc);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Adding registration for [{}]", rpc);
+ }
RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
routeIds.add(routeId);
@Override
public void onRpcImplementationRemoved(QName rpc) {
- LOG.debug("Removing registration for [{}]", rpc);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Removing registration for [{}]", rpc);
+ }
RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
routeIds.add(routeId);
@Override public void onReceive(Object message) throws Exception {
if(message instanceof Terminated){
Terminated terminated = (Terminated) message;
- LOG.debug("Actor terminated : {}", terminated.actor());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor terminated : {}", terminated.actor());
+ }
}else if(message instanceof Monitor){
Monitor monitor = (Monitor) message;
getContext().watch(monitor.getActorRef());
receiveUpdateRemoteBuckets(
((UpdateRemoteBuckets) message).getBuckets());
} else {
- log.debug("Unhandled message [{}]", message);
+ if(log.isDebugEnabled()) {
+ log.debug("Unhandled message [{}]", message);
+ }
unhandled(message);
}
}
versions.put(entry.getKey(), remoteVersion);
}
}
-
- log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+ if(log.isDebugEnabled()) {
+ log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+ }
}
///
}
clusterMembers.remove(member.address());
- log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+ if(log.isDebugEnabled()) {
+ log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+ }
}
/**
if (!clusterMembers.contains(member.address()))
clusterMembers.add(member.address());
-
- log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+ if(log.isDebugEnabled()) {
+ log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+ }
}
/**
Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
remoteMemberToGossipTo = clusterMembers.get(randomIndex);
}
-
- log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+ if(log.isDebugEnabled()) {
+ log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+ }
getLocalStatusAndSendTo(remoteMemberToGossipTo);
}
void receiveGossip(GossipEnvelope envelope){
//TODO: Add more validations
if (!selfAddress.equals(envelope.to())) {
- log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+ if(log.isDebugEnabled()) {
+ log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+ }
return;
}
ActorSelection remoteRef = getContext().system().actorSelection(
remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());
- log.debug("Sending bucket versions to [{}]", remoteRef);
+ if(log.isDebugEnabled()) {
+ log.debug("Sending bucket versions to [{}]", remoteRef);
+ }
futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
public Void apply(Object msg) {
if (msg instanceof GetBucketsByMembersReply) {
Map<Address, Bucket> buckets = ((GetBucketsByMembersReply) msg).getBuckets();
- log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+ if(log.isDebugEnabled()) {
+ log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+ }
GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets);
sender.tell(envelope, getSelf());
}
@GET
@Path("/modules")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModules(@Context UriInfo uriInfo);
@GET
@Path("/modules/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/modules/module/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/operations")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getOperations(@Context UriInfo uriInfo);
@GET
@Path("/operations/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getOperations(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/streams")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getAvailableStreams(@Context UriInfo uriInfo);
LOG.debug("In toResponse: {}", exception.getMessage());
- // Default to the content type if there's no Accept header
- MediaType mediaType = headers.getMediaType();
List<MediaType> accepts = headers.getAcceptableMediaTypes();
+ accepts.remove(MediaType.WILDCARD_TYPE);
LOG.debug("Accept headers: {}", accepts);
+ final MediaType mediaType;
if (accepts != null && accepts.size() > 0) {
mediaType = accepts.get(0); // just pick the first one
+ } else {
+ // Default to the content type if there's no Accept header
+ mediaType = MediaType.APPLICATION_JSON_TYPE;
}
LOG.debug("Using MediaType: {}", mediaType);
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>clustering-it</artifactId>
+    <groupId>org.opendaylight.controller.samples</groupId>
+    <version>1.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>clustering-it-config</artifactId>
+  <packaging>jar</packaging>
+  <build>
+    <plugins>
+      <!-- Attach the initial configuration files as secondary artifacts so other
+           poms and feature repositories can depend on them by type + classifier. -->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <!-- Config-subsystem snapshot that wires up the clustering test app. -->
+                <artifact>
+                  <file>${project.build.directory}/classes/initial/20-clustering-test-app.xml</file>
+                  <type>xml</type>
+                  <classifier>config</classifier>
+                </artifact>
+                <!-- NOTE(review): the two .conf files below are HOCON, yet they are
+                     attached with <type>xml</type>; downstream dependency declarations
+                     reference this exact type/classifier pair, so confirm before
+                     changing the type to something more accurate. -->
+                <artifact>
+                  <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+                  <type>xml</type>
+                  <classifier>testmoduleshardconf</classifier>
+                </artifact>
+                <artifact>
+                  <file>${project.build.directory}/classes/initial/modules.conf</file>
+                  <type>xml</type>
+                  <classifier>testmoduleconf</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider">
+ prefix:clustering-it-provider
+ </type>
+ <name>clustering-it-provider</name>
+
+ <rpc-registry>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <name>binding-rpc-broker</name>
+ </rpc-registry>
+ <data-broker>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
+ </data-broker>
+ <notification-service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
+ binding:binding-notification-service
+ </type>
+ <name>binding-notification-broker</name>
+ </notification-service>
+ </module>
+ </modules>
+ </data>
+
+ </configuration>
+
+ <required-capabilities>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider?module=clustering-it-provider&amp;revision=2014-08-19</capability>
+
+ </required-capabilities>
+
+</snapshot>
+
--- /dev/null
+# This file describes which shards live on which members
+# The format for a module-shards is as follows,
+# {
+#     name = "<friendly_name_of_the_module>"
+#     shards = [
+#         {
+#             name="<any_name_that_is_unique_for_the_module>"
+#             replicas = [
+#                 "<name_of_member_on_which_to_run>"
+#             ]
+#         }
+#     ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than 1
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store then
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+ {
+ name = "default"
+ shards = [
+ {
+ name="default"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "topology"
+ shards = [
+ {
+ name="topology"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "inventory"
+ shards = [
+ {
+ name="inventory"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "toaster"
+ shards = [
+ {
+ name="toaster"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "car"
+ shards = [
+ {
+ name="car"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "people"
+ shards = [
+ {
+ name="people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "car-people"
+ shards = [
+ {
+ name="car-people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+
+]
--- /dev/null
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+# name = "<friendly_name_of_module>"
+# namespace = "<the yang namespace of the module>"
+# shard-strategy = "module"
+# }
+#
+# Note that at this time the only shard-strategy we support is module which basically
+# will put all the data of a single module in two shards (one for config and one for
+# operational data)
+
+modules = [
+ {
+ name = "inventory"
+ namespace = "urn:opendaylight:inventory"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "topology"
+ namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "toaster"
+ namespace = "http://netconfcentral.org/ns/toaster"
+ shard-strategy = "module"
+ },
+ {
+ name = "car"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+ shard-strategy = "module"
+ },
+ {
+ name = "people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "car-people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+ shard-strategy = "module"
+ }
+]
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-model</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>org.opendaylight.controller.sal-clustering-it-model</Bundle-Name>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/generated-sources/sal</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!--This plugin's configuration is used to store Eclipse
+ m2e settings only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ <version>${ietf-inet-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ <version>${ietf-yang-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ <version>${yang-ext.version}</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+module car-people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
+
+ prefix car;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+ import car { prefix "c"; revision-date 2014-08-18; }
+ import people { prefix "people"; revision-date 2014-08-18; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ container car-people {
+ description
+      "Top-level container for the map of cars to people";
+
+ list car-person {
+ key "car-id person-id";
+ description "A mapping of cars and people.";
+ leaf car-id {
+ type c:car-id;
+ }
+
+ leaf person-id {
+ type people:person-id;
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car-purchase {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
+
+ prefix cp;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+ import car { prefix "car"; revision-date 2014-08-18; }
+ import people { prefix "person"; revision-date 2014-08-18; }
+ import yang-ext {prefix "ext"; revision-date "2013-07-09";}
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car purchase for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ rpc buy-car {
+ description
+ "buy a new car";
+ input {
+ leaf person {
+ ext:context-reference "person:person-context";
+ type person:person-ref;
+ description "A reference to a particular person.";
+ }
+
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+ }
+
+ notification carBought {
+ description
+ "Indicates that a person bought a car.";
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car";
+
+ prefix car;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ typedef car-id {
+ type inet:uri;
+ description "An identifier for car entry.";
+ }
+
+ grouping car-entry {
+ description "Describes the contents of a car entry -
+ Details of the car manufacturer, model etc";
+ leaf id {
+ type car-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf model {
+ type string;
+ }
+ leaf manufacturer {
+ type string;
+ }
+
+ leaf year {
+ type uint32;
+ }
+
+ leaf category {
+ type string;
+ }
+ }
+
+ container cars {
+ description
+ "Top-level container for all car objects.";
+ list car-entry {
+ key "id";
+ description "A list of cars (as defined by the 'grouping car-entry').";
+ uses car-entry;
+ }
+ }
+
+
+}
\ No newline at end of file
--- /dev/null
+module people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people";
+
+ prefix people;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for person for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ typedef person-id {
+ type inet:uri;
+ description "An identifier for person.";
+ }
+
+ typedef person-ref {
+ type instance-identifier;
+    description "A reference that points to a people:people/person in the data tree.";
+ }
+ identity person-context {
+ description "A person-context is a classifier for person elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
+ }
+
+ grouping person {
+ description "Describes the details of the person";
+
+ leaf id {
+ type person-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf gender {
+ type string;
+ }
+
+ leaf age {
+ type uint32;
+ }
+
+ leaf address {
+ type string;
+ }
+
+ leaf contactNo {
+ type string;
+ }
+ }
+
+ container people {
+ description
+ "Top-level container for all people";
+
+ list person {
+ key "id";
+ description "A list of people (as defined by the 'grouping person').";
+ uses person;
+ }
+ }
+
+ rpc add-person {
+ description
+ "Add a person entry into database";
+ input {
+ uses person;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>sal-samples</artifactId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it</artifactId>
+ <packaging>pom</packaging>
+ <modules>
+ <module>configuration</module>
+ <module>model</module>
+ <module>provider</module>
+ </modules>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-provider</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.controller.config.yang.config.clustering_it_provider</Export-Package>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+      <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-util</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>equinoxSDK381</groupId>
+ <artifactId>org.eclipse.osgi</artifactId>
+ <version>3.8.1.v20120830-144521</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.listener;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Listens for {@link CarBought} notifications and records each purchase as a
+ * {@link CarPerson} entry (keyed by car-id + person-id) under the car-people
+ * container in the CONFIGURATION datastore.
+ */
+public class PeopleCarListener implements CarPurchaseListener {
+
+    private static final Logger log = LoggerFactory.getLogger(PeopleCarListener.class);
+
+    // Broker used to open write transactions; injected via setDataProvider().
+    private DataBroker dataProvider;
+
+
+
+    /**
+     * Injects the {@link DataBroker} this listener writes through.
+     *
+     * @param salDataProvider the MD-SAL binding data broker
+     */
+    public void setDataProvider(final DataBroker salDataProvider) {
+        this.dataProvider = salDataProvider;
+    }
+
+    /**
+     * Handles a CarBought notification: builds a CarPerson mapping from the
+     * notification's car-id/person-id pair and writes it to the config
+     * datastore. The commit is asynchronous; the outcome is only logged.
+     */
+    @Override
+    public void onCarBought(CarBought notification) {
+        log.info("onCarBought notification : Adding car person entry");
+
+        final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
+        carPersonBuilder.setCarId(notification.getCarId());
+        carPersonBuilder.setPersonId(notification.getPersonId());
+        // The list key is the (car-id, person-id) pair carried by the notification.
+        CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
+        carPersonBuilder.setKey(key);
+        final CarPerson carPerson = carPersonBuilder.build();
+
+        // Path: /car-people/car-person[car-id][person-id]
+        InstanceIdentifier<CarPerson> carPersonIId =
+            InstanceIdentifier.<CarPeople>builder(CarPeople.class).child(CarPerson.class, carPerson.getKey()).build();
+
+
+        WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+        tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
+
+        // Fire-and-forget commit: success/failure is logged, not propagated.
+        Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(final Void result) {
+                log.info("Car bought, entry added to map of people and car [{}]", carPerson);
+            }
+
+            @Override
+            public void onFailure(final Throwable t) {
+                log.info("Car bought, Failed entry addition to map of people and car [{}]", carPerson);
+            }
+        });
+
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+public class PeopleProvider implements PeopleService, AutoCloseable {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleProvider.class);
+
+ private DataBroker dataProvider;
+
+ private BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration;
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+
+ public void setRpcRegistration(BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration) {
+ this.rpcRegistration = rpcRegistration;
+ }
+
+ @Override
+ public Future<RpcResult<Void>> addPerson(AddPersonInput input) {
+ log.info("RPC addPerson : adding person [{}]", input);
+
+ PersonBuilder builder = new PersonBuilder(input);
+ final Person person = builder.build();
+ final SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+
+ // Each entry will be identifiable by a unique key, we have to create that identifier
+ final InstanceIdentifier.InstanceIdentifierBuilder<Person> personIdBuilder =
+ InstanceIdentifier.<People>builder(People.class)
+ .child(Person.class, person.getKey());
+ final InstanceIdentifier personId = personIdBuilder.build();
+ // Place entry in data store tree
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
+
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("RPC addPerson : person added successfully [{}]", person);
+ rpcRegistration.registerPath(PersonContext.class, personId);
+ log.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
+ futureResult.set(RpcResultBuilder.<Void>success().build());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("RPC addPerson : person addition failed [{}]", person);
+ futureResult.set(RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, t.getMessage()).build());
+ }
+ });
+ return futureResult;
+ }
+
+ @Override
+ public void close() throws Exception {
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+
+/**
+ * Routed implementation of the car-purchase RPC: publishes a CarBought
+ * notification for every buyCar request.
+ */
+public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable{
+
+ private static final Logger log = LoggerFactory.getLogger(PurchaseCarProvider.class);
+
+ // Notification service used to publish CarBought events.
+ private NotificationProviderService notificationProvider;
+
+
+ public void setNotificationProvider(final NotificationProviderService salService) {
+ this.notificationProvider = salService;
+ }
+
+
+ // Routed RPC: converts the buyCar input into a CarBought notification,
+ // publishes it, and completes immediately with success.
+ @Override
+ public Future<RpcResult<Void>> buyCar(BuyCarInput input) {
+ log.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+ SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+ CarBoughtBuilder carBoughtBuilder = new CarBoughtBuilder();
+ carBoughtBuilder.setCarId(input.getCarId());
+ carBoughtBuilder.setPersonId(input.getPersonId());
+ notificationProvider.publish(carBoughtBuilder.build());
+ futureResult.set(RpcResultBuilder.<Void>success().build());
+ return futureResult;
+ }
+
+ @Override
+ public void close() throws Exception {
+ // Nothing to release; the RPC registration is owned by the config module.
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+
+
+import org.opendaylight.controller.clustering.it.listener.PeopleCarListener;
+import org.opendaylight.controller.clustering.it.provider.PeopleProvider;
+import org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+public class ClusteringItProviderModule extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModule {
+ public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.clustering_it_provider.ClusteringItProviderModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation form module attributes here.
+ }
+
+ /**
+ * Wires up the clustering-it test providers: the routed car-purchase RPC,
+ * the people RPC (which registers routed paths as people are added), and
+ * the people/car notification listener.
+ *
+ * @return an AutoCloseable that tears down every md-sal registration made here
+ */
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ DataBroker dataBrokerService = getDataBrokerDependency();
+ NotificationProviderService notificationProvider = getNotificationServiceDependency();
+
+ // Add routed RPC registration for car purchase
+ final PurchaseCarProvider purchaseCar = new PurchaseCarProvider();
+ purchaseCar.setNotificationProvider(notificationProvider);
+
+ final BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> purchaseCarRpc = getRpcRegistryDependency()
+ .addRoutedRpcImplementation(CarPurchaseService.class, purchaseCar);
+
+ // Add people provider registration; it adds a routed RPC path on the
+ // purchase-car registration for every person that is stored.
+ final PeopleProvider people = new PeopleProvider();
+ people.setDataProvider(dataBrokerService);
+
+ people.setRpcRegistration(purchaseCarRpc);
+
+ final BindingAwareBroker.RpcRegistration<PeopleService> peopleRpcReg = getRpcRegistryDependency()
+ .addRpcImplementation(PeopleService.class, people);
+
+ // Listener that reacts to car-bought notifications.
+ final PeopleCarListener peopleCarListener = new PeopleCarListener();
+ peopleCarListener.setDataProvider(dataBrokerService);
+
+ final ListenerRegistration<NotificationListener> listenerReg =
+ getNotificationServiceDependency().registerNotificationListener( peopleCarListener );
+
+ // Wrap the providers as AutoCloseable and close all registrations to
+ // md-sal at close(). (Class renamed from the copy-pasted
+ // "AutoCloseableToaster" of the toaster sample.)
+ final class AutoCloseableClusteringItProvider implements AutoCloseable {
+
+ @Override
+ public void close() throws Exception {
+ peopleRpcReg.close();
+ purchaseCarRpc.close();
+ people.close();
+ purchaseCar.close();
+ listenerReg.close();
+ }
+ }
+
+ return new AutoCloseableClusteringItProvider();
+ }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: clustering-it-provider yang module local name: clustering-it-provider
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Aug 19 14:44:46 PDT 2014
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+/**
+ * Config-subsystem factory stub for clustering-it-provider; all behavior is
+ * inherited from the generated abstract factory.
+ */
+public class ClusteringItProviderModuleFactory extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModuleFactory {
+
+}
--- /dev/null
+module clustering-it-provider {
+
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider";
+ prefix "clustering-it-provider";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; }
+
+ description
+ "This module contains the base YANG definitions for
+ clustering-it-provider implementation.";
+
+ revision "2014-08-19" {
+ description
+ "Initial revision.";
+ }
+
+ // This is the definition of the service implementation as a module identity.
+ identity clustering-it-provider {
+ base config:module-type;
+
+ // Specifies the prefix for generated java classes.
+ config:java-name-prefix ClusteringItProvider;
+ }
+
+ // Augments the 'configuration' choice node under modules/module.
+ augment "/config:modules/config:module/config:configuration" {
+ case clustering-it-provider {
+ when "/config:modules/config:module/config:type = 'clustering-it-provider'";
+
+ // Required: RPC registry used to register the people and routed
+ // car-purchase RPC implementations.
+ container rpc-registry {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-rpc-registry;
+ }
+ }
+ }
+
+ // Required: notification service used to publish and listen for
+ // car-bought notifications.
+ container notification-service {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-notification-service;
+ }
+ }
+ }
+
+ // Optional (mandatory false): data broker used to persist people/cars.
+ container data-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity mdsal:binding-async-data-broker;
+ }
+ }
+ }
+ }
+ }
+}
<module>toaster-provider</module>
<module>toaster-config</module>
<module>l2switch</module>
+ <module>clustering-test-app</module>
</modules>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<artifactId>org.osgi.core</artifactId>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
-
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
- private final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
private final InstanceIdentifier<Topology> topology;
private final OperationProcessor processor;
- FlowCapableTopologyExporter(final OperationProcessor processor, final InstanceIdentifier<Topology> topology) {
+ FlowCapableTopologyExporter(final OperationProcessor processor,
+ final InstanceIdentifier<Topology> topology) {
this.processor = Preconditions.checkNotNull(processor);
this.topology = Preconditions.checkNotNull(topology);
}
processor.enqueueOperation(new TopologyOperation() {
@Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- removeAffectedLinks(nodeId);
+ public void applyOperation(ReadWriteTransaction transaction) {
+ removeAffectedLinks(nodeId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
}
- });
- processor.enqueueOperation(new TopologyOperation() {
@Override
- public void applyOperation(ReadWriteTransaction transaction) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+ public String toString() {
+ return "onNodeRemoved";
}
});
}
final InstanceIdentifier<Node> path = getNodePath(toTopologyNodeId(notification.getId()));
transaction.merge(LogicalDatastoreType.OPERATIONAL, path, node, true);
}
+
+ @Override
+ public String toString() {
+ return "onNodeUpdated";
+ }
});
}
}
@Override
public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
- final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(notification
- .getNodeConnectorRef());
+ final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(
+ notification.getNodeConnectorRef());
- processor.enqueueOperation(new TopologyOperation() {
- @Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- final TpId tpId = toTerminationPointId(getNodeConnectorKey(notification.getNodeConnectorRef()).getId());
- removeAffectedLinks(tpId);
- }
- });
+ final TpId tpId = toTerminationPointId(getNodeConnectorKey(
+ notification.getNodeConnectorRef()).getId());
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(ReadWriteTransaction transaction) {
+ removeAffectedLinks(tpId, transaction);
transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
}
+
+ @Override
+ public String toString() {
+ return "onNodeConnectorRemoved";
+ }
});
}
@Override
public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
- final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(FlowCapableNodeConnectorUpdated.class);
+ final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(
+ FlowCapableNodeConnectorUpdated.class);
if (fcncu != null) {
processor.enqueueOperation(new TopologyOperation() {
@Override
transaction.merge(LogicalDatastoreType.OPERATIONAL, path, point, true);
if ((fcncu.getState() != null && fcncu.getState().isLinkDown())
|| (fcncu.getConfiguration() != null && fcncu.getConfiguration().isPORTDOWN())) {
- removeAffectedLinks(point.getTpId());
+ removeAffectedLinks(point.getTpId(), transaction);
}
}
+
+ @Override
+ public String toString() {
+ return "onNodeConnectorUpdated";
+ }
});
}
}
final InstanceIdentifier<Link> path = linkPath(link);
transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
}
+
+ @Override
+ public String toString() {
+ return "onLinkDiscovered";
+ }
});
}
public void applyOperation(final ReadWriteTransaction transaction) {
transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
}
+
+ @Override
+ public String toString() {
+ return "onLinkRemoved";
+ }
});
}
return tpPath(toTopologyNodeId(invNodeKey.getId()), toTerminationPointId(invNodeConnectorKey.getId()));
}
- private void removeAffectedLinks(final NodeId id) {
- processor.enqueueOperation(new TopologyOperation() {
+ private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
+ CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+ transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+ Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
@Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- if (topologyOptional.isPresent()) {
- List<Link> linkList = topologyOptional.get().getLink() != null
- ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
- for (Link link : linkList) {
- if (id.equals(link.getSource().getSourceNode()) || id.equals(link.getDestination().getDestNode())) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
- }
- }
- }
- }
+ public void onSuccess(Optional<Topology> topologyOptional) {
+ removeAffectedLinks(id, topologyOptional);
+ }
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
- }
- });
+ @Override
+ public void onFailure(Throwable throwable) {
+ LOG.error("Error reading topology data for topology {}", topology, throwable);
}
});
}
- private void removeAffectedLinks(final TpId id) {
- processor.enqueueOperation(new TopologyOperation() {
- @Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- if (topologyOptional.isPresent()) {
- List<Link> linkList = topologyOptional.get().getLink() != null
- ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
- for (Link link : linkList) {
- if (id.equals(link.getSource().getSourceTp()) || id.equals(link.getDestination().getDestTp())) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
- }
- }
- }
- }
+ // Collects the IDs of all links whose source or destination node matches
+ // the removed node and enqueues a single delete operation for them.
+ // No-op when the topology is absent from the datastore.
+ private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional) {
+ if (!topologyOptional.isPresent()) {
+ return;
+ }
+
+ List<Link> linkList = topologyOptional.get().getLink() != null ?
+ topologyOptional.get().getLink() : Collections.<Link> emptyList();
+ final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+ for (Link link : linkList) {
+ if (id.equals(link.getSource().getSourceNode()) ||
+ id.equals(link.getDestination().getDestNode())) {
+ linkIDsToDelete.add(linkPath(link));
+ }
+ }
+
+ enqueueLinkDeletes(linkIDsToDelete);
+ }
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
+ // Enqueues one TopologyOperation that deletes all of the given link IDs in
+ // a single transaction; skips enqueueing entirely when there is nothing to
+ // delete.
+ private void enqueueLinkDeletes(final Collection<InstanceIdentifier<Link>> linkIDsToDelete) {
+ if(!linkIDsToDelete.isEmpty()) {
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(ReadWriteTransaction transaction) {
+ for(InstanceIdentifier<Link> linkID: linkIDsToDelete) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "Delete Links " + linkIDsToDelete.size();
+ }
+ });
+ }
+ }
+
+ // Reads the topology within the supplied transaction and, once the read
+ // completes, enqueues deletion of all links touching the given termination
+ // point. Read failures are logged, not propagated.
+ private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
+ CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+ transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+ Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
+ @Override
+ public void onSuccess(Optional<Topology> topologyOptional) {
+ removeAffectedLinks(id, topologyOptional);
+ }
+
+ @Override
+ public void onFailure(Throwable throwable) {
+ LOG.error("Error reading topology data for topology {}", topology, throwable);
+ }
+ });
+ }
+ // Collects the IDs of all links whose source or destination termination
+ // point matches the removed TP and enqueues their deletion. No-op when the
+ // topology is absent.
+ private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional) {
+ if (!topologyOptional.isPresent()) {
+ return;
+ }
+
+ List<Link> linkList = topologyOptional.get().getLink() != null
+ ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
+ final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+ for (Link link : linkList) {
+ if (id.equals(link.getSource().getSourceTp()) ||
+ id.equals(link.getDestination().getDestTp())) {
+ linkIDsToDelete.add(linkPath(link));
+ }
+ }
+
+ enqueueLinkDeletes(linkIDsToDelete);
+ }
+
+
private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
return topology.child(Node.class, new NodeKey(nodeId));
}
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
+
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
+
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
for (; ; ) {
TopologyOperation op = queue.take();
- LOG.debug("New operations available, starting transaction");
- final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
+ LOG.debug("New {} operation available, starting transaction", op);
+ final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
int ops = 0;
do {
} else {
op = null;
}
+
+ LOG.debug("Next operation {}", op);
} while (op != null);
LOG.debug("Processed {} operations, submitting transaction", ops);
- final CheckedFuture txResultFuture = tx.submit();
- Futures.addCallback(txResultFuture, new FutureCallback() {
+ CheckedFuture<Void, TransactionCommitFailedException> txResultFuture = tx.submit();
+ Futures.addCallback(txResultFuture, new FutureCallback<Void>() {
@Override
- public void onSuccess(Object o) {
+ public void onSuccess(Void notUsed) {
LOG.debug("Topology export successful for tx :{}", tx.getIdentifier());
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.md.controller.topology.manager;
+
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.StateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+public class FlowCapableTopologyExporterTest {
+
+ @Mock
+ private DataBroker mockDataBroker;
+
+ @Mock
+ private BindingTransactionChain mockTxChain;
+
+ private OperationProcessor processor;
+
+ private FlowCapableTopologyExporter exporter;
+
+ private InstanceIdentifier<Topology> topologyIID;
+
+ private final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.initMocks(this);
+
+ // The processor creates its transaction chain from the mocked broker.
+ doReturn(mockTxChain).when(mockDataBroker)
+ .createTransactionChain(any(TransactionChainListener.class));
+
+ processor = new OperationProcessor(mockDataBroker);
+
+ topologyIID = InstanceIdentifier.create(NetworkTopology.class)
+ .child(Topology.class, new TopologyKey(new TopologyId("test")));
+ exporter = new FlowCapableTopologyExporter(processor, topologyIID);
+
+ // Run the operation processor on a background thread, as in production.
+ executor.execute(processor);
+ }
+
+ @After
+ public void tearDown() {
+ // Stop the processor thread started in setUp.
+ executor.shutdownNow();
+ }
+
+ /**
+ * Verifies that removing an inventory node deletes all topology links that
+ * touch it (via a read in one transaction and deletes in a follow-up
+ * operation) plus the topology node itself, and leaves unrelated links alone.
+ */
+ @SuppressWarnings({ "rawtypes" })
+ @Test
+ public void testOnNodeRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ // link1 and link2 touch node1 and must be deleted; link3 does not and
+ // must survive. (The third link was mistakenly also named "link2",
+ // giving two links the same LinkKey and making the expected-deletion
+ // IDs ambiguous.)
+ List<Link> linkList = Arrays.asList(
+ newLink("link1", newSourceNode("node1"), newDestNode("dest")),
+ newLink("link2", newSourceNode("source"), newDestNode("node1")),
+ newLink("link3", newSourceNode("source2"), newDestNode("dest2")));
+ final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Link.class, linkList.get(0).getKey()),
+ topologyIID.child(Link.class, linkList.get(1).getKey()),
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ };
+
+ // First transaction: topology read (completed asynchronously below) and
+ // the node delete.
+ SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+ doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+ CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+ int expDeleteCalls = expDeletedIIDs.length;
+ CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+ // Second transaction: the enqueued link deletes triggered by the read.
+ ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+ setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+ CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+ doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+ waitForSubmit(submitLatch1);
+
+ // Complete the topology read only after the first submit, mimicking an
+ // asynchronous datastore response.
+ setReadFutureAsync(topology, readFuture);
+
+ waitForDeletes(expDeleteCalls, deleteLatch);
+
+ waitForSubmit(submitLatch2);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+ verifyMockTx(mockTx1);
+ verifyMockTx(mockTx2);
+ }
+
+ // Verifies that when the topology read returns absent, only the topology
+ // node itself is deleted (no link deletes are attempted).
+ @SuppressWarnings({ "rawtypes" })
+ @Test
+ public void testOnNodeRemovedWithNoTopology() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ };
+
+ // Read resolves immediately to Optional.absent() — no stored topology.
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+ waitForSubmit(submitLatch);
+
+ waitForDeletes(1, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+ }
+
+ // Verifies onNodeConnectorRemoved: links that reference the removed
+ // termination point (as source or dest) plus the TP itself are deleted,
+ // across two chained transactions (first reads, second deletes after the
+ // asynchronous topology read completes).
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ // link1 and link2 touch tp1; link3 does not and must survive.
+ List<Link> linkList = Arrays.asList(
+ newLink("link1", newSourceTp("tp1"), newDestTp("dest")),
+ newLink("link2", newSourceTp("source"), newDestTp("tp1")),
+ newLink("link3", newSourceTp("source2"), newDestTp("dest2")));
+ final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Link.class, linkList.get(0).getKey()),
+ topologyIID.child(Link.class, linkList.get(1).getKey()),
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+ };
+
+ // First tx: topology read completes later via the settable future.
+ final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+ doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+ CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+ int expDeleteCalls = expDeletedIIDs.length;
+ CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+ // Second tx: performs the deletes once the read result is available.
+ ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+ setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+ CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+ doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).build());
+
+ waitForSubmit(submitLatch1);
+
+ // Complete the read asynchronously to exercise the exporter's callback path.
+ setReadFutureAsync(topology, readFuture);
+
+ waitForDeletes(expDeleteCalls, deleteLatch);
+
+ waitForSubmit(submitLatch2);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+ // No deletes may happen after a transaction was submitted.
+ verifyMockTx(mockTx1);
+ verifyMockTx(mockTx2);
+ }
+
+ // Verifies onNodeConnectorRemoved when no topology data exists: only the
+ // termination point is deleted (there are no links to clean up).
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorRemovedWithNoTopology() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+ };
+
+ // Topology read yields "absent".
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ waitForDeletes(1, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+ }
+
+ // Verifies onNodeUpdated: a topology Node keyed by the inventory node id is
+ // merged into the operational store, carrying an InventoryNode augmentation
+ // that points back at the inventory node.
+ @Test
+ public void testOnNodeUpdated() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeUpdated(new NodeUpdatedBuilder().setNodeRef(new NodeRef(invNodeID))
+ .setId(nodeKey.getId()).addAugmentation(FlowCapableNodeUpdated.class,
+ new FlowCapableNodeUpdatedBuilder().build()).build());
+
+ waitForSubmit(submitLatch);
+
+ // Capture the merged Node and check its id and back-reference augmentation.
+ ArgumentCaptor<Node> mergedNode = ArgumentCaptor.forClass(Node.class);
+ NodeId expNodeId = new NodeId("node1");
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(Node.class,
+ new NodeKey(expNodeId))), mergedNode.capture(), eq(true));
+ assertEquals("getNodeId", expNodeId, mergedNode.getValue().getNodeId());
+ InventoryNode augmentation = mergedNode.getValue().getAugmentation(InventoryNode.class);
+ assertNotNull("Missing augmentation", augmentation);
+ assertEquals("getInventoryNodeRef", new NodeRef(invNodeID), augmentation.getInventoryNodeRef());
+ }
+
+ // Verifies onNodeConnectorUpdated: a TerminationPoint keyed by the inventory
+ // connector id is merged under the matching topology Node, carrying an
+ // InventoryNodeConnector augmentation back-referencing the inventory connector.
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdated() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().build()).build());
+
+ waitForSubmit(submitLatch);
+
+ // Capture the merged TerminationPoint and verify id plus augmentation.
+ ArgumentCaptor<TerminationPoint> mergedNode = ArgumentCaptor.forClass(TerminationPoint.class);
+ NodeId expNodeId = new NodeId("node1");
+ TpId expTpId = new TpId("tp1");
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(expNodeId)).child(TerminationPoint.class,
+ new TerminationPointKey(expTpId));
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ mergedNode.capture(), eq(true));
+ assertEquals("getTpId", expTpId, mergedNode.getValue().getTpId());
+ InventoryNodeConnector augmentation = mergedNode.getValue().getAugmentation(
+ InventoryNodeConnector.class);
+ assertNotNull("Missing augmentation", augmentation);
+ assertEquals("getInventoryNodeConnectorRef", new NodeConnectorRef(invNodeConnID),
+ augmentation.getInventoryNodeConnectorRef());
+ }
+
+ // Verifies onNodeConnectorUpdated with link-state "down": the TP is still
+ // merged, but any link referencing it is deleted from the topology.
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdatedWithLinkStateDown() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ // One existing link whose source is the affected TP.
+ List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+ Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ // Update carries State.linkDown == true.
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().setState(
+ new StateBuilder().setLinkDown(true).build()).build()).build());
+
+ waitForDeletes(1, deleteLatch);
+
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+ new TerminationPointKey(new TpId("tp1")));
+
+ // The TP itself is still merged even though its link went down.
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ any(TerminationPoint.class), eq(true));
+
+ assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+ linkList.get(0).getKey())}, deletedLinkIDs);
+ }
+
+
+ // Verifies onNodeConnectorUpdated with the port administratively down
+ // (PortConfig flags set): same outcome as link-down — the TP is merged and
+ // the link referencing it is deleted.
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdatedWithPortDown() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+ Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ // Update carries PortConfig with all flags true (port down configuration).
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().setConfiguration(
+ new PortConfig(true, true, true, true)).build()).build());
+
+ waitForDeletes(1, deleteLatch);
+
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+ new TerminationPointKey(new TpId("tp1")));
+
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ any(TerminationPoint.class), eq(true));
+
+ assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+ linkList.get(0).getKey())}, deletedLinkIDs);
+ }
+
+ // Verifies onLinkDiscovered: a Link keyed by the source TP id is merged into
+ // the topology, carrying both the source and destination endpoints.
+ @Test
+ public void testOnLinkDiscovered() {
+
+ // Inventory paths for the two ends of the discovered link.
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onLinkDiscovered(new LinkDiscoveredBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ // Capture the merged Link (keyed by the source TP id) and verify endpoints.
+ ArgumentCaptor<Link> mergedLink = ArgumentCaptor.forClass(Link.class);
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+ mergedLink.capture(), eq(true));
+ assertEquals("Source node ID", "sourceNode",
+ mergedLink.getValue().getSource().getSourceNode().getValue());
+ // Fixed assertion message: this line checks the *source* TP, not the dest TP.
+ assertEquals("Source TP ID", "sourceTP",
+ mergedLink.getValue().getSource().getSourceTp().getValue());
+ assertEquals("Dest node ID", "destNode",
+ mergedLink.getValue().getDestination().getDestNode().getValue());
+ assertEquals("Dest TP ID", "destTP",
+ mergedLink.getValue().getDestination().getDestTp().getValue());
+ }
+
+ // Verifies onLinkRemoved: the Link keyed by the source TP id is deleted
+ // from the operational topology.
+ @Test
+ public void testOnLinkRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ // The link id mirrors the source TP id (same convention as onLinkDiscovered).
+ verify(mockTx).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+ }
+
+ // Asserts ordering on a transaction mock: no delete() may be invoked after
+ // submit() — i.e. all deletes happened before the transaction was submitted.
+ private void verifyMockTx(ReadWriteTransaction mockTx) {
+ InOrder inOrder = inOrder(mockTx);
+ inOrder.verify(mockTx, atLeast(0)).submit();
+ inOrder.verify(mockTx, never()).delete(eq(LogicalDatastoreType.OPERATIONAL),
+ any(InstanceIdentifier.class));
+ }
+
+ // Asserts that every expected instance identifier appears among the
+ // arguments captured from the stubbed delete() calls.
+ @SuppressWarnings("rawtypes")
+ private void assertDeletedIDs(InstanceIdentifier[] expDeletedIIDs,
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs) {
+ Set<InstanceIdentifier> capturedIIDs = new HashSet<>(deletedLinkIDs.getAllValues());
+ for (InstanceIdentifier expected : expDeletedIIDs) {
+ assertTrue("Missing expected deleted IID " + expected, capturedIIDs.contains(expected));
+ }
+ }
+
+ // Completes the topology read future on a background thread after a short
+ // delay, exercising the exporter's asynchronous read-callback path.
+ private void setReadFutureAsync(final Topology topology,
+ final SettableFuture<Optional<Topology>> readFuture) {
+ Runnable completeRead = new Runnable() {
+ @Override
+ public void run() {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ readFuture.set(Optional.of(topology));
+ }
+ };
+ new Thread(completeRead).start();
+ }
+
+ // Blocks until the transaction's submit() latch fires, failing after 5 s.
+ private void waitForSubmit(CountDownLatch latch) {
+ boolean submitted = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS);
+ assertEquals("Transaction submitted", true, submitted);
+ }
+
+ private void waitForDeletes(int expDeleteCalls, final CountDownLatch latch) {
+ boolean done = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS);
+ if(!done) {
+ fail("Expected " + expDeleteCalls + " delete calls. Actual: " +
+ (expDeleteCalls - latch.getCount()));
+ }
+ }
+
+ // Stubs submit() on the given transaction mock to succeed immediately while
+ // counting down the returned latch, so tests can wait for the submit call.
+ private CountDownLatch setupStubbedSubmit(ReadWriteTransaction mockTx) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ doAnswer(new Answer<CheckedFuture<Void, TransactionCommitFailedException>>() {
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> answer(
+ InvocationOnMock invocation) {
+ latch.countDown();
+ return Futures.immediateCheckedFuture(null);
+ }
+ }).when(mockTx).submit();
+
+ return latch;
+ }
+
+ // Stubs delete() on the given transaction mock: each call counts down the
+ // latch and its InstanceIdentifier argument is captured for later assertions.
+ @SuppressWarnings("rawtypes")
+ private void setupStubbedDeletes(ReadWriteTransaction mockTx,
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs, final CountDownLatch latch) {
+ doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) {
+ latch.countDown();
+ return null;
+ }
+ }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL), deletedLinkIDs.capture());
+ }
+
+ // Builds an inventory NodeKey for the given node id string.
+ private org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ newInvNodeKey(String id) {
+ return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey(
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+ rev130819.NodeId(id));
+ }
+
+ // Builds an inventory NodeConnectorKey for the given termination point id.
+ private NodeConnectorKey newInvNodeConnKey(String id) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId connId =
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId(id);
+ return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey(connId);
+ }
+
+ // Builds the inventory path /nodes/node[nodeKey]/node-connector[ncKey].
+ private KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> newNodeConnID(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey) {
+ return InstanceIdentifier.create(Nodes.class)
+ .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey)
+ .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector.class,
+ ncKey);
+ }
+
+ // Assembles a topology Link with the given id and endpoint descriptors.
+ private Link newLink(String id, Source source, Destination dest) {
+ LinkBuilder builder = new LinkBuilder();
+ builder.setLinkId(new LinkId(id));
+ builder.setSource(source);
+ builder.setDestination(dest);
+ return builder.build();
+ }
+
+ // Builds a link Destination referring to a termination point id.
+ private Destination newDestTp(String id) {
+ TpId tpId = new TpId(id);
+ return new DestinationBuilder().setDestTp(tpId).build();
+ }
+
+ // Builds a link Source referring to a termination point id.
+ private Source newSourceTp(String id) {
+ TpId tpId = new TpId(id);
+ return new SourceBuilder().setSourceTp(tpId).build();
+ }
+
+ // Builds a link Destination referring to a node id.
+ private Destination newDestNode(String id) {
+ NodeId nodeId = new NodeId(id);
+ return new DestinationBuilder().setDestNode(nodeId).build();
+ }
+
+ // Builds a link Source referring to a node id.
+ private Source newSourceNode(String id) {
+ NodeId nodeId = new NodeId(id);
+ return new SourceBuilder().setSourceNode(nodeId).build();
+ }
+}
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
package org.opendaylight.controller.netconf.client;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import io.netty.channel.Channel;
logger.debug("Netconf session {} should use exi.", session);
NetconfStartExiMessage startExiMessage = (NetconfStartExiMessage) sessionPreferences.getStartExiMessage();
tryToInitiateExi(session, startExiMessage);
- // Exi is not supported, release session immediately
} else {
+ // Exi is not supported, release session immediately
logger.debug("Netconf session {} isn't capable of using exi.", session);
negotiationSuccessful(session);
}
private long extractSessionId(final Document doc) {
final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+ Preconditions.checkState(sessionIdNode != null, "");
String textContent = sessionIdNode.getTextContent();
if (textContent == null || textContent.equals("")) {
throw new IllegalStateException("Session id not received from server");
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+
+import java.net.InetSocketAddress;
+
+public class NetconfClientConfigurationTest {
+ @Test
+ public void testNetconfClientConfiguration() throws Exception {
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategy strategy = Mockito.mock(ReconnectStrategy.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ NetconfClientConfiguration cfg = NetconfClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(strategy).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withAuthHandler(handler).build();
+
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getReconnectStrategy());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+
+ // Smoke test for NetconfClientDispatcherImpl: with fully mocked Netty event
+ // loops it creates SSH and TCP client sessions (plain and reconnecting) and
+ // verifies that non-null futures are returned in all four cases.
+ public class NetconfClientDispatcherImplTest {
+ @Test
+ public void testNetconfClientDispatcherImpl() throws Exception {
+ EventLoopGroup bossGroup = Mockito.mock(EventLoopGroup.class);
+ EventLoopGroup workerGroup = Mockito.mock(EventLoopGroup.class);
+ Timer timer = new HashedWheelTimer();
+
+ // Channel registration on the worker group yields a mocked ChannelFuture.
+ ChannelFuture chf = Mockito.mock(ChannelFuture.class);
+ Channel ch = Mockito.mock(Channel.class);
+ doReturn(ch).when(chf).channel();
+ Throwable thr = Mockito.mock(Throwable.class);
+ doReturn(chf).when(workerGroup).register(any(Channel.class));
+
+ ChannelPromise promise = Mockito.mock(ChannelPromise.class);
+ doReturn(promise).when(chf).addListener(any(GenericFutureListener.class));
+ doReturn(thr).when(chf).cause();
+
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory reconnectStrategyFactory = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ // toString() stubs keep Mockito mocks safe to log/print during dispatch.
+ doReturn(5).when(reconnect).getConnectTimeout();
+ doReturn("").when(reconnect).toString();
+ doReturn("").when(handler).toString();
+ doReturn("").when(reconnectStrategyFactory).toString();
+ doReturn(reconnect).when(reconnectStrategyFactory).createReconnectStrategy();
+
+ // SSH-flavoured reconnecting configuration.
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ // Identical configuration, but over plain TCP.
+ NetconfReconnectingClientConfiguration cfg2 = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.TCP).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfClientDispatcherImpl dispatcher = new NetconfClientDispatcherImpl(bossGroup, workerGroup, timer);
+ Future<NetconfClientSession> sshSession = dispatcher.createClient(cfg);
+ Future<NetconfClientSession> tcpSession = dispatcher.createClient(cfg2);
+
+ Future<Void> sshReconn = dispatcher.createReconnectingClient(cfg);
+ Future<Void> tcpReconn = dispatcher.createReconnectingClient(cfg2);
+
+ assertNotNull(sshSession);
+ assertNotNull(tcpSession);
+ assertNotNull(sshReconn);
+ assertNotNull(tcpReconn);
+
+ }
+ }
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.Promise;
+import org.apache.sshd.common.SessionListener;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+ public class NetconfClientSessionNegotiatorFactoryTest {
+ @Test
+ public void testGetSessionNegotiator() throws Exception {
+ // A factory built with no additional header and a 200 ms timeout must
+ // hand out a non-null negotiator for any listener/channel/promise combo.
+ final Timer timer = new HashedWheelTimer();
+ final NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ final SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class);
+ doReturn(sessionListener).when(listenerFactory).getSessionListener();
+
+ final Channel channel = mock(Channel.class);
+ final Promise promise = mock(Promise.class);
+ final NetconfClientSessionNegotiatorFactory negotiatorFactory =
+ new NetconfClientSessionNegotiatorFactory(timer,
+ Optional.<NetconfHelloMessageAdditionalHeader>absent(), 200L);
+
+ final SessionNegotiator sessionNegotiator =
+ negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
+ assertNotNull(sessionNegotiator);
+ }
+ }
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.*;
+import io.netty.handler.ssl.SslHandler;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.Promise;
+import org.apache.mina.handler.demux.ExceptionHandler;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.collections.Sets;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import io.netty.util.Timer;
+import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.w3c.dom.Document;
+import java.util.Set;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class NetconfClientSessionNegotiatorTest {
+
+ private NetconfHelloMessage helloMessage;
+ private ChannelPipeline pipeline;
+ private ChannelFuture future;
+ private Channel channel;
+ private ChannelInboundHandlerAdapter channelInboundHandlerAdapter;
+
+ // Builds the shared fixtures: a client hello advertising the EXI capability
+ // and mocked pipeline/future/channel. Order matters — mockChannel() reads
+ // the 'pipeline' and 'future' fields set just above it.
+ @Before
+ public void setUp() throws Exception {
+ helloMessage = NetconfHelloMessage.createClientHello(Sets.newSet("exi:1.0"), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+ pipeline = mockChannelPipeline();
+ future = mockChannelFuture();
+ channel = mockChannel();
+ }
+
+ // Returns a plain ChannelHandler mock with default (no-op) behaviour.
+ private ChannelHandler mockChannelHandler() {
+ return mock(ChannelHandler.class);
+ }
+
+ // Builds the Channel mock wired to the shared 'pipeline' and 'future' mocks.
+ // Note: the local 'channel' deliberately shadows the field being initialized
+ // from setUp(); the last two stubs configure the shared pipeline mock.
+ private Channel mockChannel() {
+ Channel channel = mock(Channel.class);
+ ChannelHandler channelHandler = mockChannelHandler();
+ doReturn("").when(channel).toString();
+ doReturn(future).when(channel).close();
+ doReturn(future).when(channel).writeAndFlush(anyObject());
+ doReturn(true).when(channel).isOpen();
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(pipeline).toString();
+ doReturn(pipeline).when(pipeline).remove(any(ChannelHandler.class));
+ doReturn(channelHandler).when(pipeline).remove(anyString());
+ return channel;
+ }
+
+ private ChannelFuture mockChannelFuture() {
+ ChannelFuture future = mock(ChannelFuture.class);
+ doReturn(future).when(future).addListener(any(GenericFutureListener.class));
+ return future;
+ }
+
+ private ChannelPipeline mockChannelPipeline() {
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ ChannelHandler handler = mock(ChannelHandler.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ doReturn(null).when(pipeline).get(SslHandler.class);
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+ doReturn(handler).when(pipeline).replace(anyString(), anyString(), any(ChunkedFramingMechanismEncoder.class));
+
+ NetconfXMLToHelloMessageDecoder messageDecoder = new NetconfXMLToHelloMessageDecoder();
+ doReturn(messageDecoder).when(pipeline).replace(anyString(), anyString(), any(NetconfXMLToMessageDecoder.class));
+ doReturn(pipeline).when(pipeline).replace(any(ChannelHandler.class), anyString(), any(NetconfClientSession.class));
+ return pipeline;
+ }
+
+ private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise,
+ NetconfMessage startExi) {
+ ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class);
+ NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi);
+ doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class));
+
+ long timeout = 10L;
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ return new NetconfClientSessionNegotiator(preferences, promise, channel, timer, sessionListener, timeout);
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiator() throws Exception {
+ Promise promise = mock(Promise.class);
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("a", "b");
+ NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ negotiator.handleMessage(helloServerMessage);
+ verify(promise).setSuccess(anyObject());
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiatorWithEXI() throws Exception {
+ Promise promise = mock(Promise.class);
+ EXIOptions exiOptions = new EXIOptions();
+ NetconfStartExiMessage exiMessage = NetconfStartExiMessage.create(exiOptions, "msg-id");
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("exi:1.0");
+ NetconfHelloMessage helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+ channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]);
+ return null;
+ }
+ }).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+
+ ChannelHandlerContext handlerContext = mock(ChannelHandlerContext.class);
+ doReturn(pipeline).when(handlerContext).pipeline();
+ negotiator.handleMessage(helloMessage);
+ Document expectedResult = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ channelInboundHandlerAdapter.channelRead(handlerContext, new NetconfMessage(expectedResult));
+
+ verify(promise).setSuccess(anyObject());
+
+ // replace() is invoked twice for the start-exi exchange and twice for the hello exchange, 4 calls total
+ verify(pipeline, times(4)).replace(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.openexi.proc.common.EXIOptions;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionTest {
+
+ @Mock
+ ChannelHandler channelHandler;
+
+ @Mock
+ Channel channel;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @Test
+ public void testNetconfClientSession() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ long sessId = 20L;
+ Collection<String> caps = Lists.newArrayList("cap1", "cap2");
+
+ NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+
+ Mockito.doReturn(pipeline).when(channel).pipeline();
+ Mockito.doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+ Mockito.doReturn("").when(channelHandler).toString();
+
+ NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps);
+ session.addExiHandlers(codec);
+ session.stopExiCommunication();
+
+ assertEquals(caps, session.getServerCapabilities());
+ assertEquals(session, session.thisInstance());
+
+ Mockito.verify(pipeline, Mockito.times(4)).replace(anyString(), anyString(), Mockito.any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.config.yang.protocol.framework.NeverReconnectStrategyFactoryModule;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+
+public class NetconfReconnectingClientConfigurationTest {
+ @Test
+ public void testNetconfReconnectingClientConfiguration() throws Exception {
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory strategy = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(strategy).
+ withAuthHandler(handler).build();
+
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getConnectStrategyFactory());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ Assert.assertEquals(reconnect, cfg.getReconnectStrategy());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+import io.netty.channel.*;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+
+import java.util.Set;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.*;
+
+public class SimpleNetconfClientSessionListenerTest {
+
+ private Channel channel;
+ private ChannelFuture channelFuture;
+ Set caps;
+ private NetconfHelloMessage helloMessage;
+ private NetconfMessage message;
+ private NetconfClientSessionListener sessionListener;
+ private NetconfClientSession clientSession;
+
+ @Before
+ public void setUp() throws Exception {
+ channel = mock(Channel.class);
+ channelFuture = mock(ChannelFuture.class);
+ doReturn(channelFuture).when(channel).writeAndFlush(anyObject());
+ caps = Sets.newSet("a", "b");
+ helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ message = new NetconfMessage(helloMessage.getDocument());
+ sessionListener = mock(NetconfClientSessionListener.class);
+ clientSession = new NetconfClientSession(sessionListener, channel, 20L, caps);
+ }
+
+ @Test
+ public void testSessionDown() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ simpleListener.onSessionDown(clientSession, new Exception());
+ assertFalse(promise.isSuccess());
+ }
+
+ @Test
+ public void testSendRequest() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ simpleListener.sendRequest(message);
+ assertFalse(promise.isSuccess());
+ }
+
+ @Test
+ public void testOnMessage() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ simpleListener.onMessage(clientSession, message);
+ assertTrue(promise.isSuccess());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class SshClientChannelInitializerTest {
+ @Test
+ public void test() throws Exception {
+
+ AuthenticationHandler authenticationHandler = mock(AuthenticationHandler.class);
+ NetconfClientSessionNegotiatorFactory negotiatorFactory = mock(NetconfClientSessionNegotiatorFactory.class);
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+
+ SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ doReturn("").when(sessionNegotiator).toString();
+ doReturn(sessionNegotiator).when(negotiatorFactory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ Channel channel = mock(Channel.class);
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(channel).toString();
+ doReturn(pipeline).when(pipeline).addFirst(any(ChannelHandler.class));
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+
+ Promise<NetconfClientSession> promise = mock(Promise.class);
+ doReturn("").when(promise).toString();
+
+ SshClientChannelInitializer initializer = new SshClientChannelInitializer(authenticationHandler, negotiatorFactory,
+ sessionListener);
+ initializer.initialize(channel, promise);
+ verify(pipeline, times(1)).addFirst(any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Mockito.*;
+
+public class TcpClientChannelInitializerTest {
+ @Test
+ public void testInitializeSessionNegotiator() throws Exception {
+ NetconfClientSessionNegotiatorFactory factory = mock(NetconfClientSessionNegotiatorFactory.class);
+ SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ doReturn("").when(sessionNegotiator).toString();
+ doReturn(sessionNegotiator).when(factory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+ NetconfClientSessionListener listener = mock(NetconfClientSessionListener.class);
+ TcpClientChannelInitializer initializer = new TcpClientChannelInitializer(factory, listener);
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ Channel channel = mock(Channel.class);
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(channel).toString();
+
+ Promise<NetconfClientSession> promise = mock(Promise.class);
+ doReturn("").when(promise).toString();
+
+ initializer.initializeSessionNegotiator(channel, promise);
+ verify(pipeline, times(1)).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.client.test;
+package org.opendaylight.controller.netconf.client;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
-import org.opendaylight.controller.netconf.client.NetconfClientSession;
-import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
-import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration.NetconfClientProtocol;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import org.opendaylight.controller.netconf.mapping.api.Capability;
<artifactId>config-util</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-api</artifactId>
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import io.netty.channel.local.LocalAddress;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.auth.AuthProvider;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.xml.sax.SAXException;
public class NetconfITSecureTest extends AbstractNetconfConfigTest {
@Test
public void testSecure() throws Exception {
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
- try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+ try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(new SimpleNetconfClientSessionListener()))) {
NetconfMessage response = netconfClient.sendMessage(getGetConfig());
assertFalse("Unexpected error message " + XmlUtil.toString(response.getDocument()),
NetconfMessageUtil.isErrorMessage(response));
/**
* Test all requests are handled properly and no mismatch occurs in listener
*/
- @Test(timeout = 3*60*1000)
+ @Test(timeout = 5*60*1000)
public void testSecureStress() throws Exception {
+ final int requests = 10000;
+
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
- try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+ final NetconfDeviceCommunicator sessionListener = getSessionListener();
+ try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(sessionListener))) {
final AtomicInteger responseCounter = new AtomicInteger(0);
- final List<Future<?>> futures = Lists.newArrayList();
+ final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
- final int requests = 1000;
for (int i = 0; i < requests; i++) {
- final Future<NetconfMessage> netconfMessageFuture = netconfClient.sendRequest(getGetConfig());
+ NetconfMessage getConfig = getGetConfig();
+ getConfig = changeMessageId(getConfig, i);
+ final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture = sessionListener.sendRequest(getConfig, QName.create("namespace", "2012-12-12", "get"));
futures.add(netconfMessageFuture);
- netconfMessageFuture.addListener(new GenericFutureListener<Future<? super NetconfMessage>>() {
+ Futures.addCallback(netconfMessageFuture, new FutureCallback<RpcResult<NetconfMessage>>() {
@Override
- public void operationComplete(final Future<? super NetconfMessage> future) throws Exception {
- assertTrue("Request unsuccessful " + future.cause(), future.isSuccess());
+ public void onSuccess(final RpcResult<NetconfMessage> result) {
responseCounter.incrementAndGet();
}
+
+ @Override
+ public void onFailure(final Throwable t) {
+ throw new RuntimeException(t);
+ }
});
}
- for (final Future<?> future : futures) {
- future.await();
+ // Wait for every future
+ for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+ try {
+ future.get(3, TimeUnit.MINUTES);
+ } catch (final TimeoutException e) {
+ fail("Request " + futures.indexOf(future) + " is not responding");
+ }
}
 // Give future listeners some time to finish incrementing the response counter
}
}
- public NetconfClientConfiguration getClientConfiguration() throws IOException {
+ private NetconfMessage changeMessageId(final NetconfMessage getConfig, final int i) throws IOException, SAXException {
+ String s = XmlUtil.toString(getConfig.getDocument(), false);
+ s = s.replace("101", Integer.toString(i));
+ return new NetconfMessage(XmlUtil.readXmlToDocument(s));
+ }
+
+ public NetconfClientConfiguration getClientConfiguration(final NetconfClientSessionListener sessionListener) throws IOException {
final NetconfClientConfigurationBuilder b = NetconfClientConfigurationBuilder.create();
b.withAddress(TLS_ADDRESS);
- b.withSessionListener(new SimpleNetconfClientSessionListener());
+ // Using session listener from sal-netconf-connector since stress test cannot be performed with simple listener
+ b.withSessionListener(sessionListener);
b.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
b.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
b.withConnectionTimeoutMillis(5000);
return b.build();
}
+ @Mock
+ private RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice;
+
+ private NetconfDeviceCommunicator getSessionListener() {
+ MockitoAnnotations.initMocks(this);
+ doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+ doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
+ return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
+ }
+
public AuthProvider getAuthProvider() throws Exception {
final AuthProvider mockAuth = mock(AuthProvider.class);
doReturn("mockedAuth").when(mockAuth).toString();
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
</appender>
<logger name="org.opendaylight.controller.netconf" level="TRACE"/>
+ <logger name="org.opendaylight.controller.sal.connect.netconf" level="TRACE"/>
<root level="error">
<appender-ref ref="STDOUT" />
public class JaxBSerializerTest {
@Test
- public void testName() throws Exception {
+ public void testSerialization() throws Exception {
final NetconfMonitoringService service = new NetconfMonitoringService() {
}
};
final NetconfState model = new NetconfState(service);
- final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model));
+ final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)).replaceAll("\\s", "");
assertThat(xml, CoreMatchers.containsString(
- "<schema>\n" +
- "<format>yang</format>\n" +
- "<identifier>id</identifier>\n" +
- "<location>NETCONF</location>\n" +
- "<namespace>localhost</namespace>\n" +
- "<version>v1</version>\n" +
- "</schema>\n"));
+ "<schema>" +
+ "<format>yang</format>" +
+ "<identifier>id</identifier>" +
+ "<location>NETCONF</location>" +
+ "<namespace>localhost</namespace>" +
+ "<version>v1</version>" +
+ "</schema>"));
assertThat(xml, CoreMatchers.containsString(
- "<session>\n" +
- "<session-id>1</session-id>\n" +
- "<in-bad-rpcs>0</in-bad-rpcs>\n" +
- "<in-rpcs>0</in-rpcs>\n" +
- "<login-time>loginTime</login-time>\n" +
- "<out-notifications>0</out-notifications>\n" +
- "<out-rpc-errors>0</out-rpc-errors>\n" +
- "<ncme:session-identifier>client</ncme:session-identifier>\n" +
- "<source-host>address/port</source-host>\n" +
- "<transport>ncme:netconf-tcp</transport>\n" +
- "<username>username</username>\n" +
+ "<session>" +
+ "<session-id>1</session-id>" +
+ "<in-bad-rpcs>0</in-bad-rpcs>" +
+ "<in-rpcs>0</in-rpcs>" +
+ "<login-time>loginTime</login-time>" +
+ "<out-notifications>0</out-notifications>" +
+ "<out-rpc-errors>0</out-rpc-errors>" +
+ "<ncme:session-identifier>client</ncme:session-identifier>" +
+ "<source-host>address/port</source-host>" +
+ "<transport>ncme:netconf-tcp</transport>" +
+ "<username>username</username>" +
"</session>"));
}
", remote window is not getting read or is too small"));
}
+ // Reset the buffer's reader index: the first (pending) write attempt already consumed it, so a retry must re-read from the start
+ ((ByteBuf) msg).resetReaderIndex();
logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
// In case of pending, re-invoke write after pending is finished
lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
@Override
public void operationComplete(final IoWriteFuture future) {
+ // FIXME: possible minor race condition — there is no guarantee that this callback (run once the pending write
+ // completes) executes before an external thread triggers another write on this instance at this point.
+ // Verify whether this ordering can be violated in practice and fix if necessary.
if (future.isWritten()) {
synchronized (SshWriteAsyncHandler.this) {
// Pending done, decrease counter
pendingWriteCounter--;
+ write(ctx, msg, promise);
}
- write(ctx, msg, promise);
} else {
// Cannot reschedule pending, fail
handlePendingFailed(ctx, e);
assertThat(out.get(0), CoreMatchers.instanceOf(NetconfHelloMessage.class));
final NetconfHelloMessage hello = (NetconfHelloMessage) out.get(0);
assertTrue(hello.getAdditionalHeader().isPresent());
- assertEquals("[tomas;10.0.0.0:10000;tcp;client;]\n", hello.getAdditionalHeader().get().toFormattedString());
+ assertEquals("[tomas;10.0.0.0:10000;tcp;client;]" + System.lineSeparator(), hello.getAdditionalHeader().get().toFormattedString());
assertThat(XmlUtil.toString(hello.getDocument()), CoreMatchers.containsString("<hello xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\""));
}
netconfSSHServer.setAuthProvider(authProvider);
InetSocketAddress address = netconfSSHServer.getLocalSocketAddress();
- final EchoClientHandler echoClientHandler = connectClient(address);
+
+ final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort()));
+
Stopwatch stopwatch = new Stopwatch().start();
while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
Thread.sleep(100);
this.hashedWheelTimer = hashedWheelTimer;
}
- private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi) {
+ private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
final Set<Capability> capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function<ModuleBuilder, Capability>() {
@Override
: Sets.newHashSet(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0, XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
- hashedWheelTimer, simulatedOperationProvider, idProvider, CONNECTION_TIMEOUT_MILLIS, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
+ hashedWheelTimer, simulatedOperationProvider, idProvider, generateConfigsTimeout, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
serverNegotiatorFactory);
public List<Integer> start(final Main.Params params) {
final Map<ModuleBuilder, String> moduleBuilders = parseSchemasToModuleBuilders(params);
- final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi);
+ final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout);
int currentPort = params.startingPort;
<artifactId>xmlunit</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
Document doc = XmlUtil.newDocument();
Element helloElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
HELLO_TAG);
- Element capabilitiesElement = doc.createElement(XmlNetconfConstants.CAPABILITIES);
+ Element capabilitiesElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.CAPABILITIES);
for (String capability : Sets.newHashSet(capabilities)) {
- Element capElement = doc.createElement(XmlNetconfConstants.CAPABILITY);
+ Element capElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.CAPABILITY);
capElement.setTextContent(capability);
capabilitiesElement.appendChild(capElement);
}
public static NetconfHelloMessage createServerHello(Set<String> capabilities, long sessionId) throws NetconfDocumentedException {
Document doc = createHelloMessageDoc(capabilities);
- Element sessionIdElement = doc.createElement(XmlNetconfConstants.SESSION_ID);
+ Element sessionIdElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.SESSION_ID);
sessionIdElement.setTextContent(Long.toString(sessionId));
doc.getDocumentElement().appendChild(sessionIdElement);
return new NetconfHelloMessage(doc);
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+
+import com.google.common.collect.Lists;
+import org.junit.Test;
+
+public class CloseableUtilTest {
+
+ @Test
+ public void testCloseAllFail() throws Exception {
+ final AutoCloseable failingCloseable = new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ throw new RuntimeException("testing failing close");
+ }
+ };
+
+ try {
+ CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+ fail("Exception with suppressed should be thrown");
+ } catch (final RuntimeException e) {
+ assertEquals(1, e.getSuppressed().length);
+ }
+ }
+
+ @Test
+ public void testCloseAll() throws Exception {
+ final AutoCloseable failingCloseable = mock(AutoCloseable.class);
+ doNothing().when(failingCloseable).close();
+ CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+public class HardcodedNamespaceResolverTest {
+
+ @Test
+ public void testResolver() throws Exception {
+ final HardcodedNamespaceResolver hardcodedNamespaceResolver = new HardcodedNamespaceResolver("prefix", "namespace");
+
+ assertEquals("namespace", hardcodedNamespaceResolver.getNamespaceURI("prefix"));
+ try{
+ hardcodedNamespaceResolver.getNamespaceURI("unknown");
+ fail("Unknown namespace lookup should fail");
+ } catch(IllegalStateException e) {}
+
+ assertNull(hardcodedNamespaceResolver.getPrefix("any"));
+ assertNull(hardcodedNamespaceResolver.getPrefixes("any"));
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.hamcrest.CoreMatchers.both;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Map;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import com.google.common.base.Optional;
+
+public class XmlElementTest {
+
+ private final String elementAsString = "<top xmlns=\"namespace\" xmlns:a=\"attrNamespace\" a:attr1=\"value1\" attr2=\"value2\">" +
+ "<inner>" +
+ "<deepInner>deepValue</deepInner>" +
+ "</inner>" +
+ "<innerNamespace xmlns=\"innerNamespace\">innerNamespaceValue</innerNamespace>" +
+ "<innerPrefixed xmlns:b=\"prefixedValueNamespace\">b:valueWithPrefix</innerPrefixed>" +
+ "</top>";
+ private Document document;
+ private Element element;
+ private XmlElement xmlElement;
+
+ @Before
+ public void setUp() throws Exception {
+ document = XmlUtil.readXmlToDocument(elementAsString);
+ element = document.getDocumentElement();
+ xmlElement = XmlElement.fromDomElement(element);
+ }
+
+ @Test
+ public void testConstruct() throws Exception {
+ final XmlElement fromString = XmlElement.fromString(elementAsString);
+ assertEquals(fromString, xmlElement);
+ XmlElement.fromDomDocument(document);
+ XmlElement.fromDomElement(element);
+ XmlElement.fromDomElementWithExpected(element, "top");
+ XmlElement.fromDomElementWithExpected(element, "top", "namespace");
+
+ try {
+ XmlElement.fromString("notXml");
+ fail();
+ } catch (final NetconfDocumentedException e) {}
+
+ try {
+ XmlElement.fromDomElementWithExpected(element, "notTop");
+ fail();
+ } catch (final NetconfDocumentedException e) {}
+
+ try {
+ XmlElement.fromDomElementWithExpected(element, "top", "notNamespace");
+ fail();
+ } catch (final NetconfDocumentedException e) {}
+ }
+
+ @Test
+ public void testGetters() throws Exception {
+ assertEquals(element, xmlElement.getDomElement());
+ assertEquals(element.getElementsByTagName("inner").getLength(), xmlElement.getElementsByTagName("inner").getLength());
+
+ assertEquals("top", xmlElement.getName());
+ assertTrue(xmlElement.hasNamespace());
+ assertEquals("namespace", xmlElement.getNamespace());
+ assertEquals("namespace", xmlElement.getNamespaceAttribute());
+ assertEquals(Optional.of("namespace"), xmlElement.getNamespaceOptionally());
+
+ assertEquals("value1", xmlElement.getAttribute("attr1", "attrNamespace"));
+ assertEquals("value2", xmlElement.getAttribute("attr2"));
+ assertEquals(2 + 2/*Namespace definition*/, xmlElement.getAttributes().size());
+
+ assertEquals(3, xmlElement.getChildElements().size());
+ assertEquals(1, xmlElement.getChildElements("inner").size());
+ assertTrue(xmlElement.getOnlyChildElementOptionally("inner").isPresent());
+ assertTrue(xmlElement.getOnlyChildElementWithSameNamespaceOptionally("inner").isPresent());
+ assertEquals(0, xmlElement.getChildElements("unknown").size());
+ assertFalse(xmlElement.getOnlyChildElementOptionally("unknown").isPresent());
+ assertEquals(1, xmlElement.getChildElementsWithSameNamespace("inner").size());
+ assertEquals(0, xmlElement.getChildElementsWithSameNamespace("innerNamespace").size());
+ assertEquals(1, xmlElement.getChildElementsWithinNamespace("innerNamespace", "innerNamespace").size());
+ assertTrue(xmlElement.getOnlyChildElementOptionally("innerNamespace", "innerNamespace").isPresent());
+ assertFalse(xmlElement.getOnlyChildElementOptionally("innerNamespace", "unknownNamespace").isPresent());
+
+ final XmlElement noNamespaceElement = XmlElement.fromString("<noNamespace/>");
+ assertFalse(noNamespaceElement.hasNamespace());
+ try {
+ noNamespaceElement.getNamespace();
+ fail();
+ } catch (final MissingNameSpaceException e) {}
+
+ final XmlElement inner = xmlElement.getOnlyChildElement("inner");
+ final XmlElement deepInner = inner.getOnlyChildElementWithSameNamespaceOptionally().get();
+ assertEquals(deepInner, inner.getOnlyChildElementWithSameNamespace());
+ assertEquals(Optional.<XmlElement>absent(), xmlElement.getOnlyChildElementOptionally("unknown"));
+ assertEquals("deepValue", deepInner.getTextContent());
+ assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+ assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+ }
+
+ @Test
+ public void testExtractNamespaces() throws Exception {
+ final XmlElement innerPrefixed = xmlElement.getOnlyChildElement("innerPrefixed");
+ Map.Entry<String, String> namespaceOfTextContent = innerPrefixed.findNamespaceOfTextContent();
+
+ assertNotNull(namespaceOfTextContent);
+ assertEquals("b", namespaceOfTextContent.getKey());
+ assertEquals("prefixedValueNamespace", namespaceOfTextContent.getValue());
+ final XmlElement innerNamespace = xmlElement.getOnlyChildElement("innerNamespace");
+ namespaceOfTextContent = innerNamespace.findNamespaceOfTextContent();
+
+ assertEquals("", namespaceOfTextContent.getKey());
+ assertEquals("innerNamespace", namespaceOfTextContent.getValue());
+ }
+
+ @Test
+ public void testUnrecognisedElements() throws Exception {
+ xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"), xmlElement.getOnlyChildElement("innerPrefixed"), xmlElement.getOnlyChildElement("innerNamespace"));
+
+ try {
+ xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"));
+ fail();
+ } catch (final NetconfDocumentedException e) {
+ assertThat(e.getMessage(), both(containsString("innerPrefixed")).and(containsString("innerNamespace")));
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import com.google.common.base.Optional;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXParseException;
+
+public class XmlUtilTest {
+
+ private final String xml = "<top xmlns=\"namespace\">\n" +
+ "<innerText>value</innerText>\n" +
+ "<innerPrefixedText xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+ "<innerPrefixedText xmlns=\"randomNamespace\" xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+ "</top>";
+
+ @Test
+ public void testCreateElement() throws Exception {
+ final Document document = XmlUtil.newDocument();
+ final Element top = XmlUtil.createElement(document, "top", Optional.of("namespace"));
+
+ top.appendChild(XmlUtil.createTextElement(document, "innerText", "value", Optional.of("namespace")));
+ top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("namespace")));
+ top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("randomNamespace")));
+
+ document.appendChild(top);
+ assertEquals("top", XmlUtil.createDocumentCopy(document).getDocumentElement().getTagName());
+
+ XMLUnit.setIgnoreAttributeOrder(true);
+ XMLUnit.setIgnoreWhitespace(true);
+
+ final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(xml), document);
+ assertTrue(diff.toString(), diff.similar());
+ }
+
+ @Test
+ public void testLoadSchema() throws Exception {
+ XmlUtil.loadSchema();
+ try {
+ XmlUtil.loadSchema(getClass().getResourceAsStream("/netconfMessages/commit.xml"));
+ fail("Input stream does not contain xsd");
+ } catch (final IllegalStateException e) {
+ assertTrue(e.getCause() instanceof SAXParseException);
+ }
+
+ }
+
+ @Test
+ public void testXPath() throws Exception {
+ final XPathExpression correctXPath = XMLNetconfUtil.compileXPath("/top/innerText");
+ try {
+ XMLNetconfUtil.compileXPath("!@(*&$!");
+ fail("Incorrect xpath should fail");
+ } catch (IllegalStateException e) {}
+ final Object value = XmlUtil.evaluateXPath(correctXPath, XmlUtil.readXmlToDocument("<top><innerText>value</innerText></top>"), XPathConstants.NODE);
+ assertEquals("value", ((Element) value).getTextContent());
+ }
+}
\ No newline at end of file
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
+//import javax.xml.bind.annotation.XmlElementWrapper;
import java.io.Serializable;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
* healthmonitor_id String
* admin_state_up Bool
* status String
- * members List <String>
+ * members List <NeutronLoadBalancerPoolMember>
* http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
*/
@XmlElement (name="status")
String loadBalancerPoolStatus;
- @XmlElement (name="members")
- List loadBalancerPoolMembers;
-
- HashMap<String, NeutronLoadBalancerPoolMember> member;
+ @XmlElement(name="members")
+ List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers;
public NeutronLoadBalancerPool() {
- member = new HashMap<String, NeutronLoadBalancerPoolMember>();
+ loadBalancerPoolMembers = new java.util.ArrayList<NeutronLoadBalancerPoolMember>();
}
public String getLoadBalancerPoolID() {
this.loadBalancerPoolStatus = loadBalancerPoolStatus;
}
- public List getLoadBalancerPoolMembers() {
+ public List<NeutronLoadBalancerPoolMember> getLoadBalancerPoolMembers() {
+ /*
+ * Update the pool_id of the member to that this.loadBalancerPoolID
+ */
+ if (loadBalancerPoolMembers != null) {
+ for (NeutronLoadBalancerPoolMember member: loadBalancerPoolMembers) {
+ member.setPoolID(loadBalancerPoolID);
+ }
+ }
return loadBalancerPoolMembers;
}
- public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) {
+ public void setLoadBalancerPoolMembers(List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers) {
this.loadBalancerPoolMembers = loadBalancerPoolMembers;
}
+ public void addLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+ this.loadBalancerPoolMembers.add(loadBalancerPoolMember);
+ }
+
+ public void removeLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+ this.loadBalancerPoolMembers.remove(loadBalancerPoolMember);
+ }
+
public NeutronLoadBalancerPool extractFields(List<String> fields) {
NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool();
Iterator<String> i = fields.iterator();
}
return ans;
}
-}
\ No newline at end of file
+}
import org.opendaylight.controller.configuration.ConfigurationObject;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
import java.io.Serializable;
import java.util.Iterator;
import java.util.List;
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
public class NeutronLoadBalancerPoolMember extends ConfigurationObject implements Serializable {
private static final long serialVersionUID = 1L;
@XmlElement (name="status")
String poolMemberStatus;
+ String poolID;
+
public NeutronLoadBalancerPoolMember() {
}
+ @XmlTransient
+ public String getPoolID() {
+ return poolID;
+ }
+
+ public void setPoolID(String poolID) {
+ this.poolID = poolID;
+ }
+
public String getPoolMemberID() {
return poolMemberID;
}
if (s.equals("id")) {
ans.setPoolMemberID(this.getPoolMemberID());
}
+ if (s.equals("pool_id")) {
+ ans.setPoolID(this.getPoolID());
+ }
if (s.equals("tenant_id")) {
ans.setPoolMemberTenantID(this.getPoolMemberTenantID());
}
@Override public String toString() {
return "NeutronLoadBalancerPoolMember{" +
"poolMemberID='" + poolMemberID + '\'' +
+ ", poolID='" + poolID + '\'' +
", poolMemberTenantID='" + poolMemberTenantID + '\'' +
", poolMemberAddress='" + poolMemberAddress + '\'' +
", poolMemberProtoPort=" + poolMemberProtoPort +
import java.util.List;
/**
- * Neutron Northbound REST APIs for LoadBalancer Policies.<br>
- * This class provides REST APIs for managing neutron LoadBalancer Policies
+ * Neutron Northbound REST APIs for LoadBalancers.<br>
+ * This class provides REST APIs for managing neutron LoadBalancers
*
* <br>
* <br>
@QueryParam("page_reverse") String pageReverse
// sorting not supported
) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
- this);
- // INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this);
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- List<NeutronLoadBalancer> allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers();
+ List<NeutronLoadBalancer> allLoadBalancers = loadBalancerInterface.getAllNeutronLoadBalancers();
// List<NeutronLoadBalancerRule> allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules();
List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
// List<NeutronLoadBalancerRule> rules = new ArrayList<NeutronLoadBalancerRule>();
/**
* Returns a specific LoadBalancer */
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@GET
@Produces({ MediaType.APPLICATION_JSON })
@ResponseCode(code = 401, condition = "Unauthorized"),
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented") })
- public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+ public Response showLoadBalancer(@PathParam("loadBalancerID") String loadBalancerID,
// return fields
@QueryParam("fields") List<String> fields) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
if (fields.size() > 0) {
- NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer ans = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
return Response.status(200).entity(
new NeutronLoadBalancerRequest(extractFields(ans, fields))).build();
} else {
- return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID))).build();
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID))).build();
}
}
@ResponseCode(code = 409, condition = "Conflict"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response createLoadBalancers(final NeutronLoadBalancerRequest input) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* Verify that the LoadBalancer doesn't already exist.
*/
- if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
+ if (loadBalancerInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
throw new BadRequestException("LoadBalancer UUID already exists");
}
- loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
-
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {
}
}
}
- loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+ loadBalancerInterface.addNeutronLoadBalancer(singleton);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
NeutronLoadBalancer test = i.next();
/*
- * Verify that the firewall policy doesn't already exist
+ * Verify that the loadbalancer doesn't already exist
*/
- if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
+ if (loadBalancerInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
throw new BadRequestException("Load Balancer Pool UUID already is already created");
}
if (testMap.containsKey(test.getLoadBalancerID())) {
i = bulk.iterator();
while (i.hasNext()) {
NeutronLoadBalancer test = i.next();
- loadBalancerPoolInterface.addNeutronLoadBalancer(test);
+ loadBalancerInterface.addNeutronLoadBalancer(test);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
/**
* Updates a LoadBalancer Policy
*/
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@PUT
@Produces({ MediaType.APPLICATION_JSON })
@Consumes({ MediaType.APPLICATION_JSON })
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response updateLoadBalancer(
- @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ @PathParam("loadBalancerID") String loadBalancerID, final NeutronLoadBalancerRequest input) {
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* verify the LoadBalancer exists and there is only one delta provided
*/
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
if (!input.isSingleton()) {
throw new BadRequestException("Only singleton edit supported");
}
NeutronLoadBalancer delta = input.getSingleton();
- NeutronLoadBalancer original = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer original = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
/*
* updates restricted by Neutron
/*
* update the object and return it
*/
- loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta);
- NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID);
+ loadBalancerInterface.updateNeutronLoadBalancer(loadBalancerID, delta);
+ NeutronLoadBalancer updatedLoadBalancer = loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
service.neutronLoadBalancerUpdated(updatedLoadBalancer);
}
}
- return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID))).build();
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID))).build();
}
/**
* Deletes a LoadBalancer */
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@DELETE
@StatusCodes({
@ResponseCode(code = 204, condition = "No Content"),
@ResponseCode(code = 409, condition = "Conflict"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response deleteLoadBalancer(
- @PathParam("loadBalancerPoolID") String loadBalancerPoolID) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ @PathParam("loadBalancerID") String loadBalancerID) {
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* verify the LoadBalancer exists and it isn't currently in use
*/
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
- if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) {
+ if (loadBalancerInterface.neutronLoadBalancerInUse(loadBalancerID)) {
return Response.status(409).build();
}
- NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer singleton = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {
}
}
- loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID);
+ loadBalancerInterface.removeNeutronLoadBalancer(loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
import javax.xml.bind.annotation.XmlElement;
import java.util.List;
-public class INeutronLoadBalancerPoolMemberRequest {
+public class NeutronLoadBalancerPoolMemberRequest {
/**
* See OpenStack Network API v2.0 Reference for description of
@XmlElement(name="members")
List<NeutronLoadBalancerPoolMember> bulkRequest;
- INeutronLoadBalancerPoolMemberRequest() {
+ NeutronLoadBalancerPoolMemberRequest() {
}
- INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+ NeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
bulkRequest = bulk;
singletonLoadBalancerPoolMember = null;
}
- INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+ NeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
singletonLoadBalancerPoolMember = group;
}
/*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
*/
package org.opendaylight.controller.networkconfig.neutron.northbound;
import org.codehaus.enunciate.jaxrs.ResponseCode;
import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
-import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
import org.opendaylight.controller.sal.utils.ServiceHelper;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
-
-@Path("/pools/{loadBalancerPoolID}/members")
+@Path("/pools/{loadBalancerPoolUUID}/members")
public class NeutronLoadBalancerPoolMembersNorthbound {
-
private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List<String> fields) {
return o.extractFields(fields);
}
/**
- * Returns a list of all LoadBalancerPool
+ * Returns a list of all LoadBalancerPoolMembers in specified pool
*/
@GET
@Produces({MediaType.APPLICATION_JSON})
@ResponseCode(code = 501, condition = "Not Implemented")})
public Response listMembers(
+ //Path param
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+
// return fields
@QueryParam("fields") List<String> fields,
+
// OpenStack LoadBalancerPool attributes
@QueryParam("id") String queryLoadBalancerPoolMemberID,
@QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID,
@QueryParam("page_reverse") String pageReverse
// sorting not supported
) {
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces
- .getINeutronLoadBalancerPoolMemberCRUD(this);
- if (loadBalancerPoolMemberInterface == null) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface
- .getAllNeutronLoadBalancerPoolMembers();
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
- Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ Iterator<NeutronLoadBalancerPoolMember> i = members.iterator();
while (i.hasNext()) {
NeutronLoadBalancerPoolMember nsg = i.next();
if ((queryLoadBalancerPoolMemberID == null ||
queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) &&
+ loadBalancerPoolUUID.equals(nsg.getPoolID()) &&
(queryLoadBalancerPoolMemberTenantID == null ||
queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) &&
(queryLoadBalancerPoolMemberAddress == null ||
}
}
return Response.status(200).entity(
- new INeutronLoadBalancerPoolMemberRequest(ans)).build();
+ new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+}
+
+/**
+ * Returns a specific LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@GET
+@Produces({ MediaType.APPLICATION_JSON })
+//@TypeHint(OpenStackLoadBalancerPoolMembers.class)
+@StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response showLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+ // return fields
+ @QueryParam("fields") List<String> fields ) {
+
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember ans: members) {
+ if (!ans.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ continue;
+
+ if (fields.size() > 0) {
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolMemberRequest(extractFields(ans, fields))).build();
+ } else {
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+ }
+ }
+ return Response.status(204).build();
}
/**
* Adds a Member to an LBaaS Pool member
*/
-@Path("/pools/{loadBalancerPoolID}/members")
@PUT
@Produces({MediaType.APPLICATION_JSON})
@Consumes({MediaType.APPLICATION_JSON})
@ResponseCode(code = 401, condition = "Unauthorized"),
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented")})
-public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberRequest input) {
+public Response createLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ final NeutronLoadBalancerPoolMemberRequest input) {
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(
- this);
- if (loadBalancerPoolMemberInterface == null) {
- throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface "
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
+ // Verify that the loadBalancerPool exists, for the member to be added to its cache
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+
if (input.isSingleton()) {
NeutronLoadBalancerPoolMember singleton = input.getSingleton();
+ singleton.setPoolID(loadBalancerPoolUUID);
+ String loadBalancerPoolMemberUUID = singleton.getPoolMemberID();
/*
* Verify that the LoadBalancerPoolMember doesn't already exist.
*/
- if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
- singleton.getPoolMemberID())) {
- throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+ List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
}
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
if (instances != null) {
}
}
}
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
service.neutronLoadBalancerPoolMemberCreated(singleton);
}
}
+
+ /**
+ * Add the member from the neutron load balancer pool as well
+ */
+ singletonPool.addLoadBalancerPoolMember(singleton);
+
} else {
List<NeutronLoadBalancerPoolMember> bulk = input.getBulk();
Iterator<NeutronLoadBalancerPoolMember> i = bulk.iterator();
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
while (i.hasNext()) {
NeutronLoadBalancerPoolMember test = i.next();
+ String loadBalancerPoolMemberUUID = test.getPoolMemberID();
/*
- * Verify that the firewall doesn't already exist
+ * Verify that the LoadBalancerPoolMember doesn't already exist.
*/
-
- if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
- test.getPoolMemberID())) {
- throw new BadRequestException("Load Balancer PoolMember UUID already is already created");
+ List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
}
+
if (testMap.containsKey(test.getPoolMemberID())) {
throw new BadRequestException("Load Balancer PoolMember UUID already exists");
}
i = bulk.iterator();
while (i.hasNext()) {
NeutronLoadBalancerPoolMember test = i.next();
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
service.neutronLoadBalancerPoolMemberCreated(test);
}
}
+ singletonPool.addLoadBalancerPoolMember(test);
}
}
return Response.status(201).entity(input).build();
}
+
+/**
+ * Updates a LB member pool
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@PUT
+@Produces({ MediaType.APPLICATION_JSON })
+@Consumes({ MediaType.APPLICATION_JSON })
+@StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response updateLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+ final NeutronLoadBalancerPoolMemberRequest input) {
+
+ //TODO: Implement update LB member pool
+ return Response.status(501).entity(input).build();
+}
+
+/**
+ * Deletes a LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@DELETE
+@StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response deleteLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ // Verify that the loadBalancerPool exists, for the member to be removed from its cache
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+
+ //Verify that the LB pool member exists
+ NeutronLoadBalancerPoolMember singleton = null;
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) {
+ singleton = member;
+ break;
+ }
+ }
+ if (singleton == null) {
+ // A missing member is a not-found condition: the @ResponseCode annotation
+ // above advertises 404, and the pool check just above already uses
+ // ResourceNotFoundException, so use it here too instead of BadRequestException.
+ throw new ResourceNotFoundException("LoadBalancerPoolMember UUID does not exist.");
+ }
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ service.neutronLoadBalancerPoolMemberDeleted(singleton);
+ }
+ }
+
+ /**
+ * Remove the member from the neutron load balancer pool
+ */
+ NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ singletonPool.removeLoadBalancerPoolMember(singleton);
+
+ return Response.status(204).build();
+}
}
/*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
*/
package org.opendaylight.controller.networkconfig.neutron.northbound;
import org.codehaus.enunciate.jaxrs.StatusCodes;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
import org.opendaylight.controller.sal.utils.ServiceHelper;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
* http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
*
*/
+
+/**
+ * For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD,
+ * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache.
+ * TODO: Consolidate and maintain a single copy
+ */
+
@Path("/pools")
public class NeutronLoadBalancerPoolNorthbound {
@QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID,
@QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp,
@QueryParam("status") String queryLoadBalancerPoolStatus,
- @QueryParam("members") List queryLoadBalancerPoolMembers,
+ @QueryParam("members") List<NeutronLoadBalancerPoolMember> queryLoadBalancerPoolMembers,
// pagination
@QueryParam("limit") String limit,
@QueryParam("marker") String marker,
NeutronLoadBalancerPool test = i.next();
/*
- * Verify that the firewall doesn't already exist
+ * Verify that the loadBalancerPool doesn't already exist
*/
if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) {
}
return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
}
+
+ /**
+ * Deletes a LoadBalancerPool
+ */
+
+ @Path("{loadBalancerPoolUUID}")
+ @DELETE
+ @StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response deleteLoadBalancerPool(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancerPool exists and it isn't currently in use
+ */
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+ }
+ if (loadBalancerPoolInterface.neutronLoadBalancerPoolInUse(loadBalancerPoolUUID)) {
+ return Response.status(409).build();
+ }
+ NeutronLoadBalancerPool singleton = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPool(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * remove it and return 204 status
+ */
+ loadBalancerPoolInterface.removeNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ service.neutronLoadBalancerPoolDeleted(singleton);
+ }
+ }
+
+ /*
+ * remove corresponding members from the member cache too
+ */
+ INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this);
+ if (loadBalancerPoolMemberInterface != null) {
+ List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new
+ ArrayList<NeutronLoadBalancerPoolMember>(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers());
+ Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPoolMember member = i.next();
+ // Compare Strings by value: '==' tests reference identity and is
+ // essentially never true for a deserialized path parameter, so the
+ // member cache would never actually be purged.
+ if (loadBalancerPoolUUID.equals(member.getPoolID())) {
+ loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID());
+ }
+ }
+ }
+ return Response.status(204).build();
+ }
}
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
end += rawPayload.length;
}
int checksumStartByte = start + getfieldOffset(CHECKSUM) / NetUtils.NumBitsInAByte;
+ int even = end & ~1;
- for (int i = start; i <= (end - 1); i = i + 2) {
+ for (int i = start; i < even; i = i + 2) {
// Skip, if the current bytes are checkSum bytes
if (i == checksumStartByte) {
continue;
wordData = ((data[i] << 8) & 0xFF00) + (data[i + 1] & 0xFF);
sum = sum + wordData;
}
- carry = (sum >> 16) & 0xFF;
+ if (even < end) {
+ // Add the last octet with zero padding.
+ wordData = (data[even] << 8) & 0xFF00;
+ sum = sum + wordData;
+ }
+
+ carry = sum >>> 16;
finalSum = (sum & 0xFFFF) + carry;
return (short) ~((short) finalSum & 0xFFFF);
}
*/
public void setHeaderField(String headerField, byte[] readValue) {
if (headerField.equals(PROTOCOL)) {
- payloadClass = protocolClassMap.get(readValue[0]);
+ // Don't set payloadClass if fragment offset is not zero.
+ byte[] fragoff = hdrFieldsMap.get(FRAGOFFSET);
+ if (fragoff == null || BitBufferHelper.getShort(fragoff) == 0) {
+ payloadClass = protocolClassMap.get(readValue[0]);
+ }
+ } else if (headerField.equals(FRAGOFFSET)) {
+ if (readValue != null && BitBufferHelper.getShort(readValue) != 0) {
+ // Clear payloadClass because protocol header is not present
+ // in this packet.
+ payloadClass = null;
+ }
} else if (headerField.equals(OPTIONS) &&
(readValue == null || readValue.length == 0)) {
hdrFieldsMap.remove(headerField);
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.controller.sal.packet;
+import java.util.Arrays;
+
import junit.framework.Assert;
import org.junit.Test;
(byte) 0x2b, (byte) 0x2c, (byte) 0x2d, (byte) 0x2e,
(byte) 0x2f, (byte) 0x30, (byte) 0x31, (byte) 0x32,
(byte) 0x33, (byte) 0x34, (byte) 0x35, (byte) 0x36, (byte) 0x37 };
+ serializeTest(icmpRawPayload, (short)0xe553);
+
+ serializeTest(null, (short)0xb108);
+ serializeTest(new byte[0], (short)0xb108);
+
+ byte[] odd = {
+ (byte)0xba, (byte)0xd4, (byte)0xc7, (byte)0x53,
+ (byte)0xf8, (byte)0x59, (byte)0x68, (byte)0x77,
+ (byte)0xfd, (byte)0x27, (byte)0xe0, (byte)0x5b,
+ (byte)0xd0, (byte)0x2e, (byte)0x28, (byte)0x41,
+ (byte)0xa3, (byte)0x48, (byte)0x5d, (byte)0x2e,
+ (byte)0x7d, (byte)0x5b, (byte)0xd3, (byte)0x60,
+ (byte)0xb3, (byte)0x88, (byte)0x8d, (byte)0x0f,
+ (byte)0x1d, (byte)0x87, (byte)0x51, (byte)0x0f,
+ (byte)0x6a, (byte)0xff, (byte)0xf7, (byte)0xd4,
+ (byte)0x40, (byte)0x35, (byte)0x4e, (byte)0x01,
+ (byte)0x36,
+ };
+ serializeTest(odd, (short)0xd0ad);
+
+ // Large payload that causes 16-bit checksum overflow more than
+ // 255 times.
+ byte[] largeEven = new byte[1024];
+ Arrays.fill(largeEven, (byte)0xff);
+ serializeTest(largeEven, (short)0xb108);
+
+ byte[] largeOdd = new byte[1021];
+ Arrays.fill(largeOdd, (byte)0xff);
+ serializeTest(largeOdd, (short)0xb207);
+ }
- short checksum = (short)0xe553;
-
- // Create ICMP object
+ private void serializeTest(byte[] payload, short checksum)
+ throws PacketException {
ICMP icmp = new ICMP();
- icmp.setType((byte)8);
- icmp.setCode((byte)0);
- icmp.setIdentifier((short) 0x46f5);
- icmp.setSequenceNumber((short) 2);
- icmp.setRawPayload(icmpRawPayload);
- //icmp.setChecksum(checksum);
+ icmp.setType((byte)8).setCode((byte)0).
+ setIdentifier((short)0x46f5).setSequenceNumber((short)2);
+ int payloadSize = 0;
+ if (payload != null) {
+ icmp.setRawPayload(payload);
+ payloadSize = payload.length;
+ }
// Serialize
- byte[] stream = icmp.serialize();
- Assert.assertTrue(stream.length == 64);
+ byte[] data = icmp.serialize();
+ Assert.assertEquals(payloadSize + 8, data.length);
// Deserialize
ICMP icmpDes = new ICMP();
- icmpDes.deserialize(stream, 0, stream.length);
+ icmpDes.deserialize(data, 0, data.length);
Assert.assertFalse(icmpDes.isCorrupted());
- Assert.assertTrue(icmpDes.getChecksum() == checksum);
- Assert.assertTrue(icmp.equals(icmpDes));
+ Assert.assertEquals(checksum, icmpDes.getChecksum());
+ Assert.assertEquals(icmp, icmpDes);
}
}
import java.net.UnknownHostException;
import java.util.Arrays;
-import junit.framework.Assert;
-
+import org.junit.Assert;
import org.junit.Test;
+
import org.opendaylight.controller.sal.match.Match;
import org.opendaylight.controller.sal.match.MatchType;
import org.opendaylight.controller.sal.utils.EtherTypes;
Assert.assertEquals(protocol, (byte) match.getField(MatchType.NW_PROTO).getValue());
Assert.assertEquals(tos, (byte) match.getField(MatchType.NW_TOS).getValue());
}
+
+ @Test
+ public void testFragment() throws Exception {
+ byte[] payload1 = new byte[0];
+ byte[] payload2 = {
+ (byte)0x61, (byte)0xd1, (byte)0x3d, (byte)0x51,
+ (byte)0x1b, (byte)0x75, (byte)0xa7, (byte)0x83,
+ };
+ byte[] payload3 = {
+ (byte)0xe7, (byte)0x0f, (byte)0x2d, (byte)0x7e,
+ (byte)0x15, (byte)0xba, (byte)0xe7, (byte)0x6d,
+ (byte)0xb5, (byte)0xc5, (byte)0xb5, (byte)0x37,
+ (byte)0x59, (byte)0xbc, (byte)0x91, (byte)0x43,
+ (byte)0xb5, (byte)0xb7, (byte)0xe4, (byte)0x28,
+ (byte)0xec, (byte)0x62, (byte)0x6b, (byte)0x6a,
+ (byte)0xd1, (byte)0xcb, (byte)0x79, (byte)0x1e,
+ (byte)0xfc, (byte)0x82, (byte)0xf5, (byte)0xb4,
+ };
+
+ // Ensure that the payload is not deserialized if the fragment offset
+ // is not zero.
+ byte proto = IPProtocols.TCP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf250);
+ fragmentTest(payload2, proto, (short)0xf248);
+ fragmentTest(payload3, proto, (short)0xf230);
+
+ proto = IPProtocols.UDP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf245);
+ fragmentTest(payload2, proto, (short)0xf23d);
+ fragmentTest(payload3, proto, (short)0xf225);
+
+ proto = IPProtocols.ICMP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf255);
+ fragmentTest(payload2, proto, (short)0xf24d);
+ fragmentTest(payload3, proto, (short)0xf235);
+
+ // Ensure that the protocol header in the first fragment is
+ // deserialized.
+ proto = IPProtocols.TCP.byteValue();
+ TCP tcp = new TCP();
+ tcp.setSourcePort((short)1234).setDestinationPort((short)32000).
+ setSequenceNumber((int)0xd541f5f8).setAckNumber((int)0x58da787d).
+ setDataOffset((byte)5).setReserved((byte)0).
+ setHeaderLenFlags((short)0x18).setWindowSize((short)0x40e8).
+ setUrgentPointer((short)0x15f7).setChecksum((short)0x0d4e);
+ firstFragmentTest(tcp, payload1, proto, (short)0xdfe6);
+ tcp.setChecksum((short)0xab2a);
+ firstFragmentTest(tcp, payload2, proto, (short)0xdfde);
+ tcp.setChecksum((short)0x1c75);
+ firstFragmentTest(tcp, payload3, proto, (short)0xdfc6);
+
+ proto = IPProtocols.UDP.byteValue();
+ UDP udp = new UDP();
+ udp.setSourcePort((short)53).setDestinationPort((short)45383).
+ setLength((short)(payload1.length + 8)).setChecksum((short)0);
+ firstFragmentTest(udp, payload1, proto, (short)0xdfe7);
+ udp.setLength((short)(payload2.length + 8));
+ firstFragmentTest(udp, payload2, proto, (short)0xdfdf);
+ udp.setLength((short)(payload3.length + 8));
+ firstFragmentTest(udp, payload3, proto, (short)0xdfc7);
+
+ proto = IPProtocols.ICMP.byteValue();
+ ICMP icmp = new ICMP();
+ icmp.setType((byte)8).setCode((byte)0).setIdentifier((short)0x3d1e).
+ setSequenceNumber((short)1);
+ firstFragmentTest(icmp, payload1, proto, (short)0xdff7);
+ firstFragmentTest(icmp, payload2, proto, (short)0xdfef);
+ firstFragmentTest(icmp, payload3, proto, (short)0xdfd7);
+ }
+
+ private void fragmentTest(byte[] payload, byte proto, short checksum)
+ throws Exception {
+ // Construct a fragmented raw IPv4 packet.
+ int ipv4Len = 20;
+ byte[] rawIp = new byte[ipv4Len + payload.length];
+
+ byte ipVersion = 4;
+ byte dscp = 35;
+ byte ecn = 2;
+ byte tos = (byte)((dscp << 2) | ecn);
+ short totalLen = (short)rawIp.length;
+ short id = 22143;
+ short offset = 0xb9;
+ byte ttl = 64;
+ byte[] srcIp = {(byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x01};
+ byte[] dstIp = {(byte)0xc0, (byte)0xa9, (byte)0x66, (byte)0x23};
+
+ rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+ rawIp[1] = tos;
+ rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+ rawIp[3] = (byte)totalLen;
+ rawIp[4] = (byte)(id >>> Byte.SIZE);
+ rawIp[5] = (byte)id;
+ rawIp[6] = (byte)(offset >>> Byte.SIZE);
+ rawIp[7] = (byte)offset;
+ rawIp[8] = ttl;
+ rawIp[9] = proto;
+ rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+ rawIp[11] = (byte)checksum;
+ System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+ System.arraycopy(dstIp, 0, rawIp, 16, srcIp.length);
+ System.arraycopy(payload, 0, rawIp, ipv4Len, payload.length);
+
+ // Deserialize.
+ IPv4 ipv4 = new IPv4();
+ ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+ Assert.assertEquals(ipVersion, ipv4.getVersion());
+ Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+ Assert.assertEquals(dscp, ipv4.getDiffServ());
+ Assert.assertEquals(ecn, ipv4.getECN());
+ Assert.assertEquals(totalLen, ipv4.getTotalLength());
+ Assert.assertEquals(id, ipv4.getIdentification());
+ Assert.assertEquals((byte)0, ipv4.getFlags());
+ Assert.assertEquals(offset, ipv4.getFragmentOffset());
+ Assert.assertEquals(ttl, ipv4.getTtl());
+ Assert.assertEquals(proto, ipv4.getProtocol());
+ Assert.assertEquals(checksum, ipv4.getChecksum());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+ ipv4.getSourceAddress());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+ ipv4.getDestinationAddress());
+ Assert.assertFalse(ipv4.isCorrupted());
+
+ // payloadClass should not be set if fragment offset is not zero.
+ Assert.assertEquals(null, ipv4.getPayload());
+ Assert.assertArrayEquals(payload, ipv4.getRawPayload());
+ }
+
+ private void firstFragmentTest(Packet payload, byte[] rawPayload,
+ byte proto, short checksum)
+ throws Exception {
+ // Construct a raw IPv4 packet with MF flag.
+ int ipv4Len = 20;
+ payload.setRawPayload(rawPayload);
+ byte[] payloadBytes = payload.serialize();
+ byte[] rawIp = new byte[ipv4Len + payloadBytes.length];
+
+ byte ipVersion = 4;
+ byte dscp = 13;
+ byte ecn = 1;
+ byte tos = (byte)((dscp << 2) | ecn);
+ short totalLen = (short)rawIp.length;
+ short id = 19834;
+ byte flags = 0x1;
+ short offset = 0;
+ short off = (short)(((short)flags << 13) | offset);
+ byte ttl = 64;
+ byte[] srcIp = {(byte)0xac, (byte)0x23, (byte)0x5b, (byte)0xfd};
+ byte[] dstIp = {(byte)0xc0, (byte)0xa8, (byte)0x64, (byte)0x71};
+
+ rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+ rawIp[1] = tos;
+ rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+ rawIp[3] = (byte)totalLen;
+ rawIp[4] = (byte)(id >>> Byte.SIZE);
+ rawIp[5] = (byte)id;
+ rawIp[6] = (byte)(off >>> Byte.SIZE);
+ rawIp[7] = (byte)off;
+ rawIp[8] = ttl;
+ rawIp[9] = proto;
+ rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+ rawIp[11] = (byte)checksum;
+ System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+ System.arraycopy(dstIp, 0, rawIp, 16, srcIp.length);
+ System.arraycopy(payloadBytes, 0, rawIp, ipv4Len, payloadBytes.length);
+
+ // Deserialize.
+ IPv4 ipv4 = new IPv4();
+ ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+ Assert.assertEquals(ipVersion, ipv4.getVersion());
+ Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+ Assert.assertEquals(dscp, ipv4.getDiffServ());
+ Assert.assertEquals(ecn, ipv4.getECN());
+ Assert.assertEquals(totalLen, ipv4.getTotalLength());
+ Assert.assertEquals(id, ipv4.getIdentification());
+ Assert.assertEquals(flags, ipv4.getFlags());
+ Assert.assertEquals(offset, ipv4.getFragmentOffset());
+ Assert.assertEquals(ttl, ipv4.getTtl());
+ Assert.assertEquals(proto, ipv4.getProtocol());
+ Assert.assertEquals(checksum, ipv4.getChecksum());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+ ipv4.getSourceAddress());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+ ipv4.getDestinationAddress());
+ Assert.assertFalse(ipv4.isCorrupted());
+
+ // Protocol header in the first fragment should be deserialized.
+ Assert.assertEquals(null, ipv4.getRawPayload());
+
+ Packet desPayload = ipv4.getPayload();
+ Assert.assertEquals(payload, desPayload);
+ Assert.assertFalse(desPayload.isCorrupted());
+ Assert.assertArrayEquals(rawPayload, desPayload.getRawPayload());
+ }
}
package org.opendaylight.controller.topologymanager.internal;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.util.ArrayList;
-import java.util.Dictionary;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.LinkedBlockingQueue;
-
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.felix.dm.Component;
import org.eclipse.osgi.framework.console.CommandInterpreter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.LinkedBlockingQueue;
+
/**
* The class describes TopologyManager which is the central repository of the
* network topology. It provides service for applications to interact with
// all except the creation time stamp because that should
// be set only when the edge is created
TimeStamp timeStamp = null;
- for (Property prop : oldProps) {
- if (prop instanceof TimeStamp) {
- TimeStamp tsProp = (TimeStamp) prop;
- if (tsProp.getTimeStampName().equals("creation")) {
- timeStamp = tsProp;
- break;
+ if (oldProps != null) {
+ for (Property prop : oldProps) {
+ if (prop instanceof TimeStamp) {
+ TimeStamp tsProp = (TimeStamp) prop;
+ if (tsProp.getTimeStampName().equals("creation")) {
+ timeStamp = tsProp;
+ break;
+ }
}
}
}
if (prop instanceof TimeStamp) {
TimeStamp t = (TimeStamp) prop;
if (t.getTimeStampName().equals("creation")) {
- i.remove();
+ if (timeStamp != null) {
+ i.remove();
+ }
break;
}
}
package org.opendaylight.controller.topologymanager.internal;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.ConstructionException;
+import org.opendaylight.controller.sal.core.Description;
import org.opendaylight.controller.sal.core.Edge;
import org.opendaylight.controller.sal.core.Host;
import org.opendaylight.controller.sal.core.Latency;
import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
import org.opendaylight.controller.sal.core.Property;
import org.opendaylight.controller.sal.core.State;
+import org.opendaylight.controller.sal.core.TimeStamp;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.packet.address.EthernetAddress;
import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.switchmanager.SwitchConfig;
import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
public class TopologyManagerImplTest {
/**
* Mockup of switch manager that only maintains existence of node
Assert.assertTrue(nodeNCmap.isEmpty());
}
+
+    // Regression test for bug 1348: edgeUpdate() with UpdateType.CHANGED for an
+    // edge that was never CREATED used to NPE when iterating the (null) set of
+    // previously-stored properties. NOTE(review): this appears to exercise the
+    // "oldProps != null" guard added in TopologyManagerImpl — confirm against
+    // the bug 1348 report.
+    @Test
+    public void bug1348FixTest() throws ConstructionException {
+        TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+        TestSwitchManager swMgr = new TestSwitchManager();
+        topoManagerImpl.setSwitchManager(swMgr);
+        topoManagerImpl.nonClusterObjectCreate();
+
+        // Build an edge between connectors on two distinct OF nodes; no
+        // CREATED update is ever sent for it, so the manager holds no prior
+        // properties for this edge.
+        NodeConnector headnc1 = NodeConnectorCreator.createOFNodeConnector(
+                (short) 1, NodeCreator.createOFNode(1000L));
+        NodeConnector tailnc1 = NodeConnectorCreator.createOFNodeConnector(
+                (short) 2, NodeCreator.createOFNode(2000L));
+        Edge edge = new Edge(headnc1, tailnc1);
+        List<TopoEdgeUpdate> updatedEdges = new ArrayList<>();
+        Set<Property> edgeProps = new HashSet<>();
+        // Include a "creation" TimeStamp: that is the property name the
+        // manager's CHANGED path scans old properties for.
+        edgeProps.add(new TimeStamp(System.currentTimeMillis(), "creation"));
+        edgeProps.add(new Latency(Latency.LATENCY100ns));
+        edgeProps.add(new State(State.EDGE_UP));
+        edgeProps.add(new Bandwidth(Bandwidth.BW100Gbps));
+        edgeProps.add(new Description("Test edge"));
+        updatedEdges.add(new TopoEdgeUpdate(edge, edgeProps, UpdateType.CHANGED));
+
+        // A CHANGED update with no prior state must be handled gracefully,
+        // not raise (the pre-fix behavior was a NullPointerException).
+        try {
+            topoManagerImpl.edgeUpdate(updatedEdges);
+        } catch (Exception e) {
+            Assert.fail("Exception was raised when trying to update edge properties: " + e.getMessage());
+        }
+
+        // The CHANGED update should have installed the edge with its properties.
+        Assert.assertEquals(1, topoManagerImpl.getEdges().size());
+        Assert.assertNotNull(topoManagerImpl.getEdges().get(edge));
+    }
}