<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-provider</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
</feature>
+
+ <feature name='odl-clustering-test-app' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+ <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
+ <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
+ </feature>
</features>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>net.sf.jung2</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.antlr</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.moxy</artifactId>
+ </dependency>
</dependencies>
<build>
<resources>
<bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.neutron.northbound/${networkconfig.neutron.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
<ignorePermissions>false</ignorePermissions>
</configuration>
</execution>
+ <execution>
+ <id>copy-dependencies</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/assembly/system</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>true</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ <useRepositoryLayout>true</useRepositoryLayout>
+ <addParentPoms>true</addParentPoms>
+ <copyPom>true</copyPom>
+ </configuration>
+ </execution>
</executions>
</plugin>
<plugin>
<sonar.language>java</sonar.language>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
- <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
+ <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+ <sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
<yang-ext.version>2013.09.07.4-SNAPSHOT</yang-ext.version>
<yang-jmx-generator.version>1.0.0-SNAPSHOT</yang-jmx-generator.version>
<yangtools.version>0.6.2-SNAPSHOT</yangtools.version>
- <sshd-core.version>0.12.0</sshd-core.version>
+ <sshd-core.version>0.12.0</sshd-core.version>
+ <jmh.version>0.9.7</jmh.version>
</properties>
<dependencyManagement>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <!-- JMH Benchmark dependencies -->
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
</dependencies>
</dependencyManagement>
--- /dev/null
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+# script; for example karaf, start, stop, admin, client, ...
+#
+# if [ "$KARAF_SCRIPT" = "SCRIPT_NAME" ]; then
+# Actions go here...
+# fi
+
+#
+# general settings which should be applied for all scripts go here; please keep
+# in mind that it is possible that scripts might be executed more than once, e.g.
+# in example of the start script where the start script is executed first and the
+# karaf script afterwards.
+#
+
+#
+# The following section shows the possible configuration options for the default
+# karaf scripts
+#
+# export JAVA_HOME # Location of Java installation
+# export JAVA_MIN_MEM # Minimum memory for the JVM
+# export JAVA_MAX_MEM # Maximum memory for the JVM
+# export JAVA_PERM_MEM # Minimum perm memory for the JVM
+# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM
+# export KARAF_HOME # Karaf home folder
+# export KARAF_DATA # Karaf data folder
+# export KARAF_BASE # Karaf base folder
+# export KARAF_ETC # Karaf etc folder
+# export KARAF_OPTS # Additional available Karaf options
+# export KARAF_DEBUG # Enable debug mode
+
+# Default the JVM memory limits when the caller has not set them.
+# Note: the POSIX string-equality operator is "=" — "==" is a bashism and is
+# rejected by strict /bin/sh implementations such as dash, which may source
+# this file via the Karaf launch scripts.
+if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
+ export JAVA_MAX_PERM_MEM="512m"
+fi
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
+ export JAVA_MAX_MEM="2048m"
+fi
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>benchmark-data-store</artifactId>
+
+ <!-- Dependency versions are intentionally omitted here; presumably they are
+ managed by the sal-parent dependencyManagement section — verify that
+ jmh-core/jmh-generator-annprocess are managed there. -->
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-parser-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-inmemory-datastore</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <!-- Launches the JMH runner (org.openjdk.jmh.Main) against every benchmark
+ (the ".*" pattern) on the test classpath during integration-test. -->
+ <!-- NOTE(review): exec-maven-plugin has no <version> here; unless the parent
+ pins it via pluginManagement, the build is not reproducible — confirm. -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <configuration>
+ <classpathScope>test</classpathScope>
+ <executable>java</executable>
+ <arguments>
+ <argument>-classpath</argument>
+ <classpath/>
+ <argument>org.openjdk.jmh.Main</argument>
+ <argument>.*</argument>
+ </arguments>
+ </configuration>
+ <executions>
+ <execution>
+ <id>run-benchmarks</id>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Warmup;
+
+/**
+ * Base class for JMH benchmarks measuring write-transaction performance of
+ * {@link InMemoryDOMDataStore}. Subclasses configure the store's executor
+ * services in {@link #setUp()} and release references in {@link #tearDown()}.
+ *
+ * All instance identifiers and payload nodes are pre-built in static
+ * initializers so the measured benchmark bodies time only the write and the
+ * three-phase commit, not node/path construction.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+    private static final int WARMUP_ITERATIONS = 20;
+    private static final int MEASUREMENT_ITERATIONS = 20;
+
+    // Outer-list sizes: total written inner items is constant (100K) across
+    // the three shapes (100K x 1, 50K x 2, 10K x 10).
+    private static final int OUTER_LIST_100K = 100000;
+    private static final int OUTER_LIST_50K = 50000;
+    private static final int OUTER_LIST_10K = 10000;
+
+    private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
+    private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
+    private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
+
+    /** Pre-computes the keyed outer-list entry path for every key in [0, outerListPathsCount). */
+    private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
+        final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
+
+        for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
+            paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
+                .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+                .build();
+        }
+        return paths;
+    }
+
+    // Shared inner-list payloads; reused for every outer-list entry of a run.
+    private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
+    private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
+    private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
+
+    /** Builds an inner-list map node holding entries keyed 1..count. */
+    private static MapNode initInnerListItems(final int count) {
+        final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
+            .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
+
+        for (int i = 1; i <= count; ++i) {
+            mapEntryBuilder
+                .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
+        }
+        return mapEntryBuilder.build();
+    }
+
+    private static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
+    private static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
+    private static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
+
+    /** Builds outer-list map entries keyed 0..outerListItemsCount-1, each carrying innerList. */
+    private static NormalizedNode<?, ?>[] initOuterListItems(final int outerListItemsCount, final MapNode innerList) {
+        final NormalizedNode<?, ?>[] outerListItems = new NormalizedNode[outerListItemsCount];
+
+        for (int i = 0; i < outerListItemsCount; ++i) {
+            // The loop index doubles as the outer-list key.
+            outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, i)
+                .withChild(innerList).build();
+        }
+        return outerListItems;
+    }
+
+    protected SchemaContext schemaContext;
+    protected InMemoryDOMDataStore domStore;
+
+    /** Creates and initializes {@link #domStore}; subclasses choose the executors. */
+    public abstract void setUp() throws Exception;
+
+    /** Drops references populated in {@link #setUp()}. */
+    public abstract void tearDown();
+
+    /**
+     * Seeds the store with the "test" container holding an empty outer-list,
+     * committing it through the full three-phase commit protocol.
+     */
+    protected void initTestNode() throws Exception {
+        final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
+            .build();
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        writeTx.write(testPath, provideOuterListNode());
+
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    /** @return the "test" container wrapping an empty outer-list map node */
+    private DataContainerChild<?, ?> provideOuterListNode() {
+        return ImmutableContainerNodeBuilder
+            .create()
+            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
+            .withChild(
+                ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
+                    .build()).build();
+    }
+
+    /** 100K single-inner-item writes batched into one transaction/commit. */
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+            writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+        }
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    /** 100K single-inner-item writes, one transaction and commit per write. */
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+            writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+
+            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+            cohort.canCommit().get();
+            cohort.preCommit().get();
+            cohort.commit().get();
+        }
+    }
+
+    /** 50K two-inner-item writes batched into one transaction/commit. */
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+            writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+        }
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    /** 50K two-inner-item writes, one transaction and commit per write. */
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+            writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+            cohort.canCommit().get();
+            cohort.preCommit().get();
+            cohort.commit().get();
+        }
+    }
+
+    /** 10K ten-inner-item writes batched into one transaction/commit. */
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+            writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+        }
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    /** 10K ten-inner-item writes, one transaction and commit per write. */
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+            writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+            cohort.canCommit().get();
+            cohort.preCommit().get();
+            cohort.commit().get();
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+/**
+ * Benchmark Model class loads the odl-datastore-test.yang model from resources.
+ * <br>
+ * This class serves as facilitator class which holds several references to initialized yang model as static final
+ * members.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public final class BenchmarkModel {
+
+    // QNames of the test model; all descend from the "test" container.
+    public static final QName TEST_QNAME = QName
+        .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test");
+    public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+    public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+    public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+    public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+    // Classpath resource holding the model source.
+    private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+    public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+    public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+
+    /**
+     * Opens the benchmark YANG model as a classpath resource stream.
+     * The caller is responsible for closing the returned stream.
+     * (The "final" modifier previously on this method was redundant — the
+     * enclosing class is already final.)
+     */
+    public static InputStream getDatastoreBenchmarkInputStream() {
+        return getInputStream(DATASTORE_TEST_YANG);
+    }
+
+    private static InputStream getInputStream(final String resourceName) {
+        return BenchmarkModel.class.getResourceAsStream(resourceName);
+    }
+
+    /**
+     * Parses the benchmark model and resolves it into a SchemaContext.
+     * Invoked by benchmark setUp() implementations on every trial.
+     */
+    public static SchemaContext createTestContext() {
+        YangParserImpl parser = new YangParserImpl();
+        Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(
+            getDatastoreBenchmarkInputStream()));
+        return parser.resolveSchemaContext(modules);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as BlockingBoundedFastThreadPool
+ * and DOM Store Executor Service as Bounded Single Thread Executor.
+ *
+ * @see org.opendaylight.yangtools.util.concurrent.SpecialExecutors
+ * @see org.opendaylight.controller.md.sal.dom.store.benchmark.AbstractInMemoryDatastoreWriteTransactionBenchmark
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+ private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
+
+ // Trial-level setup: build the store, parse the schema, seed the test node.
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ // Note: this is a bounded SINGLE-thread executor, not a thread pool.
+ final ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
+ MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name );
+
+ domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ // Drop references so each fork/trial starts from a clean state.
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Blocking Bounded Fast Thread Pool
+ * and DOM Store Executor Service as Same Thread Executor.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithSameThreadedExecutorBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+
+ // Trial-level setup: only the data-change-listener path gets a real pool;
+ // DOM store work runs inline on the calling (benchmark) thread via
+ // MoreExecutors.sameThreadExecutor().
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ // Drop references so each fork/trial starts from a clean state.
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Same Thread Executor
+ * and DOM Store Executor Service as Same Thread Executor.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ // Trial-level setup: both executors are same-thread, so every store task
+ // (commit processing and listener dispatch) runs inline on the benchmark
+ // thread — this variant measures pure single-threaded store cost.
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+ MoreExecutors.sameThreadExecutor());
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ // Drop references so each fork/trial starts from a clean state.
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+// Test model for data store benchmarks; presumably loaded from the classpath
+// resource /odl-datastore-test.yang by the benchmark harness — verify.
+module odl-datastore-test {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
+ prefix "store-test";
+
+ revision "2014-03-13" {
+ description "Initial revision.";
+ }
+
+ container test {
+ list outer-list {
+ key id;
+ leaf id {
+ type int32;
+ }
+ choice outer-choice {
+ case one {
+ leaf one {
+ type string;
+ }
+ }
+ case two-three {
+ leaf two {
+ type string;
+ }
+ leaf three {
+ type string;
+ }
+ }
+ }
+ // NOTE: inner-list is declared INSIDE outer-list (a nested child list of
+ // each outer-list entry), not as a sibling of outer-list.
+ list inner-list {
+ key name;
+ leaf name {
+ type int32;
+ }
+ leaf value {
+ type string;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj);
builder.setFlowRef(new FlowRef(identifier));
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalFlowService().removeFlow(builder.build());
if (tableIdValidationPrecondition(tableKey, update)) {
final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
if (tableIdValidationPrecondition(tableKey, addDataObj)) {
final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
final Group group = (removeDataObj);
final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().removeGroup(builder.build());
final Group updatedGroup = (update);
final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
final Group group = (addDataObj);
final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().addGroup(builder.build());
final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().removeMeter(builder.build());
final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().addMeter(builder.build());
<module>sal-binding-dom-it</module>
</modules>
</profile>
+ <profile>
+ <id>benchmarks</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ </activation>
+ <modules>
+ <module>benchmark-data-store</module>
+ </modules>
+ </profile>
</profiles>
</project>
\ No newline at end of file
package org.opendaylight.controller.md.sal.common.api.data;
+import com.google.common.base.Supplier;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-
-import com.google.common.base.Function;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
/**
* A type of TransactionCommitFailedException that indicates a situation that would result in a
* @author Thomas Pantelis
*/
public class TransactionCommitDeadlockException extends TransactionCommitFailedException {
-
private static final long serialVersionUID = 1L;
-
private static final String DEADLOCK_MESSAGE =
"An attempt to block on a ListenableFuture via a get method from a write " +
"transaction submit was detected that would result in deadlock. The commit " +
"result must be obtained asynchronously, e.g. via Futures#addCallback, to avoid deadlock.";
+ private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE);
- public static Function<Void, Exception> DEADLOCK_EXECUTOR_FUNCTION = new Function<Void, Exception>() {
+ public static final Supplier<Exception> DEADLOCK_EXCEPTION_SUPPLIER = new Supplier<Exception>() {
@Override
- public Exception apply(Void notUsed) {
- return new TransactionCommitDeadlockException( DEADLOCK_MESSAGE,
- RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE));
+ public Exception get() {
+ return new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR);
}
};
- public TransactionCommitDeadlockException(String message, final RpcError... errors) {
+ public TransactionCommitDeadlockException(final String message, final RpcError... errors) {
super(message, errors);
}
}
*/
package org.opendaylight.controller.md.sal.common.impl.service;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.AbstractDataModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-
public abstract class AbstractDataTransaction<P extends Path<P>, D extends Object> extends
AbstractDataModification<P, D> {
- private final static Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final ListenableFuture<RpcResult<TransactionStatus>> SUCCESS_FUTURE =
+ Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
private final Object identifier;
private final long allocationTime;
@Override
public Future<RpcResult<TransactionStatus>> commit() {
readyTime = System.nanoTime();
- LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ }
changeStatus(TransactionStatus.SUBMITED);
-
return this.broker.commit(this);
}
}
@Override
- public boolean equals(Object obj) {
+ public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
public void succeeded() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.COMMITED);
}
public void failed() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.FAILED);
}
this.onStatusChange(status);
}
- public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(
- CheckedFuture<Void,TransactionCommitFailedException> from ) {
+ public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(final CheckedFuture<Void,TransactionCommitFailedException> from) {
return Futures.transform(from, new AsyncFunction<Void, RpcResult<TransactionStatus>>() {
@Override
- public ListenableFuture<RpcResult<TransactionStatus>> apply(Void input) throws Exception {
- return Futures.immediateFuture(RpcResultBuilder.<TransactionStatus>
- success(TransactionStatus.COMMITED).build());
+ public ListenableFuture<RpcResult<TransactionStatus>> apply(final Void input) {
+ return SUCCESS_FUTURE;
}
- } );
+ });
}
}
import javax.annotation.Nullable;
import org.opendaylight.yangtools.util.concurrent.CountingRejectedExecutionHandler;
import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* MXBean implementation of the ThreadExecutorStatsMXBean interface that retrieves statistics
*/
public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
implements ThreadExecutorStatsMXBean {
-
+ private static final Logger LOG = LoggerFactory.getLogger(ThreadExecutorStatsMXBeanImpl.class);
private final ThreadPoolExecutor executor;
/**
* @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
* @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
*/
- public ThreadExecutorStatsMXBeanImpl(Executor executor, String mBeanName,
- String mBeanType, @Nullable String mBeanCategory) {
+ public ThreadExecutorStatsMXBeanImpl(final ThreadPoolExecutor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
super(mBeanName, mBeanType, mBeanCategory);
+ this.executor = Preconditions.checkNotNull(executor);
+ }
+
+ /**
+ * Create a new bean for the statistics, which is already registered.
+ *
+ * @param executor
+ * @param mBeanName
+ * @param mBeanType
+ * @param mBeanCategory
+ * @return
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
+ if (executor instanceof ThreadPoolExecutor) {
+ final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl((ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+ ret.registerMBean();
+ return ret;
+ }
- Preconditions.checkArgument(executor instanceof ThreadPoolExecutor,
- "The ExecutorService of type {} is not an instanceof ThreadPoolExecutor",
- executor.getClass());
- this.executor = (ThreadPoolExecutor)executor;
+ LOG.info("Executor {} is not supported", executor);
+ return null;
}
@Override
}
public void setDataStoreExecutor(ExecutorService dsExecutor) {
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dsExecutor,
+ this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor,
"notification-executor", getMBeanType(), getMBeanCategory());
}
this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", getMBeanType(), getMBeanCategory());
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
"data-store-executor", getMBeanType(), getMBeanCategory());
}
*/
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import com.google.common.collect.ImmutableMap;
/**
*
//we will default to InMemoryDOMDataStore creation
configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
}
- ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
- .<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
- .put(LogicalDatastoreType.CONFIGURATION, configStore).build();
+
+ final Map<LogicalDatastoreType, DOMStore> datastores = new EnumMap<>(LogicalDatastoreType.class);
+ datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
+ datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
/*
* We use a single-threaded executor for commits with a bounded queue capacity. If the
DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
listenableFutureExecutor));
final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE);
commitStatsMXBean.registerMBean();
- final ThreadExecutorStatsMXBeanImpl commitExecutorStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(commitExecutor, "CommitExecutorStats",
+ final AbstractMXBean commitExecutorStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
JMX_BEAN_TYPE, null);
- commitExecutorStatsMXBean.registerMBean();
-
- final ThreadExecutorStatsMXBeanImpl commitFutureStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(listenableFutureExecutor,
+ final AbstractMXBean commitFutureStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
"CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
- commitFutureStatsMXBean.registerMBean();
newDataBroker.setCloseable(new AutoCloseable() {
@Override
public void close() {
commitStatsMXBean.unregisterMBean();
- commitExecutorStatsMXBean.unregisterMBean();
- commitFutureStatsMXBean.unregisterMBean();
+ if (commitExecutorStatsMXBean != null) {
+ commitExecutorStatsMXBean.unregisterMBean();
+ }
+ if (commitFutureStatsMXBean != null) {
+ commitFutureStatsMXBean.unregisterMBean();
+ }
}
});
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
* Composite DOM Transaction backed by {@link DOMStoreTransaction}.
*
abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTransaction> implements
AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- private final ImmutableMap<K, T> backingTxs;
+ private final Map<K, T> backingTxs;
private final Object identifier;
/**
* @param backingTxs
* Key,value map of backing transactions.
*/
- protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final ImmutableMap<K, T> backingTxs) {
+ protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map<K, T> backingTxs) {
this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null");
}
*/
protected final T getSubtransaction(final K key) {
Preconditions.checkNotNull(key, "key must not be null.");
- Preconditions.checkArgument(backingTxs.containsKey(key), "No subtransaction associated with %s", key);
- return backingTxs.get(key);
+
+ final T ret = backingTxs.get(key);
+ Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+ return ret;
}
/**
* Returns immutable Iterable of all subtransactions.
*
*/
- protected Iterable<T> getSubtransactions() {
+ protected Collection<T> getSubtransactions() {
return backingTxs.values();
}
protected void closeSubtransactions() {
/*
- * We share one exception for all failures, which are added
- * as supressedExceptions to it.
- *
+ * We share one exception for all failures, which are added
+ * as supressedExceptions to it.
*/
IllegalStateException failure = null;
for (T subtransaction : backingTxs.values()) {
subtransaction.close();
} catch (Exception e) {
// If we did not allocated failure we allocate it
- if(failure == null) {
- failure = new IllegalStateException("Uncaught exception occured during closing transaction.", e);
+ if (failure == null) {
+ failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
} else {
- // We update it with addotional exceptions, which occured during error.
+ // We update it with additional exceptions, which occurred during error.
failure.addSuppressed(e);
}
}
}
// If we have failure, we throw it at after all attempts to close.
- if(failure != null) {
+ if (failure != null) {
throw failure;
}
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.EnumMap;
import java.util.Map;
import java.util.Map.Entry;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
*
* Abstract composite transaction factory.
* @param <T>
* Type of {@link DOMStoreTransactionFactory} factory.
*/
-public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
-
- private final ImmutableMap<LogicalDatastoreType, T> storeTxFactories;
-
- private boolean closed;
+abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicIntegerFieldUpdater<AbstractDOMForwardedTransactionFactory> UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed");
+ private final Map<LogicalDatastoreType, T> storeTxFactories;
+ private volatile int closed = 0;
protected AbstractDOMForwardedTransactionFactory(final Map<LogicalDatastoreType, ? extends T> txFactories) {
- this.storeTxFactories = ImmutableMap.copyOf(txFactories);
+ this.storeTxFactories = new EnumMap<>(txFactories);
}
/**
*
* @return New composite read-only transaction.
*/
- public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newReadOnlyTransaction());
}
- return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), builder.build());
+ return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns);
}
-
-
/**
* Creates a new composite write-only transaction
*
* @return New composite write-only transaction associated with this
* factory.
*/
- public DOMDataWriteTransaction newWriteOnlyTransaction() {
+ public final DOMDataWriteTransaction newWriteOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
}
- return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), builder.build(),
- this);
+ return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), txns, this);
}
/**
*
* @return New composite read-write transaction associated with this
* factory.
- *
*/
- public DOMDataReadWriteTransaction newReadWriteTransaction() {
+ public final DOMDataReadWriteTransaction newReadWriteTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadWriteTransaction());
+ txns.put(store.getKey(), store.getValue().newReadWriteTransaction());
}
- return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), builder.build(), this);
+ return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this);
}
/**
}
/**
- *
* Checks if instance is not closed.
*
* @throws IllegalStateException If instance of this class was closed.
*
*/
- @GuardedBy("this")
- protected synchronized void checkNotClosed() {
- Preconditions.checkState(!closed,"Transaction factory was closed. No further operations allowed.");
+ protected final void checkNotClosed() {
+ Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
}
@Override
- @GuardedBy("this")
- public synchronized void close() {
- closed = true;
+ public void close() {
+ final boolean success = UPDATER.compareAndSet(this, 0, 1);
+ Preconditions.checkState(success, "Transaction factory was already closed");
}
-
}
+
package org.opendaylight.controller.md.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkState;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DOMStore> implements DOMDataBroker,
AutoCloseable {
private final AtomicLong chainNum = new AtomicLong();
private volatile AutoCloseable closeable;
- public DOMDataBrokerImpl(final ImmutableMap<LogicalDatastoreType, DOMStore> datastores,
+ public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
final ListeningExecutorService executor) {
super(datastores);
this.coordinator = new DOMDataCommitCoordinatorImpl(executor);
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
@Override
public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreTransactionChain> backingChainsBuilder = ImmutableMap
- .builder();
+ checkNotClosed();
+
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
- backingChainsBuilder.put(entry.getKey(), entry.getValue().createTransactionChain());
+ backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
}
- long chainId = chainNum.getAndIncrement();
- ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = backingChainsBuilder.build();
+
+ final long chainId = chainNum.getAndIncrement();
LOG.debug("Transactoin chain {} created with listener {}, backing store chains {}", chainId, listener,
backingChains);
return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, coordinator, listener);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
* NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which is backed
* by several {@link DOMStoreTransactionChain} differentiated by provided
implements DOMTransactionChain, DOMDataCommitErrorListener {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
+ private final AtomicLong txNum = new AtomicLong();
private final DOMDataCommitExecutor coordinator;
private final TransactionChainListener listener;
private final long chainId;
- private final AtomicLong txNum = new AtomicLong();
- @GuardedBy("this")
- private boolean failed = false;
+
+ private volatile boolean failed = false;
/**
*
* If any of arguments is null.
*/
public DOMDataBrokerTransactionChainImpl(final long chainId,
- final ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
final DOMDataCommitExecutor coordinator, final TransactionChainListener listener) {
super(chains);
this.chainId = chainId;
}
@Override
- public synchronized CheckedFuture<Void,TransactionCommitFailedException> submit(
+ public CheckedFuture<Void,TransactionCommitFailedException> submit(
final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotClosed();
+
return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
}
@Override
- public synchronized void close() {
+ public void close() {
super.close();
+
for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
subChain.close();
}
if (!failed) {
LOG.debug("Transaction chain {}Â successfully finished.", this);
+ // FIXME: this event should be emitted once all operations complete
listener.onTransactionChainSuccessful(this);
}
}
@Override
- public synchronized void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
failed = true;
LOG.debug("Transaction chain {}Â failed.", this, cause);
listener.onTransactionChainFailed(this, tx, cause);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import java.util.List;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
/**
*
* Implementation of blocking three phase commit coordinator, which which
public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataCommitCoordinatorImpl.class);
-
- /**
- * Runs AND binary operation between all booleans in supplied iteration of booleans.
- *
- * This method will stop evaluating iterables if first found is false.
- */
- private static final Function<Iterable<Boolean>, Boolean> AND_FUNCTION = new Function<Iterable<Boolean>, Boolean>() {
-
- @Override
- public Boolean apply(final Iterable<Boolean> input) {
- for(boolean value : input) {
- if(!value) {
- return Boolean.FALSE;
- }
- }
- return Boolean.TRUE;
- }
- };
-
- private final ListeningExecutorService executor;
-
private final DurationStatsTracker commitStatsTracker = new DurationStatsTracker();
+ private final ListeningExecutorService executor;
/**
*
}
/**
- *
* Implementation of blocking three-phase commit-coordination tasks without
- * support of cancelation.
- *
+ * support of cancellation.
*/
- private static class CommitCoordinationTask implements Callable<Void> {
-
+ private static final class CommitCoordinationTask implements Callable<Void> {
+ private static final AtomicReferenceFieldUpdater<CommitCoordinationTask, CommitPhase> PHASE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase");
private final DOMDataWriteTransaction tx;
private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
private final DurationStatsTracker commitStatTracker;
-
- @GuardedBy("this")
- private CommitPhase currentPhase;
+ private final int cohortSize;
+ private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
final DurationStatsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
- this.currentPhase = CommitPhase.SUBMITTED;
this.commitStatTracker = commitStatTracker;
+ this.cohortSize = Iterables.size(cohorts);
}
@Override
public Void call() throws TransactionCommitFailedException {
+ final long startTime = commitStatTracker != null ? System.nanoTime() : 0;
- long startTime = System.nanoTime();
try {
canCommitBlocking();
preCommitBlocking();
commitBlocking();
return null;
} catch (TransactionCommitFailedException e) {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, e);
- abortBlocking(e);
+ final CommitPhase phase = currentPhase;
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e);
+ abortBlocking(e, phase);
throw e;
} finally {
- if(commitStatTracker != null) {
+ if (commitStatTracker != null) {
commitStatTracker.addDuration(System.nanoTime() - startTime);
}
}
*
*/
private void canCommitBlocking() throws TransactionCommitFailedException {
- final Boolean canCommitResult = canCommitAll().checkedGet();
- if (!canCommitResult) {
- throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+ for (ListenableFuture<?> canCommit : canCommitAll()) {
+ try {
+ final Boolean result = (Boolean)canCommit.get();
+ if (result == null || !result) {
+ throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ throw TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
+ }
}
}
/**
*
- * Invokes preCommit on underlying cohorts and blocks till
- * all results are returned.
+ * Invokes canCommit on underlying cohorts and returns futures
+ * which will contain {@link Boolean#TRUE} if and only if
+ * all cohorts returned true.
*
- * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
- * state is not CAN_COMMIT
- * throws IllegalStateException.
+ * Valid state transition is from SUBMITTED to CAN_COMMIT,
+ * if currentPhase is not SUBMITTED throws IllegalStateException.
*
- * @throws TransactionCommitFailedException
- * If one of cohorts failed preCommit
+ * @return List of all cohort futures from the can-commit phase.
*
*/
- private void preCommitBlocking() throws TransactionCommitFailedException {
- preCommitAll().checkedGet();
+ private ListenableFuture<?>[] canCommitAll() {
+ changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
+ for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
+ ops[i++] = cohort.canCommit();
+ }
+ return ops;
}
/**
*
- * Invokes commit on underlying cohorts and blocks till
+ * Invokes preCommit on underlying cohorts and blocks till
* all results are returned.
*
- * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
- * IllegalStateException.
+ * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
+ * state is not CAN_COMMIT
+ * throws IllegalStateException.
*
* @throws TransactionCommitFailedException
* If one of cohorts failed preCommit
*
*/
- private void commitBlocking() throws TransactionCommitFailedException {
- commitAll().checkedGet();
- }
-
- /**
- * Aborts transaction.
- *
- * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
- * cohorts, blocks
- * for all results. If any of the abort failed throws
- * IllegalStateException,
- * which will contains originalCause as suppressed Exception.
- *
- * If aborts we're successful throws supplied exception
- *
- * @param originalCause
- * Exception which should be used to fail transaction for
- * consumers of transaction
- * future and listeners of transaction failure.
- * @throws TransactionCommitFailedException
- * on invocation of this method.
- * originalCa
- * @throws IllegalStateException
- * if abort failed.
- */
- private void abortBlocking(final TransactionCommitFailedException originalCause)
- throws TransactionCommitFailedException {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, originalCause);
- Exception cause = originalCause;
+ private void preCommitBlocking() throws TransactionCommitFailedException {
+ final ListenableFuture<?>[] preCommitFutures = preCommitAll();
try {
- abortAsyncAll().get();
+ for(ListenableFuture<?> future : preCommitFutures) {
+ future.get();
+ }
} catch (InterruptedException | ExecutionException e) {
- LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
- cause = new IllegalStateException("Abort failed.", e);
- cause.addSuppressed(e);
+ throw TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER.apply(e);
}
- Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
}
/**
* state is not CAN_COMMIT
* throws IllegalStateException.
*
- * @return Future which will complete once all cohorts completed
- * preCommit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * @return List of all cohort futures from the pre-commit phase.
*
*/
- private CheckedFuture<Void, TransactionCommitFailedException> preCommitAll() {
+ private ListenableFuture<?>[] preCommitAll() {
changeStateFrom(CommitPhase.CAN_COMMIT, CommitPhase.PRE_COMMIT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.preCommit());
+ ops[i++] = cohort.preCommit();
+ }
+ return ops;
+ }
+
+ /**
+ *
+ * Invokes commit on underlying cohorts and blocks till
+ * all results are returned.
+ *
+ * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
+ * IllegalStateException.
+ *
+ * @throws TransactionCommitFailedException
+ * If one of the cohorts failed commit
+ *
+ */
+ private void commitBlocking() throws TransactionCommitFailedException {
+ final ListenableFuture<?>[] commitFutures = commitAll();
+ try {
+ for(ListenableFuture<?> future : commitFutures) {
+ future.get();
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ throw TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e);
}
- /*
- * We are returing all futures as list, not only succeeded ones in
- * order to fail composite future if any of them failed.
- * See Futures.allAsList for this description.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
- return MappingCheckedFuture.create(compositeResult,
- TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER);
}
/**
* Valid state transition is from PRE_COMMIT to COMMIT, if not throws
* IllegalStateException
*
- * @return Future which will complete once all cohorts completed
- * commit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * @return List of all cohort futures from the commit phase.
*
*/
- private CheckedFuture<Void, TransactionCommitFailedException> commitAll() {
+ private ListenableFuture<?>[] commitAll() {
changeStateFrom(CommitPhase.PRE_COMMIT, CommitPhase.COMMIT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.commit());
+ ops[i++] = cohort.commit();
}
- /*
- * We are returing all futures as list, not only succeeded ones in
- * order to fail composite future if any of them failed.
- * See Futures.allAsList for this description.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
- return MappingCheckedFuture.create(compositeResult,
- TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
+ return ops;
}
/**
+ * Aborts transaction.
*
- * Invokes canCommit on underlying cohorts and returns composite future
- * which will contains {@link Boolean#TRUE} only and only if
- * all cohorts returned true.
- *
- * Valid state transition is from SUBMITTED to CAN_COMMIT,
- * if currentPhase is not SUBMITTED throws IllegalStateException.
+ * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
+ * cohorts, blocks
+ * for all results. If any of the aborts failed, throws
+ * IllegalStateException,
+ * which will contain originalCause as a suppressed Exception.
*
- * @return Future which will complete once all cohorts completed
- * preCommit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * If all aborts were successful, the supplied exception is thrown.
*
+ * @param originalCause
+ * Exception which should be used to fail transaction for
+ * consumers of transaction
+ * future and listeners of transaction failure.
+ * @param phase phase in which the problem ensued
+ * @throws TransactionCommitFailedException
+ * always thrown on invocation of this method,
+ * rethrowing originalCause if all aborts succeeded.
+ * @throws IllegalStateException
+ * if abort failed.
*/
- private CheckedFuture<Boolean, TransactionCommitFailedException> canCommitAll() {
- changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
- Builder<ListenableFuture<Boolean>> canCommitOperations = ImmutableList.builder();
- for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- canCommitOperations.add(cohort.canCommit());
+ private void abortBlocking(final TransactionCommitFailedException originalCause, final CommitPhase phase)
+ throws TransactionCommitFailedException {
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, originalCause);
+ Exception cause = originalCause;
+ try {
+ abortAsyncAll(phase).get();
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
+ cause = new IllegalStateException("Abort failed.", e);
+ cause.addSuppressed(e);
}
- ListenableFuture<List<Boolean>> allCanCommits = Futures.allAsList(canCommitOperations.build());
- ListenableFuture<Boolean> allSuccessFuture = Futures.transform(allCanCommits, AND_FUNCTION);
- return MappingCheckedFuture.create(allSuccessFuture,
- TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER);
-
+ Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
}
/**
- *
* Invokes abort on underlying cohorts and returns future which
- * completes
- * once all abort on cohorts are completed.
+ * completes once all abort on cohorts are completed.
*
+ * @param phase phase in which the problem ensued
* @return Future which will complete once all cohorts completed
* abort.
- *
*/
- private ListenableFuture<Void> abortAsyncAll() {
- changeStateFrom(currentPhase, CommitPhase.ABORT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+ private ListenableFuture<Void> abortAsyncAll(final CommitPhase phase) {
+ changeStateFrom(phase, CommitPhase.ABORT);
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.abort());
+ ops[i++] = cohort.abort();
}
+
/*
- * We are returing all futures as list, not only succeeded ones in
+ * We are returning all futures as list, not only succeeded ones in
* order to fail composite future if any of them failed.
* See Futures.allAsList for this description.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
+ ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops);
return compositeResult;
}
* @throws IllegalStateException
* If currentState of task does not match expected state
*/
- private synchronized void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
- Preconditions.checkState(currentPhase.equals(currentExpected),
- "Invalid state transition: Tx: %s current state: %s new state: %s", tx.getIdentifier(),
- currentPhase, newState);
- LOG.debug("Transaction {}: Phase {} Started ", tx.getIdentifier(), newState);
- currentPhase = newState;
- };
+ private void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
+ final boolean success = PHASE_UPDATER.compareAndSet(this, currentExpected, newState);
+ Preconditions.checkState(success, "Invalid state transition: Tx: %s expected: %s current: %s target: %s",
+ tx.getIdentifier(), currentExpected, currentPhase, newState);
+ LOG.debug("Transaction {}: Phase {} Started", tx.getIdentifier(), newState);
+ };
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
- *
* Read Only Transaction, which is composed of several
* {@link DOMStoreReadTransaction} transactions. Subtransaction is selected by
* {@link LogicalDatastoreType} type parameter in
DOMDataReadOnlyTransaction {
protected DOMForwardedReadOnlyTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
super(identifier, backingTxs);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
public void close() {
closeSubtransactions();
}
-
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
*
* Read-Write Transaction, which is composed of several
* transactions.
*
*/
-
-class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements
- DOMDataReadWriteTransaction {
-
+final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
protected DOMForwardedReadWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs, commitImpl);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import static com.google.common.base.Preconditions.checkState;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- *
- *
* Read-Write Transaction, which is composed of several
- * {@link DOMStoreWriteTransaction} transactions. Subtransaction is selected by
+ * {@link DOMStoreWriteTransaction} transactions. A sub-transaction is selected by
* {@link LogicalDatastoreType} type parameter in:
*
* <ul>
* invocation with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} for underlying
* transactions.
*
- * @param <T>
- * Subtype of {@link DOMStoreWriteTransaction} which is used as
+ * @param <T> Subtype of {@link DOMStoreWriteTransaction} which is used as
* subtransaction.
*/
class DOMForwardedWriteTransaction<T extends DOMStoreWriteTransaction> extends
AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, DOMDataCommitImplementation> IMPL_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, DOMDataCommitImplementation.class, "commitImpl");
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, Future> FUTURE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class);
+ private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
/**
- * Implementation of real commit.
- *
- * Transaction can not be commited if commitImpl is null,
- * so this seting this property to null is also used to
- * prevent write to
- * already commited / canceled transaction {@link #checkNotCanceled()
- *
- *
+ * Implementation of real commit. It also acts as an indication that
+ * the transaction is running -- which we flip atomically using
+ * {@link #IMPL_UPDATER}.
*/
- @GuardedBy("this")
private volatile DOMDataCommitImplementation commitImpl;
/**
+ * Future task of transaction commit. It starts off as null, but is
+ * set appropriately on {@link #submit()} and {@link #cancel()} via
+ * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
*
- * Future task of transaction commit.
- *
- * This value is initially null, and is once updated if transaction
- * is commited {@link #commit()}.
- * If this future exists, transaction MUST not be commited again
- * and all modifications should fail. See {@link #checkNotCommited()}.
- *
+ * Lazy set is safe for use because it is only referenced to in the
+ * {@link #cancel()} slow path, where we will busy-wait for it. The
+ * fast path gets the benefit of a store-store barrier instead of the
+ * usual store-load barrier.
*/
- @GuardedBy("this")
- private volatile CheckedFuture<Void, TransactionCommitFailedException> commitFuture;
+ private volatile Future<?> commitFuture;
protected DOMForwardedWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
+ final Map<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs);
this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
}
@Override
public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).write(path, data);
}
@Override
public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).delete(path);
}
@Override
public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).merge(path, data);
}
@Override
- public synchronized boolean cancel() {
- // Transaction is already canceled, we are safe to return true
- final boolean cancelationResult;
- if (commitImpl == null && commitFuture != null) {
- // Transaction is submitted, we try to cancel future.
- cancelationResult = commitFuture.cancel(false);
- } else if(commitImpl == null) {
+ public boolean cancel() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ if (impl != null) {
+ LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+ FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
return true;
- } else {
- cancelationResult = true;
- commitImpl = null;
}
- return cancelationResult;
+ // The transaction is in process of being submitted or cancelled. Busy-wait
+ // for the corresponding future.
+ Future<?> future;
+ do {
+ future = commitFuture;
+ } while (future == null);
+
+ return future.cancel(false);
}
@Override
- public synchronized ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
}
@Override
- public CheckedFuture<Void,TransactionCommitFailedException> submit() {
- checkNotReady();
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ checkRunning(impl);
- ImmutableList.Builder<DOMStoreThreePhaseCommitCohort> cohortsBuilder = ImmutableList.builder();
- for (DOMStoreWriteTransaction subTx : getSubtransactions()) {
- cohortsBuilder.add(subTx.ready());
- }
- ImmutableList<DOMStoreThreePhaseCommitCohort> cohorts = cohortsBuilder.build();
- commitFuture = commitImpl.submit(this, cohorts);
-
- /*
- *We remove reference to Commit Implementation in order
- *to prevent memory leak
- */
- commitImpl = null;
- return commitFuture;
- }
+ final Collection<T> txns = getSubtransactions();
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
- private void checkNotReady() {
- checkNotCommited();
- checkNotCanceled();
- }
+ // FIXME: deal with errors thrown by backend (ready and submit can fail in theory)
+ for (DOMStoreWriteTransaction txn : txns) {
+ cohorts.add(txn.ready());
+ }
- private void checkNotCanceled() {
- Preconditions.checkState(commitImpl != null, "Transaction was canceled.");
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+ FUTURE_UPDATER.lazySet(this, ret);
+ return ret;
}
- private void checkNotCommited() {
- checkState(commitFuture == null, "Transaction was already submited.");
+ private void checkRunning(final DOMDataCommitImplementation impl) {
+ Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
}
-}
\ No newline at end of file
+}
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
public class DOMBrokerTest {
private SchemaContext schemaContext;
commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
domBroker = new DOMDataBrokerImpl(stores, executor);
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
Futures.addCallback( writeTx.submit(), new FutureCallback<Void>() {
@Override
- public void onSuccess( Void result ) {
+ public void onSuccess( final Void result ) {
commitCompletedLatch.countDown();
}
@Override
- public void onFailure( Throwable t ) {
+ public void onFailure( final Throwable t ) {
caughtCommitEx.set( t );
commitCompletedLatch.countDown();
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
private final CountDownLatch latch = new CountDownLatch( 1 );
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
this.change = change;
latch.countDown();
}
ExecutorService delegate;
- public CommitExecutorService( ExecutorService delegate ) {
+ public CommitExecutorService( final ExecutorService delegate ) {
this.delegate = delegate;
}
return this.bluePrint;
}
- public List<Object> collectModuleRoots(XSQLBluePrintNode table) {
+ public List<Object> collectModuleRoots(XSQLBluePrintNode table,LogicalDatastoreType type) {
if (table.getParent().isModule()) {
try {
List<Object> result = new LinkedList<Object>();
.toInstance();
DOMDataReadTransaction t = this.domDataBroker
.newReadOnlyTransaction();
- Object node = t.read(LogicalDatastoreType.OPERATIONAL,
+ Object node = t.read(type,
instanceIdentifier).get();
+
node = XSQLODLUtils.get(node, "reference");
if (node == null) {
return result;
XSQLAdapter.log(err);
}
} else {
- return collectModuleRoots(table.getParent());
+ return collectModuleRoots(table.getParent(),type);
}
return null;
}
public void execute(JDBCResultSet rs) {
List<XSQLBluePrintNode> tables = rs.getTables();
- List<Object> roots = collectModuleRoots(tables.get(0));
+ List<Object> roots = collectModuleRoots(tables.get(0),LogicalDatastoreType.OPERATIONAL);
+ roots.addAll(collectModuleRoots(tables.get(0),LogicalDatastoreType.CONFIGURATION));
+ if(roots.isEmpty()){
+ rs.setFinished(true);
+ }
XSQLBluePrintNode main = rs.getMainTable();
List<NETask> tasks = new LinkedList<XSQLAdapter.NETask>();
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
* to implement {@link DOMStore} contract.
*
*/
-public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
- TransactionReadyPrototype,AutoCloseable {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
private final ListenerTree listenerTree = ListenerTree.create();
private final AtomicLong txCounter = new AtomicLong(0);
- private final ListeningExecutorService listeningExecutor;
private final QueuedNotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> dataChangeListenerNotificationManager;
private final ExecutorService dataChangeListenerExecutor;
-
- private final ExecutorService domStoreExecutor;
+ private final ListeningExecutorService commitExecutor;
private final boolean debugTransactions;
private final String name;
private volatile AutoCloseable closeable;
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor) {
- this(name, domStoreExecutor, dataChangeListenerExecutor,
+ this(name, commitExecutor, dataChangeListenerExecutor,
InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false);
}
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor, final int maxDataChangeListenerQueueSize,
final boolean debugTransactions) {
this.name = Preconditions.checkNotNull(name);
- this.domStoreExecutor = Preconditions.checkNotNull(domStoreExecutor);
- this.listeningExecutor = MoreExecutors.listeningDecorator(this.domStoreExecutor);
+ this.commitExecutor = Preconditions.checkNotNull(commitExecutor);
this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
this.debugTransactions = debugTransactions;
"DataChangeListenerQueueMgr");
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
}
public ExecutorService getDomStoreExecutor() {
- return domStoreExecutor;
+ return commitExecutor;
}
@Override
@Override
public void close() {
- ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+ ExecutorServiceUtil.tryGracefulShutdown(commitExecutor, 30, TimeUnit.SECONDS);
ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
if(closeable != null) {
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction writeTx) {
- LOG.debug("Tx: {} is submitted. Modifications: {}", writeTx.getIdentifier(), writeTx.getMutatedView());
- return new ThreePhaseCommitImpl(writeTx);
+ protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ LOG.debug("Tx: {} is closed.", tx.getIdentifier());
+ }
+
+ @Override
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
+ return new ThreePhaseCommitImpl(tx, tree);
}
private Object nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private class DOMStoreTransactionChainImpl implements DOMStoreTransactionChain, TransactionReadyPrototype {
-
+ private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
+ @GuardedBy("this")
+ private SnapshotBackedWriteTransaction allocatedTransaction;
+ @GuardedBy("this")
+ private DataTreeSnapshot readySnapshot;
@GuardedBy("this")
- private SnapshotBackedWriteTransaction latestOutstandingTx;
-
private boolean chainFailed = false;
+ @GuardedBy("this")
private void checkFailed() {
Preconditions.checkState(!chainFailed, "Transaction chain is failed.");
}
- @Override
- public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
- final DataTreeSnapshot snapshot;
+ @GuardedBy("this")
+ private DataTreeSnapshot getSnapshot() {
checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
+
+ if (allocatedTransaction != null) {
+ Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier());
+ return readySnapshot;
} else {
- snapshot = dataTree.takeSnapshot();
+ return dataTree.takeSnapshot();
}
+ }
+
+ @GuardedBy("this")
+ private <T extends SnapshotBackedWriteTransaction> T recordTransaction(final T transaction) {
+ allocatedTransaction = transaction;
+ readySnapshot = null;
+ return transaction;
+ }
+
+ @Override
+ public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
+ final DataTreeSnapshot snapshot = getSnapshot();
return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot);
}
@Override
public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
- }
- final SnapshotBackedReadWriteTransaction ret = new SnapshotBackedReadWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
}
@Override
public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
+ }
+
+ @Override
+ protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ if (tx.equals(allocatedTransaction)) {
+ Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot);
+ allocatedTransaction = null;
}
- final SnapshotBackedWriteTransaction ret = new SnapshotBackedWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction tx) {
- DOMStoreThreePhaseCommitCohort storeCohort = InMemoryDOMDataStore.this.ready(tx);
- return new ChainedTransactionCommitImpl(tx, storeCohort, this);
+ protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction);
+ if (readySnapshot != null) {
+ // The snapshot should have been cleared
+ LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree);
+ }
+
+ final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree);
+ readySnapshot = tree;
+ return new ChainedTransactionCommitImpl(tx, cohort, this);
}
@Override
public void close() {
-
// FIXME: this call doesn't look right here - listeningExecutor is shared and owned
// by the outer class.
//listeningExecutor.shutdownNow();
protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
final Throwable t) {
chainFailed = true;
-
}
public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If committed transaction is latestOutstandingTx we clear
- // latestOutstandingTx
- // field in order to base new transactions on Datastore Data Tree
- // directly.
- if (transaction.equals(latestOutstandingTx)) {
- latestOutstandingTx = null;
+ // If the committed transaction was the one we allocated last,
+ // we clear it and the ready snapshot, so the next transaction
+ // allocated refers to the data tree directly.
+ if (transaction.equals(allocatedTransaction)) {
+ if (readySnapshot == null) {
+ LOG.warn("Transaction {} committed while no ready snapshot present", transaction);
+ }
+
+ allocatedTransaction = null;
+ readySnapshot = null;
}
}
-
}
private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DOMStoreThreePhaseCommitCohort delegate;
-
private final DOMStoreTransactionChainImpl txChain;
protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- super();
this.transaction = transaction;
this.delegate = delegate;
this.txChain = txChain;
public void onSuccess(final Void result) {
txChain.onTransactionCommited(transaction);
}
-
});
return commitFuture;
}
-
}
private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DataTreeModification modification;
private ResolveDataChangeEventsTask listenerResolver;
private DataTreeCandidate candidate;
- public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction) {
+ public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
this.transaction = writeTransaction;
- this.modification = transaction.getMutatedView();
+ this.modification = modification;
}
@Override
public ListenableFuture<Boolean> canCommit() {
- return listeningExecutor.submit(new Callable<Boolean>() {
+ return commitExecutor.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws TransactionCommitFailedException {
try {
@Override
public ListenableFuture<Void> preCommit() {
- return listeningExecutor.submit(new Callable<Void>() {
+ return commitExecutor.submit(new Callable<Void>() {
@Override
public Void call() {
candidate = dataTree.prepare(modification);
* The commit has to occur atomically with regard to listener
* registrations.
*/
- synchronized (this) {
+ synchronized (InMemoryDOMDataStore.this) {
dataTree.commit(candidate);
listenerResolver.resolve(dataChangeListenerNotificationManager);
}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
@Nullable final InMemoryDOMDataStoreConfigProperties properties) {
InMemoryDOMDataStoreConfigProperties actualProperties = properties;
- if(actualProperties == null) {
+ if (actualProperties == null) {
actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault();
}
// task execution time to get higher throughput as DataChangeListeners typically provide
// much of the business logic for a data model. If the executor queue size limit is reached,
// subsequent submitted notifications will block the calling thread.
-
int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize();
int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize();
ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
- ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- actualProperties.getMaxDataStoreExecutorQueueSize(), "DOMStore-" + name );
-
- InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
- domStoreExecutor, dataChangeListenerExecutor,
+ final ListeningExecutorService commitExecutor = MoreExecutors.sameThreadExecutor();
+ final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+ commitExecutor, dataChangeListenerExecutor,
actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions);
- if(schemaService != null) {
+ if (schemaService != null) {
schemaService.registerSchemaContextListener(dataStore);
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkNotNull;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* and executed according to {@link TransactionReadyPrototype}.
*
*/
-class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
- implements DOMStoreReadWriteTransaction {
-
+final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
/**
LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
checkNotNull(path, "Path must not be null.");
- DataTreeModification dataView = getMutatedView();
- if(dataView == null) {
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
- }
-
+ final Optional<NormalizedNode<?, ?>> result;
try {
- return Futures.immediateCheckedFuture(dataView.readNode(path));
+ result = readSnapshotNode(path);
} catch (Exception e) {
LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e));
+ }
+
+ if (result == null) {
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
+ } else {
+ return Futures.immediateCheckedFuture(result);
}
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*
*/
class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
-
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
- private DataTreeModification mutableTree;
- private boolean ready = false;
- private TransactionReadyPrototype readyImpl;
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
+
+ // non-null when not ready
+ private volatile TransactionReadyPrototype readyImpl;
+ // non-null when not committed/closed
+ private volatile DataTreeModification mutableTree;
/**
* Creates new write-only transaction.
public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
super(identifier, debug);
- mutableTree = snapshot.newModification();
this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
+ mutableTree = snapshot.newModification();
LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot);
}
- @Override
- public void close() {
- LOG.debug("Store transaction: {} : Closed", getIdentifier());
- this.mutableTree = null;
- this.readyImpl = null;
- }
-
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
- mutableTree.write(path, data);
+ tree.write(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
- mutableTree.merge(path, data);
+ tree.merge(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void delete(final YangInstanceIdentifier path) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
+
try {
- LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
- mutableTree.delete(path);
+ tree.delete(path);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, mutableTree, e);
+ LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
}
}
- protected final boolean isReady() {
- return ready;
+ /**
+ * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does
+ * not allow data access after the transaction has been closed or readied.
+ *
+ * @param path Path to read
+ * @return null if the transaction has been closed
+ */
+ protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+ return readyImpl == null ? null : mutableTree.readNode(path);
}
- protected final void checkNotReady() {
- checkState(!ready, "Transaction %s is ready. No further modifications allowed.", getIdentifier());
+ private final void checkNotReady() {
+ checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.", getIdentifier());
}
@Override
- public synchronized DOMStoreThreePhaseCommitCohort ready() {
- checkState(!ready, "Transaction %s is already ready.", getIdentifier());
- ready = true;
+ public DOMStoreThreePhaseCommitCohort ready() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
+
LOG.debug("Store transaction: {} : Ready", getIdentifier());
- mutableTree.ready();
- return readyImpl.ready(this);
+
+ final DataTreeModification tree = mutableTree;
+ TREE_UPDATER.lazySet(this, null);
+ tree.ready();
+ return wasReady.transactionReady(this, tree);
}
- protected DataTreeModification getMutatedView() {
- return mutableTree;
+ @Override
+ public void close() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ if (wasReady != null) {
+ LOG.debug("Store transaction: {} : Closed", getIdentifier());
+ TREE_UPDATER.lazySet(this, null);
+ wasReady.transactionAborted(this);
+ } else {
+ LOG.debug("Store transaction: {} : Closed after submit", getIdentifier());
+ }
}
@Override
protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("ready", isReady());
+ return toStringHelper.add("ready", readyImpl == null);
}
/**
* providing underlying logic for applying implementation.
*
*/
- public static interface TransactionReadyPrototype {
+ abstract static class TransactionReadyPrototype {
+ /**
+ * Called when a transaction is closed without being readied. This is not invoked for
+ * transactions which are ready.
+ *
+ * @param tx Transaction which got aborted.
+ */
+ protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
/**
* Returns a commit coordinator associated with supplied transactions.
*
* @param tx
* Transaction on which ready was invoked.
+ * @param tree
+ * Modified data tree which has been constructed.
* @return DOMStoreThreePhaseCommitCohort associated with transaction
*/
- DOMStoreThreePhaseCommitCohort ready(SnapshotBackedWriteTransaction tx);
+ protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
}
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl.jmx;
import java.util.concurrent.ExecutorService;
-
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
*/
public class InMemoryDataStoreStats implements AutoCloseable {
- private final ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean;
- private final ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean;
+ private final AbstractMXBean notificationExecutorStatsBean;
+ private final AbstractMXBean dataStoreExecutorStatsBean;
private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
- public InMemoryDataStoreStats(String mBeanType, QueuedNotificationManager<?, ?> manager,
- ExecutorService dataStoreExecutor) {
+ public InMemoryDataStoreStats(final String mBeanType, final QueuedNotificationManager<?, ?> manager,
+ final ExecutorService dataStoreExecutor) {
- this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
+ notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", mBeanType, null);
notificationManagerStatsBean.registerMBean();
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
"notification-executor", mBeanType, null);
- this.notificationExecutorStatsBean.registerMBean();
+ if (notificationExecutorStatsBean != null) {
+ notificationExecutorStatsBean.registerMBean();
+ }
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dataStoreExecutor,
+ dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dataStoreExecutor,
"data-store-executor", mBeanType, null);
- this.dataStoreExecutorStatsBean.registerMBean();
+ if (dataStoreExecutorStatsBean != null) {
+ dataStoreExecutorStatsBean.registerMBean();
+ }
}
@Override
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-config</artifactId>
+ <packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/20-clustering-test-app.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/modules.conf</file>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider">
+ prefix:clustering-it-provider
+ </type>
+ <name>clustering-it-provider</name>
+
+ <rpc-registry>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <name>binding-rpc-broker</name>
+ </rpc-registry>
+ <data-broker>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
+ </data-broker>
+ <notification-service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
+ binding:binding-notification-service
+ </type>
+ <name>binding-notification-broker</name>
+ </notification-service>
+ </module>
+ </modules>
+ </data>
+
+ </configuration>
+
+ <required-capabilities>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider?module=clustering-it-provider&amp;revision=2014-08-19</capability>
+
+ </required-capabilities>
+
+</snapshot>
+
--- /dev/null
+# This file describes which shards live on which members
+# The format for a module-shards is as follows,
+# {
+# name = "<friendly_name_of_the_module>"
+# shards = [
+# {
+# name="<any_name_that_is_unique_for_the_module>"
+# replicas = [
+# "<name_of_member_on_which_to_run>"
+# ]
+# }
+# ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than 1
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store then
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+ {
+ name = "default"
+ shards = [
+ {
+ name="default"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "topology"
+ shards = [
+ {
+ name="topology"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "inventory"
+ shards = [
+ {
+ name="inventory"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "toaster"
+ shards = [
+ {
+ name="toaster"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "car"
+ shards = [
+ {
+ name="car"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "people"
+ shards = [
+ {
+ name="people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "car-people"
+ shards = [
+ {
+ name="car-people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+
+]
--- /dev/null
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+# name = "<friendly_name_of_module>"
+# namespace = "<the yang namespace of the module>"
+# shard-strategy = "module"
+# }
+#
+# Note that at this time the only shard-strategy we support is module which basically
+# will put all the data of a single module in two shards (one for config and one for
+# operational data)
+
+modules = [
+ {
+ name = "inventory"
+ namespace = "urn:opendaylight:inventory"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "topology"
+ namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "toaster"
+ namespace = "http://netconfcentral.org/ns/toaster"
+ shard-strategy = "module"
+ },
+ {
+ name = "car"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+ shard-strategy = "module"
+ },
+ {
+ name = "people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "car-people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+ shard-strategy = "module"
+ }
+]
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-model</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>org.opendaylight.controller.sal-clustering-it-model</Bundle-Name>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/generated-sources/sal</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!--This plugin's configuration is used to store Eclipse
+ m2e settings only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ <version>${ietf-inet-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ <version>${ietf-yang-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ <version>${yang-ext.version}</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+module car-people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
+
+ prefix car;
+
+ // NOTE(review): removed the unused ietf-inet-types import — no "inet:"
+ // type is referenced anywhere in this module.
+ import car { prefix "c"; revision-date 2014-08-18; }
+ import people { prefix "people"; revision-date 2014-08-18; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model mapping cars to their owners for the clustering test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ container car-people {
+ description
+ "Top-level container for all people car map";
+
+ list car-person {
+ key "car-id person-id";
+ description "A mapping of cars and people.";
+ leaf car-id {
+ type c:car-id;
+ }
+
+ leaf person-id {
+ type people:person-id;
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car-purchase {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
+
+ prefix cp;
+
+ // NOTE(review): removed the unused ietf-inet-types import — no "inet:"
+ // type is referenced anywhere in this module.
+ import car { prefix "car"; revision-date 2014-08-18; }
+ import people { prefix "person"; revision-date 2014-08-18; }
+ import yang-ext {prefix "ext"; revision-date "2013-07-09";}
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car purchase for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ // Routed RPC: the "person" leaf carries the routing context so the RPC is
+ // dispatched to the node that registered the matching person path.
+ rpc buy-car {
+ description
+ "buy a new car";
+ input {
+ leaf person {
+ ext:context-reference "person:person-context";
+ type person:person-ref;
+ description "A reference to a particular person.";
+ }
+
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+ }
+
+ notification carBought {
+ description
+ "Indicates that a person bought a car.";
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car";
+
+ prefix car;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ // Unique identifier for a car entry; reused by the car-people and
+ // car-purchase modules to reference cars.
+ typedef car-id {
+ type inet:uri;
+ description "An identifier for car entry.";
+ }
+
+ // Reusable definition of a car's attributes; instantiated by the
+ // cars/car-entry list below via "uses".
+ grouping car-entry {
+ description "Describes the contents of a car entry -
+ Details of the car manufacturer, model etc";
+ leaf id {
+ type car-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf model {
+ type string;
+ }
+ leaf manufacturer {
+ type string;
+ }
+
+ leaf year {
+ type uint32;
+ }
+
+ leaf category {
+ type string;
+ }
+ }
+
+ container cars {
+ description
+ "Top-level container for all car objects.";
+ list car-entry {
+ key "id";
+ description "A list of cars (as defined by the 'grouping car-entry').";
+ uses car-entry;
+ }
+ }
+
+
+}
\ No newline at end of file
--- /dev/null
+module people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people";
+
+ prefix people;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for person for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ // Unique identifier for a person; referenced by car-people/car-purchase.
+ typedef person-id {
+ type inet:uri;
+ description "An identifier for person.";
+ }
+
+ // Instance-identifier used as the routing leaf of the buy-car routed RPC.
+ typedef person-ref {
+ type instance-identifier;
+ description "A reference that points to a people:people/person in the data tree.";
+ }
+ // Routing context identity used with ext:context-reference in car-purchase.
+ identity person-context {
+ description "A person-context is a classifier for person elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
+ }
+
+ grouping person {
+ description "Describes the details of the person";
+
+ leaf id {
+ type person-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf gender {
+ type string;
+ }
+
+ leaf age {
+ type uint32;
+ }
+
+ leaf address {
+ type string;
+ }
+
+ // NOTE(review): camelCase leaf name is inconsistent with YANG's usual
+ // hyphenated style; kept as-is because generated Java APIs depend on it.
+ leaf contactNo {
+ type string;
+ }
+ }
+
+ container people {
+ description
+ "Top-level container for all people";
+
+ list person {
+ key "id";
+ description "A list of people (as defined by the 'grouping person').";
+ uses person;
+ }
+ }
+
+ rpc add-person {
+ description
+ "Add a person entry into database";
+ input {
+ uses person;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller.samples</groupId>
+    <artifactId>sal-samples</artifactId>
+    <version>1.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>clustering-it</artifactId>
+  <packaging>pom</packaging>
+  <!-- Aggregator for the clustering integration-test sample:
+       initial configuration, YANG models, and the provider implementation. -->
+  <modules>
+    <module>configuration</module>
+    <module>model</module>
+    <module>provider</module>
+  </modules>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>clustering-it</artifactId>
+    <groupId>org.opendaylight.controller.samples</groupId>
+    <version>1.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>clustering-it-provider</artifactId>
+  <packaging>bundle</packaging>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <version>${bundle.plugin.version}</version>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <Export-Package>org.opendaylight.controller.config.yang.config.clustering_it_provider</Export-Package>
+            <Import-Package>*</Import-Package>
+          </instructions>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.opendaylight.yangtools</groupId>
+        <artifactId>yang-maven-plugin</artifactId>
+        <version>${yangtools.version}</version>
+        <executions>
+          <execution>
+            <id>config</id>
+            <goals>
+              <goal>generate-sources</goal>
+            </goals>
+            <configuration>
+              <codeGenerators>
+                <generator>
+                  <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+                  <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+                  <additionalConfiguration>
+                    <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+                  </additionalConfiguration>
+                </generator>
+                <generator>
+                  <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+                  <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+                </generator>
+              </codeGenerators>
+              <inspectDependencies>true</inspectDependencies>
+            </configuration>
+          </execution>
+        </executions>
+        <dependencies>
+          <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>yang-jmx-generator-plugin</artifactId>
+            <version>${config.version}</version>
+          </dependency>
+          <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>maven-sal-api-gen-plugin</artifactId>
+            <version>${yangtools.version}</version>
+          </dependency>
+        </dependencies>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-model</artifactId>
+      <!-- FIX(review): was ${version}, a deprecated expression that Maven 3
+           resolves unreliably; ${project.version} is the supported form. -->
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-api</artifactId>
+      <version>${config.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-config</artifactId>
+      <version>${mdsal.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-api</artifactId>
+      <version>${mdsal.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-common-util</artifactId>
+      <version>${mdsal.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>equinoxSDK381</groupId>
+      <artifactId>org.eclipse.osgi</artifactId>
+      <version>3.8.1.v20120830-144521</version>
+    </dependency>
+  </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.listener;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class PeopleCarListener implements CarPurchaseListener {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleCarListener.class);
+
+ private DataBroker dataProvider;
+
+
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+ @Override
+ public void onCarBought(CarBought notification) {
+ log.info("onCarBought notification : Adding car person entry");
+
+ final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
+ carPersonBuilder.setCarId(notification.getCarId());
+ carPersonBuilder.setPersonId(notification.getPersonId());
+ CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
+ carPersonBuilder.setKey(key);
+ final CarPerson carPerson = carPersonBuilder.build();
+
+ InstanceIdentifier<CarPerson> carPersonIId =
+ InstanceIdentifier.<CarPeople>builder(CarPeople.class).child(CarPerson.class, carPerson.getKey()).build();
+
+
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
+
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("Car bought, entry added to map of people and car [{}]", carPerson);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("Car bought, Failed entry addition to map of people and car [{}]", carPerson);
+ }
+ });
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+public class PeopleProvider implements PeopleService, AutoCloseable {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleProvider.class);
+
+ private DataBroker dataProvider;
+
+ private BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration;
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+
+ public void setRpcRegistration(BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration) {
+ this.rpcRegistration = rpcRegistration;
+ }
+
+ @Override
+ public Future<RpcResult<Void>> addPerson(AddPersonInput input) {
+ log.info("RPC addPerson : adding person [{}]", input);
+
+ PersonBuilder builder = new PersonBuilder(input);
+ final Person person = builder.build();
+ final SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+
+ // Each entry will be identifiable by a unique key, we have to create that identifier
+ final InstanceIdentifier.InstanceIdentifierBuilder<Person> personIdBuilder =
+ InstanceIdentifier.<People>builder(People.class)
+ .child(Person.class, person.getKey());
+ final InstanceIdentifier personId = personIdBuilder.build();
+ // Place entry in data store tree
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
+
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("RPC addPerson : person added successfully [{}]", person);
+ rpcRegistration.registerPath(PersonContext.class, personId);
+ log.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
+ futureResult.set(RpcResultBuilder.<Void>success().build());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("RPC addPerson : person addition failed [{}]", person);
+ futureResult.set(RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, t.getMessage()).build());
+ }
+ });
+ return futureResult;
+ }
+
+ @Override
+ public void close() throws Exception {
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+
+public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable {
+
+    private static final Logger log = LoggerFactory.getLogger(PurchaseCarProvider.class);
+
+    private NotificationProviderService notificationProvider;
+
+    /** Injects the notification service used to publish CarBought events. */
+    public void setNotificationProvider(final NotificationProviderService salService) {
+        this.notificationProvider = salService;
+    }
+
+    /**
+     * Routed RPC: publishes a CarBought notification for the requested
+     * car/person pair and completes successfully.
+     */
+    @Override
+    public Future<RpcResult<Void>> buyCar(BuyCarInput input) {
+        log.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+        final SettableFuture<RpcResult<Void>> rpcResult = SettableFuture.create();
+        final CarBoughtBuilder boughtEvent = new CarBoughtBuilder();
+        boughtEvent.setCarId(input.getCarId());
+        boughtEvent.setPersonId(input.getPersonId());
+        notificationProvider.publish(boughtEvent.build());
+        rpcResult.set(RpcResultBuilder.<Void>success().build());
+        return rpcResult;
+    }
+
+    @Override
+    public void close() throws Exception {
+        // Nothing to release.
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+
+
+import org.opendaylight.controller.clustering.it.listener.PeopleCarListener;
+import org.opendaylight.controller.clustering.it.provider.PeopleProvider;
+import org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+public class ClusteringItProviderModule extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModule {
+    public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+        super(identifier, dependencyResolver);
+    }
+
+    public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.clustering_it_provider.ClusteringItProviderModule oldModule, java.lang.AutoCloseable oldInstance) {
+        super(identifier, dependencyResolver, oldModule, oldInstance);
+    }
+
+    @Override
+    public void customValidation() {
+        // add custom validation for module attributes here.
+    }
+
+    /**
+     * Wires up the clustering test application: a routed car-purchase RPC, a
+     * people RPC service, and a notification listener that records purchases.
+     *
+     * @return an AutoCloseable that tears down all registrations on close()
+     */
+    @Override
+    public java.lang.AutoCloseable createInstance() {
+        DataBroker dataBrokerService = getDataBrokerDependency();
+        NotificationProviderService notificationProvider = getNotificationServiceDependency();
+
+        // Add routed RPC registration for car purchase
+        final PurchaseCarProvider purchaseCar = new PurchaseCarProvider();
+        purchaseCar.setNotificationProvider(notificationProvider);
+
+        final BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> purchaseCarRpc = getRpcRegistryDependency()
+            .addRoutedRpcImplementation(CarPurchaseService.class, purchaseCar);
+
+        // Add people provider registration
+        final PeopleProvider people = new PeopleProvider();
+        people.setDataProvider(dataBrokerService);
+        people.setRpcRegistration(purchaseCarRpc);
+
+        final BindingAwareBroker.RpcRegistration<PeopleService> peopleRpcReg = getRpcRegistryDependency()
+            .addRpcImplementation(PeopleService.class, people);
+
+        // Listener that maps each bought car to its new owner in the datastore.
+        final PeopleCarListener peopleCarListener = new PeopleCarListener();
+        peopleCarListener.setDataProvider(dataBrokerService);
+
+        // FIX(review): reuse the already-resolved notification service instead
+        // of resolving the dependency a second time.
+        final ListenerRegistration<NotificationListener> listenerReg =
+            notificationProvider.registerNotificationListener( peopleCarListener );
+
+        // Close all md-sal registrations when the module instance is destroyed.
+        // FIX(review): renamed from AutoCloseableToaster, a copy-paste leftover
+        // from the toaster sample.
+        final class AutoCloseableClusteringItApp implements AutoCloseable {
+
+            @Override
+            public void close() throws Exception {
+                peopleRpcReg.close();
+                purchaseCarRpc.close();
+                people.close();
+                purchaseCar.close();
+                listenerReg.close();
+            }
+        }
+
+        return new AutoCloseableClusteringItApp();
+    }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: clustering-it-provider yang module local name: clustering-it-provider
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Aug 19 14:44:46 PDT 2014
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+// Config-subsystem factory hook; intentionally empty — all behaviour lives in
+// the generated AbstractClusteringItProviderModuleFactory base class.
+public class ClusteringItProviderModuleFactory extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModuleFactory {
+
+}
--- /dev/null
+module clustering-it-provider {
+
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider";
+ prefix "clustering-it-provider";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; }
+
+ description
+ "This module contains the base YANG definitions for
+ clustering-it-provider implementation.";
+
+ revision "2014-08-19" {
+ description
+ "Initial revision.";
+ }
+
+ // This is the definition of the service implementation as a module identity.
+ identity clustering-it-provider {
+ base config:module-type;
+
+ // Specifies the prefix for generated java classes.
+ config:java-name-prefix ClusteringItProvider;
+ }
+
+ // Augments the 'configuration' choice node under modules/module.
+ augment "/config:modules/config:module/config:configuration" {
+ case clustering-it-provider {
+ when "/config:modules/config:module/config:type = 'clustering-it-provider'";
+
+ // Required service: binding RPC registry (RPC registration).
+ container rpc-registry {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-rpc-registry;
+ }
+ }
+ }
+
+ // Required service: binding notification service (publish/subscribe).
+ container notification-service {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-notification-service;
+ }
+ }
+ }
+
+ // NOTE(review): data-broker is optional (mandatory false) while the two
+ // services above are mandatory, yet createInstance() uses it
+ // unconditionally — confirm this asymmetry is intended.
+ container data-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity mdsal:binding-async-data-broker;
+ }
+ }
+ }
+ }
+ }
+}
<module>toaster-provider</module>
<module>toaster-config</module>
<module>l2switch</module>
+ <module>clustering-test-app</module>
</modules>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
package org.opendaylight.controller.netconf.client;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import io.netty.channel.Channel;
logger.debug("Netconf session {} should use exi.", session);
NetconfStartExiMessage startExiMessage = (NetconfStartExiMessage) sessionPreferences.getStartExiMessage();
tryToInitiateExi(session, startExiMessage);
- // Exi is not supported, release session immediately
} else {
+ // Exi is not supported, release session immediately
logger.debug("Netconf session {} isn't capable of using exi.", session);
negotiationSuccessful(session);
}
private long extractSessionId(final Document doc) {
final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+ Preconditions.checkState(sessionIdNode != null, "");
String textContent = sessionIdNode.getTextContent();
if (textContent == null || textContent.equals("")) {
throw new IllegalStateException("Session id not received from server");
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+
+import java.net.InetSocketAddress;
+
+// Verifies that NetconfClientConfigurationBuilder passes every value set on
+// the builder through to the built NetconfClientConfiguration unchanged.
+public class NetconfClientConfigurationTest {
+ @Test
+ public void testNetconfClientConfiguration() throws Exception {
+ // Fixture values; collaborators whose behavior is irrelevant here
+ // (reconnect strategy, auth handler) are Mockito mocks.
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategy strategy = Mockito.mock(ReconnectStrategy.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ NetconfClientConfiguration cfg = NetconfClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(strategy).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withAuthHandler(handler).build();
+
+ // Each getter must return exactly what was handed to the builder.
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getReconnectStrategy());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+
+// Smoke test for NetconfClientDispatcherImpl: builds SSH and TCP client
+// configurations against fully mocked netty event-loop groups and checks
+// that createClient/createReconnectingClient return non-null futures.
+public class NetconfClientDispatcherImplTest {
+ @Test
+ public void testNetconfClientDispatcherImpl() throws Exception {
+ // Mocked netty plumbing; no real network I/O happens in this test.
+ EventLoopGroup bossGroup = Mockito.mock(EventLoopGroup.class);
+ EventLoopGroup workerGroup = Mockito.mock(EventLoopGroup.class);
+ Timer timer = new HashedWheelTimer();
+
+ ChannelFuture chf = Mockito.mock(ChannelFuture.class);
+ Channel ch = Mockito.mock(Channel.class);
+ doReturn(ch).when(chf).channel();
+ Throwable thr = Mockito.mock(Throwable.class);
+ // Channel registration on the worker group yields the mocked future.
+ doReturn(chf).when(workerGroup).register(any(Channel.class));
+
+ ChannelPromise promise = Mockito.mock(ChannelPromise.class);
+ doReturn(promise).when(chf).addListener(any(GenericFutureListener.class));
+ doReturn(thr).when(chf).cause();
+
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory reconnectStrategyFactory = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ doReturn(5).when(reconnect).getConnectTimeout();
+ // toString() is stubbed because mocks are logged during connect.
+ doReturn("").when(reconnect).toString();
+ doReturn("").when(handler).toString();
+ doReturn("").when(reconnectStrategyFactory).toString();
+ doReturn(reconnect).when(reconnectStrategyFactory).createReconnectStrategy();
+
+ // Identical configurations except for the protocol: SSH vs TCP.
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfReconnectingClientConfiguration cfg2 = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.TCP).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfClientDispatcherImpl dispatcher = new NetconfClientDispatcherImpl(bossGroup, workerGroup, timer);
+ Future<NetconfClientSession> sshSession = dispatcher.createClient(cfg);
+ Future<NetconfClientSession> tcpSession = dispatcher.createClient(cfg2);
+
+ Future<Void> sshReconn = dispatcher.createReconnectingClient(cfg);
+ Future<Void> tcpReconn = dispatcher.createReconnectingClient(cfg2);
+
+ // Only liveness is asserted; completion of the futures is not awaited.
+ assertNotNull(sshSession);
+ assertNotNull(tcpSession);
+ assertNotNull(sshReconn);
+ assertNotNull(tcpReconn);
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.Promise;
+import org.apache.sshd.common.SessionListener;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+// Checks that NetconfClientSessionNegotiatorFactory produces a non-null
+// SessionNegotiator from a mocked listener factory, channel and promise.
+public class NetconfClientSessionNegotiatorFactoryTest {
+ @Test
+ public void testGetSessionNegotiator() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class);
+ doReturn(sessionListener).when(listenerFactory).getSessionListener();
+
+ Channel channel = mock(Channel.class);
+ Promise promise = mock(Promise.class);
+ // No additional-header; 200 ms connection timeout.
+ NetconfClientSessionNegotiatorFactory negotiatorFactory = new NetconfClientSessionNegotiatorFactory(timer,
+ Optional.<NetconfHelloMessageAdditionalHeader>absent(), 200L);
+
+ SessionNegotiator sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
+ assertNotNull(sessionNegotiator);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.*;
+import io.netty.handler.ssl.SslHandler;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.Promise;
+import org.apache.mina.handler.demux.ExceptionHandler;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.collections.Sets;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import io.netty.util.Timer;
+import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.w3c.dom.Document;
+import java.util.Set;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+// Exercises NetconfClientSessionNegotiator's hello-message handling over a
+// fully mocked netty channel/pipeline, both without EXI and with an EXI
+// start message (the EXI path replaces pipeline codecs).
+public class NetconfClientSessionNegotiatorTest {
+
+ private NetconfHelloMessage helloMessage;
+ private ChannelPipeline pipeline;
+ private ChannelFuture future;
+ private Channel channel;
+ // Captured by the doAnswer() stub in the EXI test so the test can feed
+ // the negotiator's exi-confirmation handler a reply message directly.
+ private ChannelInboundHandlerAdapter channelInboundHandlerAdapter;
+
+ @Before
+ public void setUp() throws Exception {
+ // Client hello advertising the exi:1.0 capability, no additional header.
+ helloMessage = NetconfHelloMessage.createClientHello(Sets.newSet("exi:1.0"), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+ pipeline = mockChannelPipeline();
+ future = mockChannelFuture();
+ channel = mockChannel();
+ // NOTE(review): leftover debug output — consider removing.
+ System.out.println("setup done");
+ }
+
+ private ChannelHandler mockChannelHandler() {
+ ChannelHandler handler = mock(ChannelHandler.class);
+ return handler;
+ }
+
+ // Channel mock wired to the shared pipeline/future mocks; supports the
+ // close/writeAndFlush/pipeline calls the negotiator makes.
+ private Channel mockChannel() {
+ Channel channel = mock(Channel.class);
+ ChannelHandler channelHandler = mockChannelHandler();
+ doReturn("").when(channel).toString();
+ doReturn(future).when(channel).close();
+ doReturn(future).when(channel).writeAndFlush(anyObject());
+ doReturn(true).when(channel).isOpen();
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(pipeline).toString();
+ doReturn(pipeline).when(pipeline).remove(any(ChannelHandler.class));
+ doReturn(channelHandler).when(pipeline).remove(anyString());
+ return channel;
+ }
+
+ private ChannelFuture mockChannelFuture() {
+ ChannelFuture future = mock(ChannelFuture.class);
+ doReturn(future).when(future).addListener(any(GenericFutureListener.class));
+ return future;
+ }
+
+ // Pipeline mock covering the handler add/replace calls issued during
+ // negotiation; a real NetconfXMLToHelloMessageDecoder is returned so the
+ // negotiator can drain it.
+ private ChannelPipeline mockChannelPipeline() {
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ ChannelHandler handler = mock(ChannelHandler.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ doReturn(null).when(pipeline).get(SslHandler.class);
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+ doReturn(handler).when(pipeline).replace(anyString(), anyString(), any(ChunkedFramingMechanismEncoder.class));
+
+ NetconfXMLToHelloMessageDecoder messageDecoder = new NetconfXMLToHelloMessageDecoder();
+ doReturn(messageDecoder).when(pipeline).replace(anyString(), anyString(), any(NetconfXMLToMessageDecoder.class));
+ doReturn(pipeline).when(pipeline).replace(any(ChannelHandler.class), anyString(), any(NetconfClientSession.class));
+ return pipeline;
+ }
+
+ // Builds a negotiator with the shared mocks; startExi == null disables
+ // the EXI proposal.
+ private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise,
+ NetconfMessage startExi) {
+ ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class);
+ NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi);
+ doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class));
+
+ long timeout = 10L;
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ return new NetconfClientSessionNegotiator(preferences, promise, channel, timer, sessionListener, timeout);
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiator() throws Exception {
+ Promise promise = mock(Promise.class);
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null);
+
+ negotiator.channelActive(null);
+ // Server hello without EXI capability -> plain negotiation success.
+ Set caps = Sets.newSet("a", "b");
+ NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ negotiator.handleMessage(helloServerMessage);
+ verify(promise).setSuccess(anyObject());
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiatorWithEXI() throws Exception {
+ Promise promise = mock(Promise.class);
+ EXIOptions exiOptions = new EXIOptions();
+ NetconfStartExiMessage exiMessage = NetconfStartExiMessage.create(exiOptions, "msg-id");
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("exi:1.0");
+ NetconfHelloMessage helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+
+ // Capture the handler the negotiator installs for the EXI <rpc-reply>
+ // so it can be invoked directly below.
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+ channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]);
+ return null;
+ }
+ }).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+
+ ChannelHandlerContext handlerContext = mock(ChannelHandlerContext.class);
+ doReturn(pipeline).when(handlerContext).pipeline();
+ negotiator.handleMessage(helloMessage);
+ // Simulate the server's ok reply to start-exi.
+ Document expectedResult = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ channelInboundHandlerAdapter.channelRead(handlerContext, new NetconfMessage(expectedResult));
+
+ verify(promise).setSuccess(anyObject());
+
+ // two calls for exiMessage, 2 for hello message
+ verify(pipeline, times(4)).replace(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.openexi.proc.common.EXIOptions;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+
+// Tests NetconfClientSession: capability getter, thisInstance(), and that
+// starting/stopping EXI communication replaces the expected number of
+// pipeline handlers (2 per direction = 4 total).
+public class NetconfClientSessionTest {
+
+ @Mock
+ ChannelHandler channelHandler;
+
+ @Mock
+ Channel channel;
+
+ @Before
+ public void setUp() throws Exception {
+ // Initializes the @Mock fields above.
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @Test
+ public void testNetconfClientSession() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ long sessId = 20L;
+ Collection<String> caps = Lists.newArrayList("cap1", "cap2");
+
+ // Real codec (default EXI options); only the pipeline is mocked.
+ NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+
+ Mockito.doReturn(pipeline).when(channel).pipeline();
+ Mockito.doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+ Mockito.doReturn("").when(channelHandler).toString();
+
+ NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps);
+ session.addExiHandlers(codec);
+ session.stopExiCommunication();
+
+ assertEquals(caps, session.getServerCapabilities());
+ assertEquals(session, session.thisInstance());
+
+ // addExiHandlers + stopExiCommunication each swap encoder and decoder.
+ Mockito.verify(pipeline, Mockito.times(4)).replace(anyString(), anyString(), Mockito.any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.config.yang.protocol.framework.NeverReconnectStrategyFactoryModule;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+
+// Verifies that NetconfReconnectingClientConfigurationBuilder passes every
+// configured value (including the reconnect strategy factory) through to
+// the built configuration unchanged.
+public class NetconfReconnectingClientConfigurationTest {
+ @Test
+ public void testNetconfReconnectingClientConfiguration() throws Exception {
+ // Fixture values; strategy factory, auth handler and strategy are mocks.
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory strategy = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(strategy).
+ withAuthHandler(handler).build();
+
+ // Each getter must return exactly what was handed to the builder.
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getConnectStrategyFactory());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ Assert.assertEquals(reconnect, cfg.getReconnectStrategy());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+import io.netty.channel.*;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+
+import java.util.Set;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.*;
+
+// Tests SimpleNetconfClientSessionListener request/response bookkeeping:
+// a queued request is flushed on session-up, and its promise fails on
+// session-down / stays pending on re-send / succeeds on a reply message.
+public class SimpleNetconfClientSessionListenerTest {
+
+ private Channel channel;
+ private ChannelFuture channelFuture;
+ Set caps;
+ private NetconfHelloMessage helloMessage;
+ private NetconfMessage message;
+ private NetconfClientSessionListener sessionListener;
+ private NetconfClientSession clientSession;
+
+ @Before
+ public void setUp() throws Exception {
+ channel = mock(Channel.class);
+ channelFuture = mock(ChannelFuture.class);
+ doReturn(channelFuture).when(channel).writeAndFlush(anyObject());
+ caps = Sets.newSet("a", "b");
+ // A server hello doubles as the request payload for these tests.
+ helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ message = new NetconfMessage(helloMessage.getDocument());
+ sessionListener = mock(NetconfClientSessionListener.class);
+ clientSession = new NetconfClientSession(sessionListener, channel, 20L, caps);
+ }
+
+ @Test
+ public void testSessionDown() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ // Request queued before session-up must be written exactly once.
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ // Session loss fails the outstanding request promise.
+ simpleListener.onSessionDown(clientSession, new Exception());
+ assertFalse(promise.isSuccess());
+ }
+
+ @Test
+ public void testSendRequest() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ // A second request does not complete the first promise.
+ simpleListener.sendRequest(message);
+ assertFalse(promise.isSuccess());
+ }
+
+ @Test
+ public void testOnMessage() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ // An incoming message completes the outstanding request promise.
+ simpleListener.onMessage(clientSession, message);
+ assertTrue(promise.isSuccess());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+// Checks SshClientChannelInitializer.initialize(): against a mocked channel
+// and pipeline, the SSH handler must be installed at the head of the
+// pipeline exactly once.
+public class SshClientChannelInitializerTest {
+ @Test
+ public void test() throws Exception {
+
+ AuthenticationHandler authenticationHandler = mock(AuthenticationHandler.class);
+ NetconfClientSessionNegotiatorFactory negotiatorFactory = mock(NetconfClientSessionNegotiatorFactory.class);
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+
+ SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ // toString() stubbed: mocks may be logged while handlers are added.
+ doReturn("").when(sessionNegotiator).toString();
+ doReturn(sessionNegotiator).when(negotiatorFactory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ Channel channel = mock(Channel.class);
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(channel).toString();
+ doReturn(pipeline).when(pipeline).addFirst(any(ChannelHandler.class));
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+
+ Promise<NetconfClientSession> promise = mock(Promise.class);
+ doReturn("").when(promise).toString();
+
+ SshClientChannelInitializer initializer = new SshClientChannelInitializer(authenticationHandler, negotiatorFactory,
+ sessionListener);
+ initializer.initialize(channel, promise);
+ // The SSH transport handler goes first in the pipeline.
+ verify(pipeline, times(1)).addFirst(any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Mockito.*;
+
+// Checks TcpClientChannelInitializer.initializeSessionNegotiator(): the
+// negotiator produced by the factory must be added after an existing
+// pipeline handler exactly once.
+public class TcpClientChannelInitializerTest {
+ @Test
+ public void testInitializeSessionNegotiator() throws Exception {
+ NetconfClientSessionNegotiatorFactory factory = mock(NetconfClientSessionNegotiatorFactory.class);
+ SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ // toString() stubbed: mocks may be logged while handlers are added.
+ doReturn("").when(sessionNegotiator).toString();
+ doReturn(sessionNegotiator).when(factory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+ NetconfClientSessionListener listener = mock(NetconfClientSessionListener.class);
+ TcpClientChannelInitializer initializer = new TcpClientChannelInitializer(factory, listener);
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ Channel channel = mock(Channel.class);
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(channel).toString();
+
+ Promise<NetconfClientSession> promise = mock(Promise.class);
+ doReturn("").when(promise).toString();
+
+ initializer.initializeSessionNegotiator(channel, promise);
+ verify(pipeline, times(1)).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.client.test;
+package org.opendaylight.controller.netconf.client;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
-import org.opendaylight.controller.netconf.client.NetconfClientSession;
-import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
-import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration.NetconfClientProtocol;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public class JaxBSerializerTest {
@Test
- public void testName() throws Exception {
+ public void testSerialization() throws Exception {
final NetconfMonitoringService service = new NetconfMonitoringService() {
}
};
final NetconfState model = new NetconfState(service);
- final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model));
+ final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)).replaceAll("\\s", "");
assertThat(xml, CoreMatchers.containsString(
- "<schema>\n" +
- "<format>yang</format>\n" +
- "<identifier>id</identifier>\n" +
- "<location>NETCONF</location>\n" +
- "<namespace>localhost</namespace>\n" +
- "<version>v1</version>\n" +
- "</schema>\n"));
+ "<schema>" +
+ "<format>yang</format>" +
+ "<identifier>id</identifier>" +
+ "<location>NETCONF</location>" +
+ "<namespace>localhost</namespace>" +
+ "<version>v1</version>" +
+ "</schema>"));
assertThat(xml, CoreMatchers.containsString(
- "<session>\n" +
- "<session-id>1</session-id>\n" +
- "<in-bad-rpcs>0</in-bad-rpcs>\n" +
- "<in-rpcs>0</in-rpcs>\n" +
- "<login-time>loginTime</login-time>\n" +
- "<out-notifications>0</out-notifications>\n" +
- "<out-rpc-errors>0</out-rpc-errors>\n" +
- "<ncme:session-identifier>client</ncme:session-identifier>\n" +
- "<source-host>address/port</source-host>\n" +
- "<transport>ncme:netconf-tcp</transport>\n" +
- "<username>username</username>\n" +
+ "<session>" +
+ "<session-id>1</session-id>" +
+ "<in-bad-rpcs>0</in-bad-rpcs>" +
+ "<in-rpcs>0</in-rpcs>" +
+ "<login-time>loginTime</login-time>" +
+ "<out-notifications>0</out-notifications>" +
+ "<out-rpc-errors>0</out-rpc-errors>" +
+ "<ncme:session-identifier>client</ncme:session-identifier>" +
+ "<source-host>address/port</source-host>" +
+ "<transport>ncme:netconf-tcp</transport>" +
+ "<username>username</username>" +
"</session>"));
}
assertThat(out.get(0), CoreMatchers.instanceOf(NetconfHelloMessage.class));
final NetconfHelloMessage hello = (NetconfHelloMessage) out.get(0);
assertTrue(hello.getAdditionalHeader().isPresent());
- assertEquals("[tomas;10.0.0.0:10000;tcp;client;]\n", hello.getAdditionalHeader().get().toFormattedString());
+ assertEquals("[tomas;10.0.0.0:10000;tcp;client;]" + System.lineSeparator(), hello.getAdditionalHeader().get().toFormattedString());
assertThat(XmlUtil.toString(hello.getDocument()), CoreMatchers.containsString("<hello xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\""));
}
netconfSSHServer.setAuthProvider(authProvider);
InetSocketAddress address = netconfSSHServer.getLocalSocketAddress();
- final EchoClientHandler echoClientHandler = connectClient(address);
+
+ final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort()));
+
Stopwatch stopwatch = new Stopwatch().start();
while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
Thread.sleep(100);
this.hashedWheelTimer = hashedWheelTimer;
}
- private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi) {
+ private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
final Set<Capability> capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function<ModuleBuilder, Capability>() {
@Override
: Sets.newHashSet(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0, XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
- hashedWheelTimer, simulatedOperationProvider, idProvider, CONNECTION_TIMEOUT_MILLIS, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
+ hashedWheelTimer, simulatedOperationProvider, idProvider, generateConfigsTimeout, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
serverNegotiatorFactory);
public List<Integer> start(final Main.Params params) {
final Map<ModuleBuilder, String> moduleBuilders = parseSchemasToModuleBuilders(params);
- final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi);
+ final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout);
int currentPort = params.startingPort;
<artifactId>xmlunit</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
Document doc = XmlUtil.newDocument();
Element helloElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
HELLO_TAG);
- Element capabilitiesElement = doc.createElement(XmlNetconfConstants.CAPABILITIES);
+ Element capabilitiesElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.CAPABILITIES);
for (String capability : Sets.newHashSet(capabilities)) {
- Element capElement = doc.createElement(XmlNetconfConstants.CAPABILITY);
+ Element capElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.CAPABILITY);
capElement.setTextContent(capability);
capabilitiesElement.appendChild(capElement);
}
public static NetconfHelloMessage createServerHello(Set<String> capabilities, long sessionId) throws NetconfDocumentedException {
Document doc = createHelloMessageDoc(capabilities);
- Element sessionIdElement = doc.createElement(XmlNetconfConstants.SESSION_ID);
+ Element sessionIdElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.SESSION_ID);
sessionIdElement.setTextContent(Long.toString(sessionId));
doc.getDocumentElement().appendChild(sessionIdElement);
return new NetconfHelloMessage(doc);
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+
+import com.google.common.collect.Lists;
+import org.junit.Test;
+
+/**
+ * Tests for CloseableUtil.closeAll: verifies that failures while closing are
+ * aggregated via suppressed exceptions, and that closing well-behaved
+ * resources completes without error.
+ */
+public class CloseableUtilTest {
+
+    // closeAll over two failing closeables: one failure propagates and the
+    // other must be attached to it as a suppressed exception.
+    @Test
+    public void testCloseAllFail() throws Exception {
+        final AutoCloseable failingCloseable = new AutoCloseable() {
+            @Override
+            public void close() throws Exception {
+                throw new RuntimeException("testing failing close");
+            }
+        };
+
+        try {
+            // Same instance passed twice so both close() calls throw.
+            CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+            fail("Exception with suppressed should be thrown");
+        } catch (final RuntimeException e) {
+            // Exactly one of the two failures is recorded as suppressed.
+            assertEquals(1, e.getSuppressed().length);
+        }
+    }
+
+    // closeAll over mocks whose close() succeeds must not throw.
+    @Test
+    public void testCloseAll() throws Exception {
+        final AutoCloseable failingCloseable = mock(AutoCloseable.class);
+        doNothing().when(failingCloseable).close();
+        CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+/**
+ * Tests for HardcodedNamespaceResolver: a resolver fixed to the single
+ * prefix-to-namespace mapping supplied at construction time.
+ */
+public class HardcodedNamespaceResolverTest {
+
+    @Test
+    public void testResolver() throws Exception {
+        final HardcodedNamespaceResolver hardcodedNamespaceResolver = new HardcodedNamespaceResolver("prefix", "namespace");
+
+        // The configured prefix resolves to the configured namespace URI.
+        assertEquals("namespace", hardcodedNamespaceResolver.getNamespaceURI("prefix"));
+        try{
+            // Any other prefix is rejected with an exception, not a null return.
+            hardcodedNamespaceResolver.getNamespaceURI("unknown");
+            fail("Unknown namespace lookup should fail");
+        } catch(IllegalStateException e) {}
+
+        // Reverse (namespace -> prefix) lookups are not supported.
+        assertNull(hardcodedNamespaceResolver.getPrefix("any"));
+        assertNull(hardcodedNamespaceResolver.getPrefixes("any"));
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.hamcrest.CoreMatchers.both;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Map;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import com.google.common.base.Optional;
+
+/**
+ * Tests for XmlElement: construction from strings/DOM, getters for name,
+ * namespace, attributes and children, namespace extraction from prefixed
+ * text content, and unrecognised-element checking.
+ */
+public class XmlElementTest {
+
+    // Fixture: a default-namespaced root with a namespaced attribute, a plain
+    // nested child, a child in its own namespace, and a child whose text
+    // content carries a namespace prefix.
+    private final String elementAsString = "<top xmlns=\"namespace\" xmlns:a=\"attrNamespace\" a:attr1=\"value1\" attr2=\"value2\">" +
+            "<inner>" +
+            "<deepInner>deepValue</deepInner>" +
+            "</inner>" +
+            "<innerNamespace xmlns=\"innerNamespace\">innerNamespaceValue</innerNamespace>" +
+            "<innerPrefixed xmlns:b=\"prefixedValueNamespace\">b:valueWithPrefix</innerPrefixed>" +
+            "</top>";
+    private Document document;
+    private Element element;
+    private XmlElement xmlElement;
+
+    @Before
+    public void setUp() throws Exception {
+        document = XmlUtil.readXmlToDocument(elementAsString);
+        element = document.getDocumentElement();
+        xmlElement = XmlElement.fromDomElement(element);
+    }
+
+    // Every factory method must accept the valid fixture; malformed input or
+    // a mismatched expected name/namespace must be rejected.
+    @Test
+    public void testConstruct() throws Exception {
+        final XmlElement fromString = XmlElement.fromString(elementAsString);
+        assertEquals(fromString, xmlElement);
+        XmlElement.fromDomDocument(document);
+        XmlElement.fromDomElement(element);
+        XmlElement.fromDomElementWithExpected(element, "top");
+        XmlElement.fromDomElementWithExpected(element, "top", "namespace");
+
+        try {
+            XmlElement.fromString("notXml");
+            fail();
+        } catch (final NetconfDocumentedException e) {}
+
+        try {
+            XmlElement.fromDomElementWithExpected(element, "notTop");
+            fail();
+        } catch (final NetconfDocumentedException e) {}
+
+        try {
+            XmlElement.fromDomElementWithExpected(element, "top", "notNamespace");
+            fail();
+        } catch (final NetconfDocumentedException e) {}
+    }
+
+    @Test
+    public void testGetters() throws Exception {
+        assertEquals(element, xmlElement.getDomElement());
+        assertEquals(element.getElementsByTagName("inner").getLength(), xmlElement.getElementsByTagName("inner").getLength());
+
+        assertEquals("top", xmlElement.getName());
+        assertTrue(xmlElement.hasNamespace());
+        assertEquals("namespace", xmlElement.getNamespace());
+        assertEquals("namespace", xmlElement.getNamespaceAttribute());
+        assertEquals(Optional.of("namespace"), xmlElement.getNamespaceOptionally());
+
+        assertEquals("value1", xmlElement.getAttribute("attr1", "attrNamespace"));
+        assertEquals("value2", xmlElement.getAttribute("attr2"));
+        // Two data attributes plus the two xmlns declarations on <top>.
+        assertEquals(2 + 2/*Namespace definition*/, xmlElement.getAttributes().size());
+
+        assertEquals(3, xmlElement.getChildElements().size());
+        assertEquals(1, xmlElement.getChildElements("inner").size());
+        assertTrue(xmlElement.getOnlyChildElementOptionally("inner").isPresent());
+        assertTrue(xmlElement.getOnlyChildElementWithSameNamespaceOptionally("inner").isPresent());
+        assertEquals(0, xmlElement.getChildElements("unknown").size());
+        assertFalse(xmlElement.getOnlyChildElementOptionally("unknown").isPresent());
+        // <innerNamespace> lives in its own namespace, so same-namespace
+        // lookups miss it while namespace-qualified lookups find it.
+        assertEquals(1, xmlElement.getChildElementsWithSameNamespace("inner").size());
+        assertEquals(0, xmlElement.getChildElementsWithSameNamespace("innerNamespace").size());
+        assertEquals(1, xmlElement.getChildElementsWithinNamespace("innerNamespace", "innerNamespace").size());
+        assertTrue(xmlElement.getOnlyChildElementOptionally("innerNamespace", "innerNamespace").isPresent());
+        assertFalse(xmlElement.getOnlyChildElementOptionally("innerNamespace", "unknownNamespace").isPresent());
+
+        final XmlElement noNamespaceElement = XmlElement.fromString("<noNamespace/>");
+        assertFalse(noNamespaceElement.hasNamespace());
+        try {
+            noNamespaceElement.getNamespace();
+            fail();
+        } catch (final MissingNameSpaceException e) {}
+
+        final XmlElement inner = xmlElement.getOnlyChildElement("inner");
+        final XmlElement deepInner = inner.getOnlyChildElementWithSameNamespaceOptionally().get();
+        assertEquals(deepInner, inner.getOnlyChildElementWithSameNamespace());
+        assertEquals(Optional.<XmlElement>absent(), xmlElement.getOnlyChildElementOptionally("unknown"));
+        assertEquals("deepValue", deepInner.getTextContent());
+        assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+        assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+    }
+
+    // findNamespaceOfTextContent must resolve the prefix used in text content
+    // to its declared namespace ("" key for the default namespace).
+    @Test
+    public void testExtractNamespaces() throws Exception {
+        final XmlElement innerPrefixed = xmlElement.getOnlyChildElement("innerPrefixed");
+        Map.Entry<String, String> namespaceOfTextContent = innerPrefixed.findNamespaceOfTextContent();
+
+        assertNotNull(namespaceOfTextContent);
+        assertEquals("b", namespaceOfTextContent.getKey());
+        assertEquals("prefixedValueNamespace", namespaceOfTextContent.getValue());
+        final XmlElement innerNamespace = xmlElement.getOnlyChildElement("innerNamespace");
+        namespaceOfTextContent = innerNamespace.findNamespaceOfTextContent();
+
+        assertEquals("", namespaceOfTextContent.getKey());
+        assertEquals("innerNamespace", namespaceOfTextContent.getValue());
+    }
+
+    @Test
+    public void testUnrecognisedElements() throws Exception {
+        xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"), xmlElement.getOnlyChildElement("innerPrefixed"), xmlElement.getOnlyChildElement("innerNamespace"));
+
+        try {
+            xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"));
+            fail();
+        } catch (final NetconfDocumentedException e) {
+            // With only "inner" recognised, BOTH remaining children must be
+            // reported. Fixed: the original matcher checked "innerNamespace"
+            // twice and never asserted on "innerPrefixed".
+            assertThat(e.getMessage(), both(containsString("innerPrefixed")).and(containsString("innerNamespace")));
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import com.google.common.base.Optional;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXParseException;
+
+/**
+ * Tests for XmlUtil helpers: namespaced element creation, XSD schema
+ * loading, and XPath compilation/evaluation via XMLNetconfUtil.
+ */
+public class XmlUtilTest {
+
+    // Expected serialization of the document built in testCreateElement.
+    // NOTE(review): the expected text content is "prefix:value" while the
+    // prefix passed to createTextElementWithNamespacedContent is "pref" —
+    // confirm the helper derives its own prefix for the content.
+    private final String xml = "<top xmlns=\"namespace\">\n" +
+            "<innerText>value</innerText>\n" +
+            "<innerPrefixedText xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+            "<innerPrefixedText xmlns=\"randomNamespace\" xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+            "</top>";
+
+    // Builds a document via the XmlUtil factories and compares it (whitespace
+    // and attribute order ignored) against the hand-written expectation above.
+    @Test
+    public void testCreateElement() throws Exception {
+        final Document document = XmlUtil.newDocument();
+        final Element top = XmlUtil.createElement(document, "top", Optional.of("namespace"));
+
+        top.appendChild(XmlUtil.createTextElement(document, "innerText", "value", Optional.of("namespace")));
+        top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("namespace")));
+        top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("randomNamespace")));
+
+        document.appendChild(top);
+        // Copying the document must preserve the root element.
+        assertEquals("top", XmlUtil.createDocumentCopy(document).getDocumentElement().getTagName());
+
+        XMLUnit.setIgnoreAttributeOrder(true);
+        XMLUnit.setIgnoreWhitespace(true);
+
+        final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(xml), document);
+        assertTrue(diff.toString(), diff.similar());
+    }
+
+    // Default schema loads; a non-XSD input stream must be rejected with an
+    // IllegalStateException wrapping the parser's SAXParseException.
+    @Test
+    public void testLoadSchema() throws Exception {
+        XmlUtil.loadSchema();
+        try {
+            XmlUtil.loadSchema(getClass().getResourceAsStream("/netconfMessages/commit.xml"));
+            fail("Input stream does not contain xsd");
+        } catch (final IllegalStateException e) {
+            assertTrue(e.getCause() instanceof SAXParseException);
+        }
+
+    }
+
+    // Valid XPath compiles and evaluates against a document; invalid XPath
+    // syntax must fail at compile time.
+    @Test
+    public void testXPath() throws Exception {
+        final XPathExpression correctXPath = XMLNetconfUtil.compileXPath("/top/innerText");
+        try {
+            XMLNetconfUtil.compileXPath("!@(*&$!");
+            fail("Incorrect xpath should fail");
+        } catch (IllegalStateException e) {}
+        final Object value = XmlUtil.evaluateXPath(correctXPath, XmlUtil.readXmlToDocument("<top><innerText>value</innerText></top>"), XPathConstants.NODE);
+        assertEquals("value", ((Element) value).getTextContent());
+    }
+}
\ No newline at end of file
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
+//import javax.xml.bind.annotation.XmlElementWrapper;
import java.io.Serializable;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
* healthmonitor_id String
* admin_state_up Bool
* status String
- * members List <String>
+ * members List <NeutronLoadBalancerPoolMember>
* http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
*/
@XmlElement (name="status")
String loadBalancerPoolStatus;
- @XmlElement (name="members")
- List loadBalancerPoolMembers;
-
- HashMap<String, NeutronLoadBalancerPoolMember> member;
+ @XmlElement(name="members")
+ List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers;
public NeutronLoadBalancerPool() {
- member = new HashMap<String, NeutronLoadBalancerPoolMember>();
}
public String getLoadBalancerPoolID() {
this.loadBalancerPoolStatus = loadBalancerPoolStatus;
}
- public List getLoadBalancerPoolMembers() {
+ public List<NeutronLoadBalancerPoolMember> getLoadBalancerPoolMembers() {
+ /*
+ * Update the pool_id of the member to that this.loadBalancerPoolID
+ */
+ for (NeutronLoadBalancerPoolMember member: loadBalancerPoolMembers)
+ member.setPoolID(loadBalancerPoolID);
return loadBalancerPoolMembers;
}
- public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) {
+ public void setLoadBalancerPoolMembers(List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers) {
this.loadBalancerPoolMembers = loadBalancerPoolMembers;
}
+ public void addLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+ this.loadBalancerPoolMembers.add(loadBalancerPoolMember);
+ }
+
+ public void removeLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+ this.loadBalancerPoolMembers.remove(loadBalancerPoolMember);
+ }
+
public NeutronLoadBalancerPool extractFields(List<String> fields) {
NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool();
Iterator<String> i = fields.iterator();
}
return ans;
}
-}
\ No newline at end of file
+}
import org.opendaylight.controller.configuration.ConfigurationObject;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
import java.io.Serializable;
import java.util.Iterator;
import java.util.List;
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
public class NeutronLoadBalancerPoolMember extends ConfigurationObject implements Serializable {
private static final long serialVersionUID = 1L;
@XmlElement (name="status")
String poolMemberStatus;
+ String poolID;
+
public NeutronLoadBalancerPoolMember() {
}
+ @XmlTransient
+ public String getPoolID() {
+ return poolID;
+ }
+
+ public void setPoolID(String poolID) {
+ this.poolID = poolID;
+ }
+
public String getPoolMemberID() {
return poolMemberID;
}
if (s.equals("id")) {
ans.setPoolMemberID(this.getPoolMemberID());
}
+ if (s.equals("pool_id")) {
+ ans.setPoolID(this.getPoolID());
+ }
if (s.equals("tenant_id")) {
ans.setPoolMemberTenantID(this.getPoolMemberTenantID());
}
@Override public String toString() {
return "NeutronLoadBalancerPoolMember{" +
"poolMemberID='" + poolMemberID + '\'' +
+ ", poolID='" + poolID + '\'' +
", poolMemberTenantID='" + poolMemberTenantID + '\'' +
", poolMemberAddress='" + poolMemberAddress + '\'' +
", poolMemberProtoPort=" + poolMemberProtoPort +
import java.util.List;
/**
- * Neutron Northbound REST APIs for LoadBalancer Policies.<br>
- * This class provides REST APIs for managing neutron LoadBalancer Policies
+ * Neutron Northbound REST APIs for LoadBalancers.<br>
+ * This class provides REST APIs for managing neutron LoadBalancers
*
* <br>
* <br>
@QueryParam("page_reverse") String pageReverse
// sorting not supported
) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
- this);
- // INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this);
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- List<NeutronLoadBalancer> allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers();
+ List<NeutronLoadBalancer> allLoadBalancers = loadBalancerInterface.getAllNeutronLoadBalancers();
// List<NeutronLoadBalancerRule> allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules();
List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
// List<NeutronLoadBalancerRule> rules = new ArrayList<NeutronLoadBalancerRule>();
/**
* Returns a specific LoadBalancer */
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@GET
@Produces({ MediaType.APPLICATION_JSON })
@ResponseCode(code = 401, condition = "Unauthorized"),
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented") })
- public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+ public Response showLoadBalancer(@PathParam("loadBalancerID") String loadBalancerID,
// return fields
@QueryParam("fields") List<String> fields) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
if (fields.size() > 0) {
- NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer ans = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
return Response.status(200).entity(
new NeutronLoadBalancerRequest(extractFields(ans, fields))).build();
} else {
- return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID))).build();
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID))).build();
}
}
@ResponseCode(code = 409, condition = "Conflict"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response createLoadBalancers(final NeutronLoadBalancerRequest input) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* Verify that the LoadBalancer doesn't already exist.
*/
- if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
+ if (loadBalancerInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
throw new BadRequestException("LoadBalancer UUID already exists");
}
- loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
-
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {
}
}
}
- loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+ loadBalancerInterface.addNeutronLoadBalancer(singleton);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
NeutronLoadBalancer test = i.next();
/*
- * Verify that the firewall policy doesn't already exist
+ * Verify that the loadbalancer doesn't already exist
*/
- if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
+ if (loadBalancerInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
throw new BadRequestException("Load Balancer Pool UUID already is already created");
}
if (testMap.containsKey(test.getLoadBalancerID())) {
i = bulk.iterator();
while (i.hasNext()) {
NeutronLoadBalancer test = i.next();
- loadBalancerPoolInterface.addNeutronLoadBalancer(test);
+ loadBalancerInterface.addNeutronLoadBalancer(test);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
/**
* Updates a LoadBalancer Policy
*/
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@PUT
@Produces({ MediaType.APPLICATION_JSON })
@Consumes({ MediaType.APPLICATION_JSON })
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response updateLoadBalancer(
- @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ @PathParam("loadBalancerID") String loadBalancerID, final NeutronLoadBalancerRequest input) {
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* verify the LoadBalancer exists and there is only one delta provided
*/
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
if (!input.isSingleton()) {
throw new BadRequestException("Only singleton edit supported");
}
NeutronLoadBalancer delta = input.getSingleton();
- NeutronLoadBalancer original = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer original = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
/*
* updates restricted by Neutron
/*
* update the object and return it
*/
- loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta);
- NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID);
+ loadBalancerInterface.updateNeutronLoadBalancer(loadBalancerID, delta);
+ NeutronLoadBalancer updatedLoadBalancer = loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
service.neutronLoadBalancerUpdated(updatedLoadBalancer);
}
}
- return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID))).build();
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID))).build();
}
/**
* Deletes a LoadBalancer */
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@DELETE
@StatusCodes({
@ResponseCode(code = 204, condition = "No Content"),
@ResponseCode(code = 409, condition = "Conflict"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response deleteLoadBalancer(
- @PathParam("loadBalancerPoolID") String loadBalancerPoolID) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ @PathParam("loadBalancerID") String loadBalancerID) {
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* verify the LoadBalancer exists and it isn't currently in use
*/
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
- if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) {
+ if (loadBalancerInterface.neutronLoadBalancerInUse(loadBalancerID)) {
return Response.status(409).build();
}
- NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer singleton = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {
}
}
- loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID);
+ loadBalancerInterface.removeNeutronLoadBalancer(loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
import javax.xml.bind.annotation.XmlElement;
import java.util.List;
-public class INeutronLoadBalancerPoolMemberRequest {
+public class NeutronLoadBalancerPoolMemberRequest {
/**
* See OpenStack Network API v2.0 Reference for description of
@XmlElement(name="members")
List<NeutronLoadBalancerPoolMember> bulkRequest;
- INeutronLoadBalancerPoolMemberRequest() {
+ NeutronLoadBalancerPoolMemberRequest() {
}
- INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+ NeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
bulkRequest = bulk;
singletonLoadBalancerPoolMember = null;
}
- INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+ NeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
singletonLoadBalancerPoolMember = group;
}
/*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
*/
package org.opendaylight.controller.networkconfig.neutron.northbound;
import org.codehaus.enunciate.jaxrs.ResponseCode;
import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
-import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
import org.opendaylight.controller.sal.utils.ServiceHelper;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
-
-@Path("/pools/{loadBalancerPoolID}/members")
+@Path("/pools/{loadBalancerPoolUUID}/members")
public class NeutronLoadBalancerPoolMembersNorthbound {
-
private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List<String> fields) {
return o.extractFields(fields);
}
/**
- * Returns a list of all LoadBalancerPool
+ * Returns a list of all LoadBalancerPoolMembers in specified pool
*/
@GET
@Produces({MediaType.APPLICATION_JSON})
@ResponseCode(code = 501, condition = "Not Implemented")})
public Response listMembers(
+ //Path param
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+
// return fields
@QueryParam("fields") List<String> fields,
+
// OpenStack LoadBalancerPool attributes
@QueryParam("id") String queryLoadBalancerPoolMemberID,
@QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID,
@QueryParam("page_reverse") String pageReverse
// sorting not supported
) {
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces
- .getINeutronLoadBalancerPoolMemberCRUD(this);
- if (loadBalancerPoolMemberInterface == null) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface
- .getAllNeutronLoadBalancerPoolMembers();
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
- Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ Iterator<NeutronLoadBalancerPoolMember> i = members.iterator();
while (i.hasNext()) {
NeutronLoadBalancerPoolMember nsg = i.next();
if ((queryLoadBalancerPoolMemberID == null ||
queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) &&
+ loadBalancerPoolUUID.equals(nsg.getPoolID()) &&
(queryLoadBalancerPoolMemberTenantID == null ||
queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) &&
(queryLoadBalancerPoolMemberAddress == null ||
}
}
return Response.status(200).entity(
- new INeutronLoadBalancerPoolMemberRequest(ans)).build();
+ new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+}
+
+/**
+ * Returns a specific LoadBalancerPoolMember belonging to the given pool.
+ *
+ * Looks the member up inside the pool's own member list (the pool is the
+ * authoritative cache here — see the class-level note about CRUD overlap).
+ * If a "fields" query parameter is supplied, only those fields of the
+ * member are serialized into the response.
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@GET
+@Produces({ MediaType.APPLICATION_JSON })
+//@TypeHint(OpenStackLoadBalancerPoolMembers.class)
+@StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response showLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+ // return fields
+ @QueryParam("fields") List<String> fields ) {
+
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ // The enclosing pool must exist before we can search its members
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember ans: members) {
+ if (!ans.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) {
+ continue;
+ }
+ // NOTE(review): assumes JAX-RS injects an empty (never null) list when
+ // no "fields" query parameter is present — confirm against the container.
+ if (fields.size() > 0) {
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolMemberRequest(extractFields(ans, fields))).build();
+ } else {
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+ }
+ }
+ // Member not present in this pool: report 404 per the declared
+ // @ResponseCode contract rather than an empty 204 success.
+ throw new ResourceNotFoundException("LoadBalancerPoolMember UUID does not exist.");
}
/**
* Adds a Member to an LBaaS Pool member
*/
-@Path("/pools/{loadBalancerPoolID}/members")
@PUT
@Produces({MediaType.APPLICATION_JSON})
@Consumes({MediaType.APPLICATION_JSON})
@ResponseCode(code = 401, condition = "Unauthorized"),
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented")})
-public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberRequest input) {
+public Response createLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ final NeutronLoadBalancerPoolMemberRequest input) {
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(
- this);
- if (loadBalancerPoolMemberInterface == null) {
- throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface "
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
+ // Verify that the loadBalancerPool exists, for the member to be added to its cache
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+
if (input.isSingleton()) {
NeutronLoadBalancerPoolMember singleton = input.getSingleton();
+ singleton.setPoolID(loadBalancerPoolUUID);
+ String loadBalancerPoolMemberUUID = singleton.getPoolMemberID();
/*
* Verify that the LoadBalancerPoolMember doesn't already exist.
*/
- if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
- singleton.getPoolMemberID())) {
- throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+ List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
}
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
if (instances != null) {
}
}
}
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
service.neutronLoadBalancerPoolMemberCreated(singleton);
}
}
+
+ /**
+ * Add the member from the neutron load balancer pool as well
+ */
+ singletonPool.addLoadBalancerPoolMember(singleton);
+
} else {
List<NeutronLoadBalancerPoolMember> bulk = input.getBulk();
Iterator<NeutronLoadBalancerPoolMember> i = bulk.iterator();
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
while (i.hasNext()) {
NeutronLoadBalancerPoolMember test = i.next();
+ String loadBalancerPoolMemberUUID = test.getPoolMemberID();
/*
- * Verify that the firewall doesn't already exist
+ * Verify that the LoadBalancerPoolMember doesn't already exist.
*/
-
- if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
- test.getPoolMemberID())) {
- throw new BadRequestException("Load Balancer PoolMember UUID already is already created");
+ List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
}
+
if (testMap.containsKey(test.getPoolMemberID())) {
throw new BadRequestException("Load Balancer PoolMember UUID already exists");
}
i = bulk.iterator();
while (i.hasNext()) {
NeutronLoadBalancerPoolMember test = i.next();
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
service.neutronLoadBalancerPoolMemberCreated(test);
}
}
+ singletonPool.addLoadBalancerPoolMember(test);
}
}
return Response.status(201).entity(input).build();
}
+
+/**
+ * Updates a LB member pool
+ *
+ * Placeholder endpoint: currently unimplemented. It answers 501
+ * (Not Implemented) and echoes the request body back unchanged, so
+ * clients can detect the missing capability without a parse error.
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@PUT
+@Produces({ MediaType.APPLICATION_JSON })
+@Consumes({ MediaType.APPLICATION_JSON })
+@StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response updateLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+ final NeutronLoadBalancerPoolMemberRequest input) {
+
+ //TODO: Implement update LB member pool
+ return Response.status(501).entity(input).build();
+}
+
+/**
+ * Deletes a LoadBalancerPoolMember
+ *
+ * Verifies pool and member exist, polls all registered
+ * INeutronLoadBalancerPoolMemberAware listeners for permission
+ * (any non-2xx veto aborts the delete), notifies them afterwards,
+ * and finally removes the member from the pool's member list.
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@DELETE
+@StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response deleteLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ // Verify that the loadBalancerPool exists, for the member to be removed from its cache
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+
+ //Verify that the LB pool member exists
+ NeutronLoadBalancerPoolMember singleton = null;
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) {
+ singleton = member;
+ break;
+ }
+ }
+ // A missing member is a 404 (matches the declared @ResponseCode and the
+ // sibling lookup endpoints), not a 400: the client addressed a resource
+ // that does not exist.
+ if (singleton == null) {
+ throw new ResourceNotFoundException("LoadBalancerPoolMember UUID does not exist.");
+ }
+
+ // First pass: ask every listener whether the delete is allowed
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ // Second pass: notify listeners that the member is gone
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ service.neutronLoadBalancerPoolMemberDeleted(singleton);
+ }
+ }
+
+ /*
+ * Remove the member from the neutron load balancer pool
+ */
+ NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ singletonPool.removeLoadBalancerPoolMember(singleton);
+
+ return Response.status(204).build();
+}
}
/*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
*/
package org.opendaylight.controller.networkconfig.neutron.northbound;
import org.codehaus.enunciate.jaxrs.StatusCodes;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
import org.opendaylight.controller.sal.utils.ServiceHelper;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
* http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
*
*/
+
+/**
+ * For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD,
+ * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache.
+ * TODO: Consolidate and maintain a single copy
+ */
+
@Path("/pools")
public class NeutronLoadBalancerPoolNorthbound {
@QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID,
@QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp,
@QueryParam("status") String queryLoadBalancerPoolStatus,
- @QueryParam("members") List queryLoadBalancerPoolMembers,
+ @QueryParam("members") List<NeutronLoadBalancerPoolMember> queryLoadBalancerPoolMembers,
// pagination
@QueryParam("limit") String limit,
@QueryParam("marker") String marker,
NeutronLoadBalancerPool test = i.next();
/*
- * Verify that the firewall doesn't already exist
+ * Verify that the loadBalancerPool doesn't already exist
*/
if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) {
}
return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
}
+
+ /**
+ * Deletes a LoadBalancerPool
+ *
+ * Rejects the delete if the pool is unknown (404) or still in use (409),
+ * polls INeutronLoadBalancerPoolAware listeners for permission, removes
+ * the pool, notifies the listeners, and finally evicts any members that
+ * referenced this pool from the member cache (see class-level TODO on
+ * the duplicated member caches).
+ */
+
+ @Path("{loadBalancerPoolUUID}")
+ @DELETE
+ @StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response deleteLoadBalancerPool(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancerPool exists and it isn't currently in use
+ */
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+ }
+ if (loadBalancerPoolInterface.neutronLoadBalancerPoolInUse(loadBalancerPoolUUID)) {
+ return Response.status(409).build();
+ }
+ NeutronLoadBalancerPool singleton = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPool(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * remove it and return 204 status
+ */
+ loadBalancerPoolInterface.removeNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ service.neutronLoadBalancerPoolDeleted(singleton);
+ }
+ }
+
+ /*
+ * remove corresponding members from the member cache too
+ */
+ INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this);
+ if (loadBalancerPoolMemberInterface != null) {
+ List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new
+ ArrayList<NeutronLoadBalancerPoolMember>(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers());
+ Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPoolMember member = i.next();
+ // Compare with equals(): '==' tests reference identity on Strings,
+ // so the original never matched and members were never evicted.
+ // loadBalancerPoolUUID is a path param and cannot be null, so it
+ // is safe as the receiver.
+ if (loadBalancerPoolUUID.equals(member.getPoolID())) {
+ loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID());
+ }
+ }
+ }
+ return Response.status(204).build();
+ }
}
package org.opendaylight.controller.topologymanager.internal;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.util.ArrayList;
-import java.util.Dictionary;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.LinkedBlockingQueue;
-
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.felix.dm.Component;
import org.eclipse.osgi.framework.console.CommandInterpreter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.LinkedBlockingQueue;
+
/**
* The class describes TopologyManager which is the central repository of the
* network topology. It provides service for applications to interact with
// all except the creation time stamp because that should
// be set only when the edge is created
TimeStamp timeStamp = null;
- for (Property prop : oldProps) {
- if (prop instanceof TimeStamp) {
- TimeStamp tsProp = (TimeStamp) prop;
- if (tsProp.getTimeStampName().equals("creation")) {
- timeStamp = tsProp;
- break;
+ if (oldProps != null) {
+ for (Property prop : oldProps) {
+ if (prop instanceof TimeStamp) {
+ TimeStamp tsProp = (TimeStamp) prop;
+ if (tsProp.getTimeStampName().equals("creation")) {
+ timeStamp = tsProp;
+ break;
+ }
}
}
}
if (prop instanceof TimeStamp) {
TimeStamp t = (TimeStamp) prop;
if (t.getTimeStampName().equals("creation")) {
- i.remove();
+ if (timeStamp != null) {
+ i.remove();
+ }
break;
}
}
package org.opendaylight.controller.topologymanager.internal;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.ConstructionException;
+import org.opendaylight.controller.sal.core.Description;
import org.opendaylight.controller.sal.core.Edge;
import org.opendaylight.controller.sal.core.Host;
import org.opendaylight.controller.sal.core.Latency;
import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
import org.opendaylight.controller.sal.core.Property;
import org.opendaylight.controller.sal.core.State;
+import org.opendaylight.controller.sal.core.TimeStamp;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.packet.address.EthernetAddress;
import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.switchmanager.SwitchConfig;
import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
public class TopologyManagerImplTest {
/**
* Mockup of switch manager that only maintains existence of node
Assert.assertTrue(nodeNCmap.isEmpty());
}
+
+ @Test
+ public void bug1348FixTest() throws ConstructionException {
+ // Regression test: deliver an UpdateType.CHANGED edge update for an edge
+ // that was never CREATED first (so the manager has no stored properties
+ // for it). edgeUpdate must handle the missing old-property set without
+ // throwing, and the edge must still end up in the topology with its
+ // properties (TimeStamp, Latency, State, Bandwidth, Description) applied.
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ TestSwitchManager swMgr = new TestSwitchManager();
+ topoManagerImpl.setSwitchManager(swMgr);
+ topoManagerImpl.nonClusterObjectCreate();
+
+ // Edge between two OpenFlow node connectors on distinct nodes
+ NodeConnector headnc1 = NodeConnectorCreator.createOFNodeConnector(
+ (short) 1, NodeCreator.createOFNode(1000L));
+ NodeConnector tailnc1 = NodeConnectorCreator.createOFNodeConnector(
+ (short) 2, NodeCreator.createOFNode(2000L));
+ Edge edge = new Edge(headnc1, tailnc1);
+ List<TopoEdgeUpdate> updatedEdges = new ArrayList<>();
+ Set<Property> edgeProps = new HashSet<>();
+ edgeProps.add(new TimeStamp(System.currentTimeMillis(), "creation"));
+ edgeProps.add(new Latency(Latency.LATENCY100ns));
+ edgeProps.add(new State(State.EDGE_UP));
+ edgeProps.add(new Bandwidth(Bandwidth.BW100Gbps));
+ edgeProps.add(new Description("Test edge"));
+ updatedEdges.add(new TopoEdgeUpdate(edge, edgeProps, UpdateType.CHANGED));
+
+ // The buggy code NPE'd here when no prior properties existed
+ try {
+ topoManagerImpl.edgeUpdate(updatedEdges);
+ } catch (Exception e) {
+ Assert.fail("Exception was raised when trying to update edge properties: " + e.getMessage());
+ }
+
+ // The CHANGED update alone must be enough to register the edge
+ Assert.assertEquals(1, topoManagerImpl.getEdges().size());
+ Assert.assertNotNull(topoManagerImpl.getEdges().get(edge));
+ }
}