<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-provider</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
</feature>
+
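+ <!-- Clustering test application feature: installs the clustering-it sample bundles and overrides the default modules.conf/module-shards.conf with the test variants. -->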
+ <feature name='odl-clustering-test-app' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+ <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
+ <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
+ </feature>
</features>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>net.sf.jung2</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.antlr</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.moxy</artifactId>
+ </dependency>
</dependencies>
<build>
<resources>
<bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.neutron.northbound/${networkconfig.neutron.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
<ignorePermissions>false</ignorePermissions>
</configuration>
</execution>
+ <execution>
+ <id>copy-dependencies</id>
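+ <!-- Copy the runtime dependencies (including parent POMs and each artifact's POM) into the assembled Karaf system directory using the Maven repository layout. -->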
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/assembly/system</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>true</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ <useRepositoryLayout>true</useRepositoryLayout>
+ <addParentPoms>true</addParentPoms>
+ <copyPom>true</copyPom>
+ </configuration>
+ </execution>
</executions>
</plugin>
<plugin>
<sonar.language>java</sonar.language>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
- <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
+ <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+ <sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
<yang-ext.version>2013.09.07.4-SNAPSHOT</yang-ext.version>
<yang-jmx-generator.version>1.0.0-SNAPSHOT</yang-jmx-generator.version>
<yangtools.version>0.6.2-SNAPSHOT</yangtools.version>
- <sshd-core.version>0.12.0</sshd-core.version>
+ <sshd-core.version>0.12.0</sshd-core.version>
+ <jmh.version>0.9.7</jmh.version>
</properties>
<dependencyManagement>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <!-- JMH Benchmark dependencies -->
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
</dependencies>
</dependencyManagement>
--- /dev/null
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+# script; for example karaf, start, stop, admin, client, ...
+#
+# if [ "$KARAF_SCRIPT" == "SCRIPT_NAME" ]; then
+# Actions go here...
+# fi
+
+#
+# general settings which should be applied for all scripts go here; please keep
+# in mind that scripts might be executed more than once, e.g. the start script
+# runs first and then invokes the karaf script.
+#
+
+#
+# The following section shows the possible configuration options for the default
+# karaf scripts
+#
+# export JAVA_HOME # Location of Java installation
+# export JAVA_MIN_MEM # Minimum memory for the JVM
+# export JAVA_MAX_MEM # Maximum memory for the JVM
+# export JAVA_PERM_MEM # Minimum perm memory for the JVM
+# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM
+# export KARAF_HOME # Karaf home folder
+# export KARAF_DATA # Karaf data folder
+# export KARAF_BASE # Karaf base folder
+# export KARAF_ETC # Karaf etc folder
+# export KARAF_OPTS # Additional available Karaf options
+# export KARAF_DEBUG # Enable debug mode
+if [ "x$JAVA_MAX_PERM_MEM" == "x" ]; then
+ export JAVA_MAX_PERM_MEM="512m"
+fi
+if [ "x$JAVA_MAX_MEM" == "x" ]; then
+ export JAVA_MAX_MEM="2048m"
+fi
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>benchmark-data-store</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-parser-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-inmemory-datastore</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
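+ <!-- Runs org.openjdk.jmh.Main with the ".*" pattern (all benchmarks on the test classpath) during the integration-test phase. -->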
+ <configuration>
+ <classpathScope>test</classpathScope>
+ <executable>java</executable>
+ <arguments>
+ <argument>-classpath</argument>
+ <classpath/>
+ <argument>org.openjdk.jmh.Main</argument>
+ <argument>.*</argument>
+ </arguments>
+ </configuration>
+ <executions>
+ <execution>
+ <id>run-benchmarks</id>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Warmup;
+
+/**
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int WARMUP_ITERATIONS = 20;
+ private static final int MEASUREMENT_ITERATIONS = 20;
+
+ private static final int OUTER_LIST_100K = 100000;
+ private static final int OUTER_LIST_50K = 50000;
+ private static final int OUTER_LIST_10K = 10000;
+
+ private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
+ private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
+ private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
+
+ private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
+ final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
+
+ for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
+ paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
+ .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .build();
+ }
+ return paths;
+ }
+
+ private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
+ private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
+ private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
+
+ private static MapNode initInnerListItems(final int count) {
+ final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
+ .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
+
+ for (int i = 1; i <= count; ++i) {
+ mapEntryBuilder
+ .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
+ }
+ return mapEntryBuilder.build();
+ }
+
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
+
+ private static NormalizedNode<?,?>[] initOuterListItems(int outerListItemsCount, MapNode innerList) {
+ final NormalizedNode<?,?>[] outerListItems = new NormalizedNode[outerListItemsCount];
+
+ for (int i = 0; i < outerListItemsCount; ++i) {
+ int outerListKey = i;
+ outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .withChild(innerList).build();
+ }
+ return outerListItems;
+ }
+
+ protected SchemaContext schemaContext;
+ protected InMemoryDOMDataStore domStore;
+
+ public abstract void setUp() throws Exception;
+
+ public abstract void tearDown();
+
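+ /**
+ * Writes the top-level test container (holding an empty outer-list map) and commits it through
+ * the three-phase commit cohort.
+ */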
+ protected void initTestNode() throws Exception {
+ final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
+ .build();
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(testPath, provideOuterListNode());
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ private DataContainerChild<?, ?> provideOuterListNode() {
+ return ImmutableContainerNodeBuilder
+ .create()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
+ .withChild(
+ ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
+ .build()).build();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+/**
+ * BenchmarkModel loads the odl-datastore-test.yang model from the classpath resources.
+ * <br>
+ * It serves as a facilitator class that holds several references to the initialized YANG model
+ * as static final members.
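+ * <p>
+ * A minimal usage sketch, mirroring the setUp() methods of the benchmarks in this change:
+ * <pre>
+ * SchemaContext schemaContext = BenchmarkModel.createTestContext();
+ * domStore.onGlobalContextUpdated(schemaContext);
+ * </pre>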
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public final class BenchmarkModel {
+
+ public static final QName TEST_QNAME = QName
+ .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test");
+ public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+ public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+ public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+ public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+ private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+ public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+
+ public static InputStream getDatastoreBenchmarkInputStream() {
+ return getInputStream(DATASTORE_TEST_YANG);
+ }
+
+ private static InputStream getInputStream(final String resourceName) {
+ return BenchmarkModel.class.getResourceAsStream(resourceName);
+ }
+
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(
+ getDatastoreBenchmarkInputStream()));
+ return parser.resolveSchemaContext(modules);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark measuring the performance of write operations against InMemoryDOMDataStore. The benchmark
+ * instance creates the InMemoryDOMDataStore with a blocking bounded fast thread pool as both the
+ * Data Change Listener executor service and the DOM Store executor service.
+ *
+ * @see org.opendaylight.yangtools.util.concurrent.SpecialExecutors
+ * @see org.opendaylight.controller.md.sal.dom.store.benchmark.AbstractInMemoryDatastoreWriteTransactionBenchmark
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+ private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
+
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ final ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
+ MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name );
+
+ domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark measuring the performance of write operations against InMemoryDOMDataStore. The benchmark
+ * instance creates the InMemoryDOMDataStore with a blocking bounded fast thread pool as the Data Change
+ * Listener executor service and a same-thread executor as the DOM Store executor service.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithSameThreadedExecutorBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark measuring the performance of write operations against InMemoryDOMDataStore. The benchmark
+ * instance creates the InMemoryDOMDataStore with a same-thread executor as both the Data Change Listener
+ * executor service and the DOM Store executor service.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+ MoreExecutors.sameThreadExecutor());
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+module odl-datastore-test {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
+ prefix "store-test";
+
+ revision "2014-03-13" {
+ description "Initial revision.";
+ }
+
+ container test {
+ list outer-list {
+ key id;
+ leaf id {
+ type int32;
+ }
+ choice outer-choice {
+ case one {
+ leaf one {
+ type string;
+ }
+ }
+ case two-three {
+ leaf two {
+ type string;
+ }
+ leaf three {
+ type string;
+ }
+ }
+ }
+ list inner-list {
+ key name;
+ leaf name {
+ type int32;
+ }
+ leaf value {
+ type string;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj);
builder.setFlowRef(new FlowRef(identifier));
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalFlowService().removeFlow(builder.build());
if (tableIdValidationPrecondition(tableKey, update)) {
final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
if (tableIdValidationPrecondition(tableKey, addDataObj)) {
final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
final Group group = (removeDataObj);
final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().removeGroup(builder.build());
final Group updatedGroup = (update);
final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
final Group group = (addDataObj);
final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().addGroup(builder.build());
final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().removeMeter(builder.build());
final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().addMeter(builder.build());
<module>sal-binding-dom-it</module>
</modules>
</profile>
+ <profile>
+ <id>benchmarks</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ </activation>
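+ <!-- Not active by default; enable it explicitly (e.g. mvn install -Pbenchmarks) to build benchmark-data-store, whose POM runs the JMH benchmarks during integration-test. -->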
+ <modules>
+ <module>benchmark-data-store</module>
+ </modules>
+ </profile>
</profiles>
</project>
\ No newline at end of file
<Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
<Export-package>org.opendaylight.cluster.raft</Export-package>
<Import-Package>*</Import-Package>
+ <DynamicImport-Package>*</DynamicImport-Package>
</instructions>
</configuration>
</plugin>
*/
public class DefaultConfigParamsImpl implements ConfigParams {
- private static final int SNAPSHOT_BATCH_COUNT = 100000;
+ private static final int SNAPSHOT_BATCH_COUNT = 20000;
/**
* The maximum election time variance
@Override public void onReceiveRecover(Object message) {
if (message instanceof SnapshotOffer) {
- LOG.debug("SnapshotOffer called..");
+ LOG.info("SnapshotOffer called..");
SnapshotOffer offer = (SnapshotOffer) message;
Snapshot snapshot = (Snapshot) offer.snapshot();
context.setReplicatedLog(replicatedLog);
context.setLastApplied(snapshot.getLastAppliedIndex());
- LOG.debug("Applied snapshot to replicatedLog. " +
- "snapshotIndex={}, snapshotTerm={}, journal-size={}",
+ LOG.info("Applied snapshot to replicatedLog. " +
+ "snapshotIndex={}, snapshotTerm={}, journal-size={}",
replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
- replicatedLog.size());
+ replicatedLog.size()
+ );
// Apply the snapshot to the actors state
applySnapshot(ByteString.copyFrom(snapshot.getState()));
context.removePeer(rrp.getName());
} else if (message instanceof CaptureSnapshot) {
- LOG.debug("CaptureSnapshot received by actor");
+ LOG.info("CaptureSnapshot received by actor");
CaptureSnapshot cs = (CaptureSnapshot)message;
captureSnapshot = cs;
createSnapshot();
} else if (message instanceof CaptureSnapshotReply){
- LOG.debug("CaptureSnapshotReply received by actor");
+ LOG.info("CaptureSnapshotReply received by actor");
CaptureSnapshotReply csr = (CaptureSnapshotReply) message;
ByteString stateInBytes = csr.getSnapshot();
- LOG.debug("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
+ LOG.info("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
handleCaptureSnapshotReply(stateInBytes);
} else {
if(oldBehavior != currentBehavior){
onStateChanged();
}
+
+ onLeaderChanged(oldBehavior.getLeaderId(), currentBehavior.getLeaderId());
}
}
*/
protected abstract void onStateChanged();
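+ /**
+ * Hook invoked with the previous and current leader id; the default implementation is a no-op
+ * and subclasses may override it to react to leadership changes.
+ */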
+ protected void onLeaderChanged(String oldLeader, String newLeader) { }
+
private RaftActorBehavior switchBehavior(RaftState state) {
if (currentBehavior != null) {
if (currentBehavior.state() == state) {
context.getReplicatedLog().get(i);
if (replicatedLogEntry != null) {
+ // Send a local message to the local RaftActor (specifically its derived
+ // class) to apply the log entry at this index
actor().tell(new ApplyState(clientActor, identifier,
replicatedLogEntry), actor());
newLastApplied = i;
} else {
//if one index is not present in the log, no point in looping
// around as the rest wont be present either
- context.getLogger().error(
+ context.getLogger().warning(
"Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index );
break;
}
}
- // Send a local message to the local RaftActor (it's derived class to be
- // specific to apply the log to it's index)
context.getLogger().debug("Setting last applied to {}", newLastApplied);
context.setLastApplied(newLastApplied);
}
try {
if(leProtoBuff.getData() != null && leProtoBuff.getData().getClientPayloadClassName() != null) {
String clientPayloadClassName = leProtoBuff.getData().getClientPayloadClassName();
- payload = (Payload)Class.forName(clientPayloadClassName).newInstance();
+ payload = (Payload) Class.forName(clientPayloadClassName).newInstance();
payload = payload.decode(leProtoBuff.getData());
payload.setClientPayloadClassName(clientPayloadClassName);
} else {
package org.opendaylight.controller.cluster.raft.messages;
import com.google.protobuf.ByteString;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
public class InstallSnapshot extends AbstractRaftRPC {
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore;
+package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
import com.google.common.base.Preconditions;
import com.google.protobuf.GeneratedMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.UnknownFieldSet;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: InstallSnapshot.proto
-package org.opendaylight.controller.cluster.raft.protobuff.messages;
+package org.opendaylight.controller.protobuff.messages.cluster.raft;
public final class InstallSnapshotMessages {
private InstallSnapshotMessages() {}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
}
public static com.google.protobuf.Parser<InstallSnapshot> PARSER =
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
- com.google.protobuf.ByteString bs =
+ com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
getLeaderIdBytes() {
java.lang.Object ref = leaderId_;
if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
leaderId_ = b;
return super.writeReplace();
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot prototype) {
+ public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshotOrBuilder {
+ implements org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshotOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
}
- // Construct using org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.newBuilder()
+ // Construct using org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot build() {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot build() {
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot buildPartial() {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot(this);
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot buildPartial() {
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) {
- return mergeFrom((org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot)other);
+ if (other instanceof org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) {
+ return mergeFrom((org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot other) {
- if (other == org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot other) {
+ if (other == org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
if (other.hasTerm()) {
setTerm(other.getTerm());
}
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
+ parsedMessage = (org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
getLeaderIdBytes() {
java.lang.Object ref = leaderId_;
if (ref instanceof String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
leaderId_ = b;
"\021lastIncludedIndex\030\003 \001(\003\022\030\n\020lastIncluded" +
"Term\030\004 \001(\003\022\014\n\004data\030\005 \001(\014\022\022\n\nchunkIndex\030\006" +
" \001(\005\022\023\n\013totalChunks\030\007 \001(\005BX\n;org.openday" +
- "light.controller.cluster.raft.protobuff." +
- "messagesB\027InstallSnapshotMessagesH\001"
+ "light.controller.protobuff.messages.clus" +
+ "ter.raftB\027InstallSnapshotMessagesH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
package org.opendaylight.controller.cluster.raft;
-option java_package = "org.opendaylight.controller.cluster.raft.protobuff.messages";
+option java_package = "org.opendaylight.controller.protobuff.messages.cluster.raft";
option java_outer_classname = "InstallSnapshotMessages";
option optimize_for = SPEED;
metric-capture-enabled = true
akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+
actor {
+
provider = "akka.cluster.ClusterActorRefProvider"
serializers {
java = "akka.serialization.JavaSerializer"
metric-capture-enabled = true
akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+
actor {
provider = "akka.cluster.ClusterActorRefProvider"
package org.opendaylight.controller.md.sal.common.api.data;
+import com.google.common.base.Supplier;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-
-import com.google.common.base.Function;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
/**
* A type of TransactionCommitFailedException that indicates a situation that would result in a
* @author Thomas Pantelis
*/
public class TransactionCommitDeadlockException extends TransactionCommitFailedException {
-
private static final long serialVersionUID = 1L;
-
private static final String DEADLOCK_MESSAGE =
"An attempt to block on a ListenableFuture via a get method from a write " +
"transaction submit was detected that would result in deadlock. The commit " +
"result must be obtained asynchronously, e.g. via Futures#addCallback, to avoid deadlock.";
+ private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE);
- public static Function<Void, Exception> DEADLOCK_EXECUTOR_FUNCTION = new Function<Void, Exception>() {
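+ // Creates a new TransactionCommitDeadlockException on demand, reusing the shared RpcError above.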
+ public static final Supplier<Exception> DEADLOCK_EXCEPTION_SUPPLIER = new Supplier<Exception>() {
@Override
- public Exception apply(Void notUsed) {
- return new TransactionCommitDeadlockException( DEADLOCK_MESSAGE,
- RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE));
+ public Exception get() {
+ return new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR);
}
};
- public TransactionCommitDeadlockException(String message, final RpcError... errors) {
+ public TransactionCommitDeadlockException(final String message, final RpcError... errors) {
super(message, errors);
}
}
*/
package org.opendaylight.controller.md.sal.common.impl.service;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.AbstractDataModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-
public abstract class AbstractDataTransaction<P extends Path<P>, D extends Object> extends
AbstractDataModification<P, D> {
- private final static Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final ListenableFuture<RpcResult<TransactionStatus>> SUCCESS_FUTURE =
+ Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
private final Object identifier;
private final long allocationTime;
@Override
public Future<RpcResult<TransactionStatus>> commit() {
readyTime = System.nanoTime();
- LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ }
changeStatus(TransactionStatus.SUBMITED);
-
return this.broker.commit(this);
}
}
@Override
- public boolean equals(Object obj) {
+ public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
public void succeeded() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.COMMITED);
}
public void failed() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.FAILED);
}
this.onStatusChange(status);
}
- public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(
- CheckedFuture<Void,TransactionCommitFailedException> from ) {
+ public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(final CheckedFuture<Void,TransactionCommitFailedException> from) {
return Futures.transform(from, new AsyncFunction<Void, RpcResult<TransactionStatus>>() {
@Override
- public ListenableFuture<RpcResult<TransactionStatus>> apply(Void input) throws Exception {
- return Futures.immediateFuture(RpcResultBuilder.<TransactionStatus>
- success(TransactionStatus.COMMITED).build());
+ public ListenableFuture<RpcResult<TransactionStatus>> apply(final Void input) {
+ return SUCCESS_FUTURE;
}
- } );
+ });
}
}
import javax.annotation.Nullable;
import org.opendaylight.yangtools.util.concurrent.CountingRejectedExecutionHandler;
import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* MXBean implementation of the ThreadExecutorStatsMXBean interface that retrieves statistics
*/
public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
implements ThreadExecutorStatsMXBean {
-
+ private static final Logger LOG = LoggerFactory.getLogger(ThreadExecutorStatsMXBeanImpl.class);
private final ThreadPoolExecutor executor;
/**
* @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
* @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
*/
- public ThreadExecutorStatsMXBeanImpl(Executor executor, String mBeanName,
- String mBeanType, @Nullable String mBeanCategory) {
+ public ThreadExecutorStatsMXBeanImpl(final ThreadPoolExecutor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
super(mBeanName, mBeanType, mBeanCategory);
+ this.executor = Preconditions.checkNotNull(executor);
+ }
+
+ /**
+ * Creates a new statistics bean for the supplied executor and registers it,
+ * provided the executor is a {@link ThreadPoolExecutor}.
+ *
+ * @param executor the executor whose statistics should be exposed
+ * @param mBeanName Used as the <code>name</code> property in the bean's ObjectName.
+ * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
+ * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
+ * @return a registered bean instance, or null if the executor type is not supported
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
+ if (executor instanceof ThreadPoolExecutor) {
+ final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl((ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+ ret.registerMBean();
+ return ret;
+ }
- Preconditions.checkArgument(executor instanceof ThreadPoolExecutor,
- "The ExecutorService of type {} is not an instanceof ThreadPoolExecutor",
- executor.getClass());
- this.executor = (ThreadPoolExecutor)executor;
+ LOG.info("Executor {} is not supported", executor);
+ return null;
}
@Override
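
A brief usage sketch for the new create() factory, not part of this change; `someExecutor` and the bean names are hypothetical. The point is that create() returns null for executors other than ThreadPoolExecutor, so callers null-check before unregistering, as the broker module changes later in this patch do.

    final AbstractMXBean stats = ThreadExecutorStatsMXBeanImpl.create(
            someExecutor, "SomeExecutorStats", "SomeBeanType", null);

    // During shutdown, only unregister if a bean was actually created.
    if (stats != null) {
        stats.unregisterMBean();
    }
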
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
.tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
self());
+ createSnapshotTransaction = null;
// Send a PoisonPill instead of sending close transaction because we do not really need
// a response
getSender().tell(PoisonPill.getInstance(), self());
}
} else {
- LOG.error("Unknown state received {}", data);
+ LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader());
}
// Update stats
// Since this will be done only on Recovery or when this actor is a Follower
// we can safely commit everything in here. We need not worry about event notifications
// as they would have already been disabled on the follower
+
+ LOG.info("Applying snapshot");
try {
DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
syncCommitTransaction(transaction);
} catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
LOG.error(e, "An exception occurred when applying snapshot");
+ } finally {
+ LOG.info("Done applying snapshot");
}
}
.tell(new EnableNotification(isLeader()), getSelf());
}
- if (getLeaderId() != null) {
- shardMBean.setLeader(getLeaderId());
- }
shardMBean.setRaftState(getRaftState().name());
shardMBean.setCurrentTerm(getCurrentTerm());
}
}
+ @Override protected void onLeaderChanged(String oldLeader, String newLeader) {
+ if ((oldLeader == null && newLeader == null) || (newLeader != null && newLeader.equals(oldLeader))) {
+ return;
+ }
+ LOG.info("Current state = {}, Leader = {}", getRaftState().name(), newLeader);
+ shardMBean.setLeader(newLeader);
+ }
+
@Override public String persistenceId() {
return this.name.toString();
}
}
public void setDataStoreExecutor(ExecutorService dsExecutor) {
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dsExecutor,
+ this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor,
"notification-executor", getMBeanType(), getMBeanCategory());
}
this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", getMBeanType(), getMBeanCategory());
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
"data-store-executor", getMBeanType(), getMBeanCategory());
}
System.setProperty("shard.persistent", "false");
system = ActorSystem.create("test");
+
+ deletePersistenceFiles();
}
@AfterClass
public static void tearDownClass() throws IOException {
JavaTestKit.shutdownActorSystem(system);
system = null;
+
+ deletePersistenceFiles();
}
protected static void deletePersistenceFiles() throws IOException {
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import java.io.File;
subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
getRef());
- waitForLogMessage(Logging.Debug.class, subject, "CaptureSnapshotReply received by actor");
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
+ subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
+ getRef());
+
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
}
};
- Thread.sleep(2000);
deletePersistenceFiles();
}};
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import com.google.common.collect.Iterables;
+import scala.concurrent.Future;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class InMemorySnapshotStore extends SnapshotStore {
+
+ Map<String, List<Snapshot>> snapshots = new HashMap<>();
+
+ @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+ SnapshotSelectionCriteria snapshotSelectionCriteria) {
+ List<Snapshot> snapshotList = snapshots.get(s);
+ if(snapshotList == null){
+ return Futures.successful(Option.<SelectedSnapshot>none());
+ }
+
+ Snapshot snapshot = Iterables.getLast(snapshotList);
+ SelectedSnapshot selectedSnapshot =
+ new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
+ return Futures.successful(Option.some(selectedSnapshot));
+ }
+
+ @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+ if(snapshotList == null){
+ snapshotList = new ArrayList<>();
+ snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
+ }
+ snapshotList.add(new Snapshot(snapshotMetadata, o));
+
+ return Futures.successful(null);
+ }
+
+ @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+ }
+
+ @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+ List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+ if(snapshotList == null){
+ return;
+ }
+
+ int deleteIndex = -1;
+
+ for(int i=0;i<snapshotList.size(); i++){
+ Snapshot snapshot = snapshotList.get(i);
+ if(snapshotMetadata.equals(snapshot.getMetadata())){
+ deleteIndex = i;
+ break;
+ }
+ }
+
+ if(deleteIndex != -1){
+ snapshotList.remove(deleteIndex);
+ }
+
+ }
+
+ @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+ throws Exception {
+ List<Snapshot> snapshotList = snapshots.get(s);
+
+ if(snapshotList == null){
+ return;
+ }
+
+ // TODO : This is a quick and dirty implementation. Do actual match later.
+ snapshotList.clear();
+ snapshots.remove(s);
+ }
+
+ private static class Snapshot {
+ private final SnapshotMetadata metadata;
+ private final Object data;
+
+ private Snapshot(SnapshotMetadata metadata, Object data) {
+ this.metadata = metadata;
+ this.data = data;
+ }
+
+ public SnapshotMetadata getMetadata() {
+ return metadata;
+ }
+
+ public Object getData() {
+ return data;
+ }
+ }
+}
import akka.actor.Props;
import akka.actor.UntypedActor;
import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import akka.actor.Props;
import akka.actor.UntypedActor;
import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
actor {
}
}
}
+
+in-memory-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
bounded-mailbox {
mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
*/
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import com.google.common.collect.ImmutableMap;
/**
*
//we will default to InMemoryDOMDataStore creation
configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
}
- ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
- .<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
- .put(LogicalDatastoreType.CONFIGURATION, configStore).build();
+
+ final Map<LogicalDatastoreType, DOMStore> datastores = new EnumMap<>(LogicalDatastoreType.class);
+ datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
+ datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
/*
* We use a single-threaded executor for commits with a bounded queue capacity. If the
DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
listenableFutureExecutor));
final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE);
commitStatsMXBean.registerMBean();
- final ThreadExecutorStatsMXBeanImpl commitExecutorStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(commitExecutor, "CommitExecutorStats",
+ final AbstractMXBean commitExecutorStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
JMX_BEAN_TYPE, null);
- commitExecutorStatsMXBean.registerMBean();
-
- final ThreadExecutorStatsMXBeanImpl commitFutureStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(listenableFutureExecutor,
+ final AbstractMXBean commitFutureStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
"CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
- commitFutureStatsMXBean.registerMBean();
newDataBroker.setCloseable(new AutoCloseable() {
@Override
public void close() {
commitStatsMXBean.unregisterMBean();
- commitExecutorStatsMXBean.unregisterMBean();
- commitFutureStatsMXBean.unregisterMBean();
+ if (commitExecutorStatsMXBean != null) {
+ commitExecutorStatsMXBean.unregisterMBean();
+ }
+ if (commitFutureStatsMXBean != null) {
+ commitFutureStatsMXBean.unregisterMBean();
+ }
}
});
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
* Composite DOM Transaction backed by {@link DOMStoreTransaction}.
*
abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTransaction> implements
AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- private final ImmutableMap<K, T> backingTxs;
+ private final Map<K, T> backingTxs;
private final Object identifier;
/**
* @param backingTxs
* Key,value map of backing transactions.
*/
- protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final ImmutableMap<K, T> backingTxs) {
+ protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map<K, T> backingTxs) {
this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null");
}
*/
protected final T getSubtransaction(final K key) {
Preconditions.checkNotNull(key, "key must not be null.");
- Preconditions.checkArgument(backingTxs.containsKey(key), "No subtransaction associated with %s", key);
- return backingTxs.get(key);
+
+ final T ret = backingTxs.get(key);
+ Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+ return ret;
}
/**
* Returns immutable Iterable of all subtransactions.
*
*/
- protected Iterable<T> getSubtransactions() {
+ protected Collection<T> getSubtransactions() {
return backingTxs.values();
}
protected void closeSubtransactions() {
/*
- * We share one exception for all failures, which are added
- * as supressedExceptions to it.
- *
+ * We share one exception for all failures, which are added
+ * as suppressed exceptions to it.
*/
IllegalStateException failure = null;
for (T subtransaction : backingTxs.values()) {
subtransaction.close();
} catch (Exception e) {
// If we have not allocated the failure yet, allocate it
- if(failure == null) {
- failure = new IllegalStateException("Uncaught exception occured during closing transaction.", e);
+ if (failure == null) {
+ failure = new IllegalStateException("Uncaught exception occurred during closing transaction", e);
} else {
- // We update it with addotional exceptions, which occured during error.
+ // We update it with additional exceptions, which occurred during error.
failure.addSuppressed(e);
}
}
}
// If we have a failure, we throw it after all attempts to close.
- if(failure != null) {
+ if (failure != null) {
throw failure;
}
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.EnumMap;
import java.util.Map;
import java.util.Map.Entry;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
*
* Abstract composite transaction factory.
* @param <T>
* Type of {@link DOMStoreTransactionFactory} factory.
*/
-public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
-
- private final ImmutableMap<LogicalDatastoreType, T> storeTxFactories;
-
- private boolean closed;
+abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicIntegerFieldUpdater<AbstractDOMForwardedTransactionFactory> UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed");
+ private final Map<LogicalDatastoreType, T> storeTxFactories;
+ private volatile int closed = 0;
protected AbstractDOMForwardedTransactionFactory(final Map<LogicalDatastoreType, ? extends T> txFactories) {
- this.storeTxFactories = ImmutableMap.copyOf(txFactories);
+ this.storeTxFactories = new EnumMap<>(txFactories);
}
/**
*
* @return New composite read-only transaction.
*/
- public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newReadOnlyTransaction());
}
- return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), builder.build());
+ return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns);
}
-
-
/**
* Creates a new composite write-only transaction
*
* @return New composite write-only transaction associated with this
* factory.
*/
- public DOMDataWriteTransaction newWriteOnlyTransaction() {
+ public final DOMDataWriteTransaction newWriteOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
}
- return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), builder.build(),
- this);
+ return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), txns, this);
}
/**
*
* @return New composite read-write transaction associated with this
* factory.
- *
*/
- public DOMDataReadWriteTransaction newReadWriteTransaction() {
+ public final DOMDataReadWriteTransaction newReadWriteTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadWriteTransaction());
+ txns.put(store.getKey(), store.getValue().newReadWriteTransaction());
}
- return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), builder.build(), this);
+ return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this);
}
/**
}
/**
- *
* Checks if instance is not closed.
*
* @throws IllegalStateException If instance of this class was closed.
*
*/
- @GuardedBy("this")
- protected synchronized void checkNotClosed() {
- Preconditions.checkState(!closed,"Transaction factory was closed. No further operations allowed.");
+ protected final void checkNotClosed() {
+ Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
}
@Override
- @GuardedBy("this")
- public synchronized void close() {
- closed = true;
+ public void close() {
+ final boolean success = UPDATER.compareAndSet(this, 0, 1);
+ Preconditions.checkState(success, "Transaction factory was already closed");
}
-
}
+
package org.opendaylight.controller.md.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkState;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DOMStore> implements DOMDataBroker,
AutoCloseable {
private final AtomicLong chainNum = new AtomicLong();
private volatile AutoCloseable closeable;
- public DOMDataBrokerImpl(final ImmutableMap<LogicalDatastoreType, DOMStore> datastores,
+ public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
final ListeningExecutorService executor) {
super(datastores);
this.coordinator = new DOMDataCommitCoordinatorImpl(executor);
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
@Override
public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreTransactionChain> backingChainsBuilder = ImmutableMap
- .builder();
+ checkNotClosed();
+
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
- backingChainsBuilder.put(entry.getKey(), entry.getValue().createTransactionChain());
+ backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
}
- long chainId = chainNum.getAndIncrement();
- ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = backingChainsBuilder.build();
+
+ final long chainId = chainNum.getAndIncrement();
LOG.debug("Transactoin chain {} created with listener {}, backing store chains {}", chainId, listener,
backingChains);
return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, coordinator, listener);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
* NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which is backed
* by several {@link DOMStoreTransactionChain} differentiated by provided
implements DOMTransactionChain, DOMDataCommitErrorListener {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
+ private final AtomicLong txNum = new AtomicLong();
private final DOMDataCommitExecutor coordinator;
private final TransactionChainListener listener;
private final long chainId;
- private final AtomicLong txNum = new AtomicLong();
- @GuardedBy("this")
- private boolean failed = false;
+
+ private volatile boolean failed = false;
/**
*
* If any of arguments is null.
*/
public DOMDataBrokerTransactionChainImpl(final long chainId,
- final ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
final DOMDataCommitExecutor coordinator, final TransactionChainListener listener) {
super(chains);
this.chainId = chainId;
}
@Override
- public synchronized CheckedFuture<Void,TransactionCommitFailedException> submit(
+ public CheckedFuture<Void,TransactionCommitFailedException> submit(
final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotClosed();
+
return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
}
@Override
- public synchronized void close() {
+ public void close() {
super.close();
+
for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
subChain.close();
}
if (!failed) {
LOG.debug("Transaction chain {}Â successfully finished.", this);
+ // FIXME: this event should be emitted once all operations complete
listener.onTransactionChainSuccessful(this);
}
}
@Override
- public synchronized void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
failed = true;
LOG.debug("Transaction chain {}Â failed.", this, cause);
listener.onTransactionChainFailed(this, tx, cause);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import java.util.List;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
/**
*
* Implementation of blocking three phase commit coordinator, which
public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataCommitCoordinatorImpl.class);
-
- /**
- * Runs AND binary operation between all booleans in supplied iteration of booleans.
- *
- * This method will stop evaluating iterables if first found is false.
- */
- private static final Function<Iterable<Boolean>, Boolean> AND_FUNCTION = new Function<Iterable<Boolean>, Boolean>() {
-
- @Override
- public Boolean apply(final Iterable<Boolean> input) {
- for(boolean value : input) {
- if(!value) {
- return Boolean.FALSE;
- }
- }
- return Boolean.TRUE;
- }
- };
-
- private final ListeningExecutorService executor;
-
private final DurationStatsTracker commitStatsTracker = new DurationStatsTracker();
+ private final ListeningExecutorService executor;
/**
*
}
/**
- *
* Implementation of blocking three-phase commit-coordination tasks without
- * support of cancelation.
- *
+ * support of cancellation.
*/
- private static class CommitCoordinationTask implements Callable<Void> {
-
+ private static final class CommitCoordinationTask implements Callable<Void> {
+ private static final AtomicReferenceFieldUpdater<CommitCoordinationTask, CommitPhase> PHASE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase");
private final DOMDataWriteTransaction tx;
private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
private final DurationStatsTracker commitStatTracker;
-
- @GuardedBy("this")
- private CommitPhase currentPhase;
+ private final int cohortSize;
+ private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
final DurationStatsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
- this.currentPhase = CommitPhase.SUBMITTED;
this.commitStatTracker = commitStatTracker;
+ this.cohortSize = Iterables.size(cohorts);
}
@Override
public Void call() throws TransactionCommitFailedException {
+ final long startTime = commitStatTracker != null ? System.nanoTime() : 0;
- long startTime = System.nanoTime();
try {
canCommitBlocking();
preCommitBlocking();
commitBlocking();
return null;
} catch (TransactionCommitFailedException e) {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, e);
- abortBlocking(e);
+ final CommitPhase phase = currentPhase;
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e);
+ abortBlocking(e, phase);
throw e;
} finally {
- if(commitStatTracker != null) {
+ if (commitStatTracker != null) {
commitStatTracker.addDuration(System.nanoTime() - startTime);
}
}
*
*/
private void canCommitBlocking() throws TransactionCommitFailedException {
- final Boolean canCommitResult = canCommitAll().checkedGet();
- if (!canCommitResult) {
- throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+ for (ListenableFuture<?> canCommit : canCommitAll()) {
+ try {
+ final Boolean result = (Boolean)canCommit.get();
+ if (result == null || !result) {
+ throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ throw TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
+ }
}
}
/**
*
- * Invokes preCommit on underlying cohorts and blocks till
- * all results are returned.
+ * Invokes canCommit on underlying cohorts and returns their futures;
+ * each future will contain {@link Boolean#TRUE} only if the
+ * corresponding cohort can commit.
*
- * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
- * state is not CAN_COMMIT
- * throws IllegalStateException.
+ * Valid state transition is from SUBMITTED to CAN_COMMIT,
+ * if currentPhase is not SUBMITTED throws IllegalStateException.
*
- * @throws TransactionCommitFailedException
- * If one of cohorts failed preCommit
+ * @return Array of cohort futures from the canCommit phase.
*
*/
- private void preCommitBlocking() throws TransactionCommitFailedException {
- preCommitAll().checkedGet();
+ private ListenableFuture<?>[] canCommitAll() {
+ changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
+ for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
+ ops[i++] = cohort.canCommit();
+ }
+ return ops;
}
/**
*
- * Invokes commit on underlying cohorts and blocks till
+ * Invokes preCommit on underlying cohorts and blocks till
* all results are returned.
*
- * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
- * IllegalStateException.
+ * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
+ * state is not CAN_COMMIT
+ * throws IllegalStateException.
*
* @throws TransactionCommitFailedException
* If one of cohorts failed preCommit
*
*/
- private void commitBlocking() throws TransactionCommitFailedException {
- commitAll().checkedGet();
- }
-
- /**
- * Aborts transaction.
- *
- * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
- * cohorts, blocks
- * for all results. If any of the abort failed throws
- * IllegalStateException,
- * which will contains originalCause as suppressed Exception.
- *
- * If aborts we're successful throws supplied exception
- *
- * @param originalCause
- * Exception which should be used to fail transaction for
- * consumers of transaction
- * future and listeners of transaction failure.
- * @throws TransactionCommitFailedException
- * on invocation of this method.
- * originalCa
- * @throws IllegalStateException
- * if abort failed.
- */
- private void abortBlocking(final TransactionCommitFailedException originalCause)
- throws TransactionCommitFailedException {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, originalCause);
- Exception cause = originalCause;
+ private void preCommitBlocking() throws TransactionCommitFailedException {
+ final ListenableFuture<?>[] preCommitFutures = preCommitAll();
try {
- abortAsyncAll().get();
+ for(ListenableFuture<?> future : preCommitFutures) {
+ future.get();
+ }
} catch (InterruptedException | ExecutionException e) {
- LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
- cause = new IllegalStateException("Abort failed.", e);
- cause.addSuppressed(e);
+ throw TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER.apply(e);
}
- Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
}
/**
* state is not CAN_COMMIT
* throws IllegalStateException.
*
- * @return Future which will complete once all cohorts completed
- * preCommit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * @return Array of cohort futures from the preCommit phase.
*
*/
- private CheckedFuture<Void, TransactionCommitFailedException> preCommitAll() {
+ private ListenableFuture<?>[] preCommitAll() {
changeStateFrom(CommitPhase.CAN_COMMIT, CommitPhase.PRE_COMMIT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.preCommit());
+ ops[i++] = cohort.preCommit();
+ }
+ return ops;
+ }
+
+ /**
+ *
+ * Invokes commit on underlying cohorts and blocks till
+ * all results are returned.
+ *
+ * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
+ * IllegalStateException.
+ *
+ * @throws TransactionCommitFailedException
+ * If one of the cohorts failed commit
+ *
+ */
+ private void commitBlocking() throws TransactionCommitFailedException {
+ final ListenableFuture<?>[] commitFutures = commitAll();
+ try {
+ for(ListenableFuture<?> future : commitFutures) {
+ future.get();
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ throw TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e);
}
- /*
- * We are returing all futures as list, not only succeeded ones in
- * order to fail composite future if any of them failed.
- * See Futures.allAsList for this description.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
- return MappingCheckedFuture.create(compositeResult,
- TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER);
}
/**
* Valid state transition is from PRE_COMMIT to COMMIT, if not throws
* IllegalStateException
*
- * @return Future which will complete once all cohorts completed
- * commit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * @return Array of cohort futures from the commit phase.
*
*/
- private CheckedFuture<Void, TransactionCommitFailedException> commitAll() {
+ private ListenableFuture<?>[] commitAll() {
changeStateFrom(CommitPhase.PRE_COMMIT, CommitPhase.COMMIT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.commit());
+ ops[i++] = cohort.commit();
}
- /*
- * We are returing all futures as list, not only succeeded ones in
- * order to fail composite future if any of them failed.
- * See Futures.allAsList for this description.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
- return MappingCheckedFuture.create(compositeResult,
- TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
+ return ops;
}
/**
+ * Aborts transaction.
*
- * Invokes canCommit on underlying cohorts and returns composite future
- * which will contains {@link Boolean#TRUE} only and only if
- * all cohorts returned true.
- *
- * Valid state transition is from SUBMITTED to CAN_COMMIT,
- * if currentPhase is not SUBMITTED throws IllegalStateException.
+ * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
+ * cohorts and blocks for all results. If any of the aborts failed,
+ * throws IllegalStateException, which will contain originalCause as
+ * a suppressed exception.
*
- * @return Future which will complete once all cohorts completed
- * preCommit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * If the aborts were successful, throws the supplied exception.
*
+ * @param originalCause
+ * Exception which should be used to fail transaction for
+ * consumers of transaction
+ * future and listeners of transaction failure.
+ * @param phase phase in which the problem ensued
+ * @throws TransactionCommitFailedException
+ * on invocation of this method, propagating originalCause.
+ * @throws IllegalStateException
+ * if abort failed.
*/
- private CheckedFuture<Boolean, TransactionCommitFailedException> canCommitAll() {
- changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
- Builder<ListenableFuture<Boolean>> canCommitOperations = ImmutableList.builder();
- for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- canCommitOperations.add(cohort.canCommit());
+ private void abortBlocking(final TransactionCommitFailedException originalCause, final CommitPhase phase)
+ throws TransactionCommitFailedException {
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, originalCause);
+ Exception cause = originalCause;
+ try {
+ abortAsyncAll(phase).get();
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
+ cause = new IllegalStateException("Abort failed.", e);
+ cause.addSuppressed(e);
}
- ListenableFuture<List<Boolean>> allCanCommits = Futures.allAsList(canCommitOperations.build());
- ListenableFuture<Boolean> allSuccessFuture = Futures.transform(allCanCommits, AND_FUNCTION);
- return MappingCheckedFuture.create(allSuccessFuture,
- TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER);
-
+ Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
}
/**
- *
* Invokes abort on underlying cohorts and returns future which
- * completes
- * once all abort on cohorts are completed.
+ * completes once abort has completed on all cohorts.
*
+ * @param phase phase in which the problem ensued
* @return Future which will complete once all cohorts completed
* abort.
- *
*/
- private ListenableFuture<Void> abortAsyncAll() {
- changeStateFrom(currentPhase, CommitPhase.ABORT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+ private ListenableFuture<Void> abortAsyncAll(final CommitPhase phase) {
+ changeStateFrom(phase, CommitPhase.ABORT);
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.abort());
+ ops[i++] = cohort.abort();
}
+
/*
- * We are returing all futures as list, not only succeeded ones in
+ * We are returning all futures as list, not only succeeded ones in
* order to fail composite future if any of them failed.
* See Futures.allAsList for this description.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
+ ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops);
return compositeResult;
}
* @throws IllegalStateException
* If currentState of task does not match expected state
*/
- private synchronized void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
- Preconditions.checkState(currentPhase.equals(currentExpected),
- "Invalid state transition: Tx: %s current state: %s new state: %s", tx.getIdentifier(),
- currentPhase, newState);
- LOG.debug("Transaction {}: Phase {} Started ", tx.getIdentifier(), newState);
- currentPhase = newState;
- };
+ private void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
+ final boolean success = PHASE_UPDATER.compareAndSet(this, currentExpected, newState);
+ Preconditions.checkState(success, "Invalid state transition: Tx: %s expected: %s current: %s target: %s",
+ tx.getIdentifier(), currentExpected, currentPhase, newState);
+ LOG.debug("Transaction {}: Phase {} Started", tx.getIdentifier(), newState);
+ };
}
}
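
For orientation, a minimal cohort sketch that the coordination task above could drive through its canCommit/preCommit/commit/abort phases. It is illustrative only and not part of this change, assuming the DOMStoreThreePhaseCommitCohort contract used in this patch (canCommit yields a Boolean future, the remaining phases yield Void futures).

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;

    final class NoopCommitCohort implements DOMStoreThreePhaseCommitCohort {
        @Override
        public ListenableFuture<Boolean> canCommit() {
            // Always votes yes; a real cohort would validate its candidate here.
            return Futures.immediateFuture(Boolean.TRUE);
        }

        @Override
        public ListenableFuture<Void> preCommit() {
            return Futures.<Void>immediateFuture(null);
        }

        @Override
        public ListenableFuture<Void> commit() {
            return Futures.<Void>immediateFuture(null);
        }

        @Override
        public ListenableFuture<Void> abort() {
            return Futures.<Void>immediateFuture(null);
        }
    }
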
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
- *
* Read Only Transaction, which is composed of several
* {@link DOMStoreReadTransaction} transactions. Subtransaction is selected by
* {@link LogicalDatastoreType} type parameter in
DOMDataReadOnlyTransaction {
protected DOMForwardedReadOnlyTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
super(identifier, backingTxs);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
public void close() {
closeSubtransactions();
}
-
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
*
* Read-Write Transaction, which is composed of several
* transactions.
*
*/
-
-class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements
- DOMDataReadWriteTransaction {
-
+final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
protected DOMForwardedReadWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs, commitImpl);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import static com.google.common.base.Preconditions.checkState;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- *
- *
* Write Transaction, which is composed of several
- * {@link DOMStoreWriteTransaction} transactions. Subtransaction is selected by
+ * {@link DOMStoreWriteTransaction} transactions. A sub-transaction is selected by
* {@link LogicalDatastoreType} type parameter in:
*
* <ul>
* invocation with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} for underlying
* transactions.
*
- * @param <T>
- * Subtype of {@link DOMStoreWriteTransaction} which is used as
+ * @param <T> Subtype of {@link DOMStoreWriteTransaction} which is used as
* subtransaction.
*/
class DOMForwardedWriteTransaction<T extends DOMStoreWriteTransaction> extends
AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, DOMDataCommitImplementation> IMPL_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, DOMDataCommitImplementation.class, "commitImpl");
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, Future> FUTURE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class);
+ private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
/**
- * Implementation of real commit.
- *
- * Transaction can not be commited if commitImpl is null,
- * so this seting this property to null is also used to
- * prevent write to
- * already commited / canceled transaction {@link #checkNotCanceled()
- *
- *
+ * Implementation of real commit. It also acts as an indication that
+ * the transaction is running -- which we flip atomically using
+ * {@link #IMPL_UPDATER}.
*/
- @GuardedBy("this")
private volatile DOMDataCommitImplementation commitImpl;
/**
+ * Future task of transaction commit. It starts off as null, but is
+ * set appropriately on {@link #submit()} and {@link #cancel()} via
+ * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
*
- * Future task of transaction commit.
- *
- * This value is initially null, and is once updated if transaction
- * is commited {@link #commit()}.
- * If this future exists, transaction MUST not be commited again
- * and all modifications should fail. See {@link #checkNotCommited()}.
- *
+ * Lazy set is safe for use because the field is only referenced in the
+ * {@link #cancel()} slow path, where we will busy-wait for it. The
+ * fast path gets the benefit of a store-store barrier instead of the
+ * usual store-load barrier.
*/
- @GuardedBy("this")
- private volatile CheckedFuture<Void, TransactionCommitFailedException> commitFuture;
+ private volatile Future<?> commitFuture;
protected DOMForwardedWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
+ final Map<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs);
this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
}
@Override
public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).write(path, data);
}
@Override
public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).delete(path);
}
@Override
public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).merge(path, data);
}
@Override
- public synchronized boolean cancel() {
- // Transaction is already canceled, we are safe to return true
- final boolean cancelationResult;
- if (commitImpl == null && commitFuture != null) {
- // Transaction is submitted, we try to cancel future.
- cancelationResult = commitFuture.cancel(false);
- } else if(commitImpl == null) {
+ public boolean cancel() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ if (impl != null) {
+ LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+ FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
return true;
- } else {
- cancelationResult = true;
- commitImpl = null;
}
- return cancelationResult;
+ // The transaction is in process of being submitted or cancelled. Busy-wait
+ // for the corresponding future.
+ Future<?> future;
+ do {
+ future = commitFuture;
+ } while (future == null);
+
+ return future.cancel(false);
}
@Override
- public synchronized ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
}
@Override
- public CheckedFuture<Void,TransactionCommitFailedException> submit() {
- checkNotReady();
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ checkRunning(impl);
- ImmutableList.Builder<DOMStoreThreePhaseCommitCohort> cohortsBuilder = ImmutableList.builder();
- for (DOMStoreWriteTransaction subTx : getSubtransactions()) {
- cohortsBuilder.add(subTx.ready());
- }
- ImmutableList<DOMStoreThreePhaseCommitCohort> cohorts = cohortsBuilder.build();
- commitFuture = commitImpl.submit(this, cohorts);
-
- /*
- *We remove reference to Commit Implementation in order
- *to prevent memory leak
- */
- commitImpl = null;
- return commitFuture;
- }
+ final Collection<T> txns = getSubtransactions();
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
- private void checkNotReady() {
- checkNotCommited();
- checkNotCanceled();
- }
+        // FIXME: deal with errors thrown by backend (ready and submit can fail in theory)
+ for (DOMStoreWriteTransaction txn : txns) {
+ cohorts.add(txn.ready());
+ }
- private void checkNotCanceled() {
- Preconditions.checkState(commitImpl != null, "Transaction was canceled.");
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+ FUTURE_UPDATER.lazySet(this, ret);
+ return ret;
}
- private void checkNotCommited() {
- checkState(commitFuture == null, "Transaction was already submited.");
+ private void checkRunning(final DOMDataCommitImplementation impl) {
+ Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
}
-}
\ No newline at end of file
+}
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
public class DOMBrokerTest {
private SchemaContext schemaContext;
commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
domBroker = new DOMDataBrokerImpl(stores, executor);
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
Futures.addCallback( writeTx.submit(), new FutureCallback<Void>() {
@Override
- public void onSuccess( Void result ) {
+ public void onSuccess( final Void result ) {
commitCompletedLatch.countDown();
}
@Override
- public void onFailure( Throwable t ) {
+ public void onFailure( final Throwable t ) {
caughtCommitEx.set( t );
commitCompletedLatch.countDown();
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
private final CountDownLatch latch = new CountDownLatch( 1 );
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
this.change = change;
latch.countDown();
}
ExecutorService delegate;
- public CommitExecutorService( ExecutorService delegate ) {
+ public CommitExecutorService( final ExecutorService delegate ) {
this.delegate = delegate;
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
* to implement {@link DOMStore} contract.
*
*/
-public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
- TransactionReadyPrototype,AutoCloseable {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
private final ListenerTree listenerTree = ListenerTree.create();
private final AtomicLong txCounter = new AtomicLong(0);
- private final ListeningExecutorService listeningExecutor;
private final QueuedNotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> dataChangeListenerNotificationManager;
private final ExecutorService dataChangeListenerExecutor;
-
- private final ExecutorService domStoreExecutor;
+ private final ListeningExecutorService commitExecutor;
private final boolean debugTransactions;
private final String name;
private volatile AutoCloseable closeable;
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor) {
- this(name, domStoreExecutor, dataChangeListenerExecutor,
+ this(name, commitExecutor, dataChangeListenerExecutor,
InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false);
}
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor, final int maxDataChangeListenerQueueSize,
final boolean debugTransactions) {
this.name = Preconditions.checkNotNull(name);
- this.domStoreExecutor = Preconditions.checkNotNull(domStoreExecutor);
- this.listeningExecutor = MoreExecutors.listeningDecorator(this.domStoreExecutor);
+ this.commitExecutor = Preconditions.checkNotNull(commitExecutor);
this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
this.debugTransactions = debugTransactions;
"DataChangeListenerQueueMgr");
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
}
public ExecutorService getDomStoreExecutor() {
- return domStoreExecutor;
+ return commitExecutor;
}
@Override
@Override
public void close() {
- ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+ ExecutorServiceUtil.tryGracefulShutdown(commitExecutor, 30, TimeUnit.SECONDS);
ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
if(closeable != null) {
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction writeTx) {
- LOG.debug("Tx: {} is submitted. Modifications: {}", writeTx.getIdentifier(), writeTx.getMutatedView());
- return new ThreePhaseCommitImpl(writeTx);
+ protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ LOG.debug("Tx: {} is closed.", tx.getIdentifier());
+ }
+
+ @Override
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
+ return new ThreePhaseCommitImpl(tx, tree);
}
private Object nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private class DOMStoreTransactionChainImpl implements DOMStoreTransactionChain, TransactionReadyPrototype {
-
+ private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
+ @GuardedBy("this")
+ private SnapshotBackedWriteTransaction allocatedTransaction;
+ @GuardedBy("this")
+ private DataTreeSnapshot readySnapshot;
@GuardedBy("this")
- private SnapshotBackedWriteTransaction latestOutstandingTx;
-
private boolean chainFailed = false;
+ @GuardedBy("this")
private void checkFailed() {
Preconditions.checkState(!chainFailed, "Transaction chain is failed.");
}
- @Override
- public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
- final DataTreeSnapshot snapshot;
+ @GuardedBy("this")
+ private DataTreeSnapshot getSnapshot() {
checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
+
+ if (allocatedTransaction != null) {
+ Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier());
+ return readySnapshot;
} else {
- snapshot = dataTree.takeSnapshot();
+ return dataTree.takeSnapshot();
}
+ }
+
+ @GuardedBy("this")
+ private <T extends SnapshotBackedWriteTransaction> T recordTransaction(final T transaction) {
+ allocatedTransaction = transaction;
+ readySnapshot = null;
+ return transaction;
+ }
+
+ @Override
+ public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
+ final DataTreeSnapshot snapshot = getSnapshot();
return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot);
}
@Override
public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
- }
- final SnapshotBackedReadWriteTransaction ret = new SnapshotBackedReadWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
}
@Override
public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
+ }
+
+ @Override
+ protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ if (tx.equals(allocatedTransaction)) {
+ Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot);
+ allocatedTransaction = null;
}
- final SnapshotBackedWriteTransaction ret = new SnapshotBackedWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction tx) {
- DOMStoreThreePhaseCommitCohort storeCohort = InMemoryDOMDataStore.this.ready(tx);
- return new ChainedTransactionCommitImpl(tx, storeCohort, this);
+ protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction);
+ if (readySnapshot != null) {
+ // The snapshot should have been cleared
+ LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree);
+ }
+
+ final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree);
+ readySnapshot = tree;
+ return new ChainedTransactionCommitImpl(tx, cohort, this);
}
@Override
public void close() {
-
// FIXME: this call doesn't look right here - listeningExecutor is shared and owned
// by the outer class.
//listeningExecutor.shutdownNow();
protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
final Throwable t) {
chainFailed = true;
-
}
public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If committed transaction is latestOutstandingTx we clear
- // latestOutstandingTx
- // field in order to base new transactions on Datastore Data Tree
- // directly.
- if (transaction.equals(latestOutstandingTx)) {
- latestOutstandingTx = null;
+ // If the committed transaction was the one we allocated last,
+ // we clear it and the ready snapshot, so the next transaction
+ // allocated refers to the data tree directly.
+ if (transaction.equals(allocatedTransaction)) {
+ if (readySnapshot == null) {
+ LOG.warn("Transaction {} committed while no ready snapshot present", transaction);
+ }
+
+ allocatedTransaction = null;
+ readySnapshot = null;
}
}
-
}
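To illustrate the chain semantics above (the store, path and node parameters are placeholders, not from this patch): a transaction allocated from the chain is based on its predecessor's ready snapshot until that predecessor commits, after which new transactions read the data tree directly:

    // Illustration only; not part of the patch. Assumes the usual MD-SAL store SPI imports.
    static void chainSketch(final DOMStore store, final YangInstanceIdentifier path,
            final NormalizedNode<?, ?> node) {
        final DOMStoreTransactionChain chain = store.createTransactionChain();
        final DOMStoreWriteTransaction w1 = chain.newWriteOnlyTransaction();
        w1.write(path, node);
        final DOMStoreThreePhaseCommitCohort cohort = w1.ready(); // chain records the ready snapshot
        final DOMStoreReadTransaction r1 = chain.newReadOnlyTransaction(); // backed by w1's snapshot
        // Once the cohort commits, onTransactionCommited(w1) clears allocatedTransaction and
        // readySnapshot, so the next transaction allocated from the chain uses the data tree.
    }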
private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DOMStoreThreePhaseCommitCohort delegate;
-
private final DOMStoreTransactionChainImpl txChain;
protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- super();
this.transaction = transaction;
this.delegate = delegate;
this.txChain = txChain;
public void onSuccess(final Void result) {
txChain.onTransactionCommited(transaction);
}
-
});
return commitFuture;
}
-
}
private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DataTreeModification modification;
private ResolveDataChangeEventsTask listenerResolver;
private DataTreeCandidate candidate;
- public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction) {
+ public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
this.transaction = writeTransaction;
- this.modification = transaction.getMutatedView();
+ this.modification = modification;
}
@Override
public ListenableFuture<Boolean> canCommit() {
- return listeningExecutor.submit(new Callable<Boolean>() {
+ return commitExecutor.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws TransactionCommitFailedException {
try {
@Override
public ListenableFuture<Void> preCommit() {
- return listeningExecutor.submit(new Callable<Void>() {
+ return commitExecutor.submit(new Callable<Void>() {
@Override
public Void call() {
candidate = dataTree.prepare(modification);
* The commit has to occur atomically with regard to listener
* registrations.
*/
- synchronized (this) {
+ synchronized (InMemoryDOMDataStore.this) {
dataTree.commit(candidate);
listenerResolver.resolve(dataChangeListenerNotificationManager);
}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
@Nullable final InMemoryDOMDataStoreConfigProperties properties) {
InMemoryDOMDataStoreConfigProperties actualProperties = properties;
- if(actualProperties == null) {
+ if (actualProperties == null) {
actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault();
}
// task execution time to get higher throughput as DataChangeListeners typically provide
// much of the business logic for a data model. If the executor queue size limit is reached,
// subsequent submitted notifications will block the calling thread.
-
int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize();
int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize();
ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
- ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- actualProperties.getMaxDataStoreExecutorQueueSize(), "DOMStore-" + name );
-
- InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
- domStoreExecutor, dataChangeListenerExecutor,
+ final ListeningExecutorService commitExecutor = MoreExecutors.sameThreadExecutor();
+ final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+ commitExecutor, dataChangeListenerExecutor,
actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions);
- if(schemaService != null) {
+ if (schemaService != null) {
schemaService.registerSchemaContextListener(dataStore);
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkNotNull;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* and executed according to {@link TransactionReadyPrototype}.
*
*/
-class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
- implements DOMStoreReadWriteTransaction {
-
+final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
/**
LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
checkNotNull(path, "Path must not be null.");
- DataTreeModification dataView = getMutatedView();
- if(dataView == null) {
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
- }
-
+ final Optional<NormalizedNode<?, ?>> result;
try {
- return Futures.immediateCheckedFuture(dataView.readNode(path));
+ result = readSnapshotNode(path);
} catch (Exception e) {
LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e));
+ }
+
+ if (result == null) {
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
+ } else {
+ return Futures.immediateCheckedFuture(result);
}
}
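A small behavioural sketch of the reworked read path (the store and path parameters are placeholders; this is not part of the patch): reading from a closed transaction now fails the returned future instead of read() throwing:

    // Illustration only; not part of the patch. Assumes the usual MD-SAL store SPI imports.
    static void readAfterCloseSketch(final DOMStore store, final YangInstanceIdentifier path) {
        final DOMStoreReadWriteTransaction rwTx = store.newReadWriteTransaction();
        rwTx.close();
        // readSnapshotNode() returns null once readyImpl has been cleared, so this future fails
        // with ReadFailedException("Transaction is closed") rather than read() throwing.
        final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future = rwTx.read(path);
    }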
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*
*/
class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
-
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
- private DataTreeModification mutableTree;
- private boolean ready = false;
- private TransactionReadyPrototype readyImpl;
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
+
+ // non-null when not ready
+ private volatile TransactionReadyPrototype readyImpl;
+ // non-null when not committed/closed
+ private volatile DataTreeModification mutableTree;
/**
* Creates new write-only transaction.
public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
super(identifier, debug);
- mutableTree = snapshot.newModification();
this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
+ mutableTree = snapshot.newModification();
LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot);
}
- @Override
- public void close() {
- LOG.debug("Store transaction: {} : Closed", getIdentifier());
- this.mutableTree = null;
- this.readyImpl = null;
- }
-
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
- mutableTree.write(path, data);
+ tree.write(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
- mutableTree.merge(path, data);
+ tree.merge(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void delete(final YangInstanceIdentifier path) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
+
try {
- LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
- mutableTree.delete(path);
+ tree.delete(path);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, mutableTree, e);
+ LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
}
}
- protected final boolean isReady() {
- return ready;
+ /**
+ * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does
+ * not allow data access after the transaction has been closed or readied.
+ *
+ * @param path Path to read
+     * @return null if the transaction has been closed, otherwise the result of the read.
+ */
+ protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+ return readyImpl == null ? null : mutableTree.readNode(path);
}
- protected final void checkNotReady() {
- checkState(!ready, "Transaction %s is ready. No further modifications allowed.", getIdentifier());
+    private void checkNotReady() {
+ checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.", getIdentifier());
}
@Override
- public synchronized DOMStoreThreePhaseCommitCohort ready() {
- checkState(!ready, "Transaction %s is already ready.", getIdentifier());
- ready = true;
+ public DOMStoreThreePhaseCommitCohort ready() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
+
LOG.debug("Store transaction: {} : Ready", getIdentifier());
- mutableTree.ready();
- return readyImpl.ready(this);
+
+ final DataTreeModification tree = mutableTree;
+ TREE_UPDATER.lazySet(this, null);
+ tree.ready();
+ return wasReady.transactionReady(this, tree);
}
- protected DataTreeModification getMutatedView() {
- return mutableTree;
+ @Override
+ public void close() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ if (wasReady != null) {
+ LOG.debug("Store transaction: {} : Closed", getIdentifier());
+ TREE_UPDATER.lazySet(this, null);
+ wasReady.transactionAborted(this);
+ } else {
+ LOG.debug("Store transaction: {} : Closed after submit", getIdentifier());
+ }
}
@Override
protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("ready", isReady());
+ return toStringHelper.add("ready", readyImpl == null);
}
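For orientation, a minimal sketch of the abort path introduced above (the stub prototype and the snapshot, path and node parameters are invented for illustration and assume same-package access; the real callers are the store and its transaction chain):

    // Illustration only; not part of the patch.
    static void abortSketch(final DataTreeSnapshot snapshot, final YangInstanceIdentifier path,
            final NormalizedNode<?, ?> node) {
        final SnapshotBackedWriteTransaction.TransactionReadyPrototype stub =
                new SnapshotBackedWriteTransaction.TransactionReadyPrototype() {
            @Override
            protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
                // nothing to clean up in this stub
            }
            @Override
            protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx,
                    final DataTreeModification tree) {
                throw new UnsupportedOperationException("commit is not wired up in this stub");
            }
        };

        final SnapshotBackedWriteTransaction tx = new SnapshotBackedWriteTransaction("sketch-tx", false, snapshot, stub);
        tx.write(path, node); // permitted while readyImpl is non-null
        tx.close();           // clears readyImpl and invokes stub.transactionAborted(tx)
        tx.close();           // a second close finds readyImpl already null and only logs
    }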
/**
* providing underlying logic for applying implementation.
*
*/
- public static interface TransactionReadyPrototype {
+ abstract static class TransactionReadyPrototype {
+ /**
+ * Called when a transaction is closed without being readied. This is not invoked for
+ * transactions which are ready.
+ *
+ * @param tx Transaction which got aborted.
+ */
+ protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
/**
* Returns a commit coordinator associated with supplied transactions.
*
* @param tx
* Transaction on which ready was invoked.
+ * @param tree
+ * Modified data tree which has been constructed.
* @return DOMStoreThreePhaseCommitCohort associated with transaction
*/
- DOMStoreThreePhaseCommitCohort ready(SnapshotBackedWriteTransaction tx);
+ protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
}
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl.jmx;
import java.util.concurrent.ExecutorService;
-
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
*/
public class InMemoryDataStoreStats implements AutoCloseable {
- private final ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean;
- private final ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean;
+ private final AbstractMXBean notificationExecutorStatsBean;
+ private final AbstractMXBean dataStoreExecutorStatsBean;
private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
- public InMemoryDataStoreStats(String mBeanType, QueuedNotificationManager<?, ?> manager,
- ExecutorService dataStoreExecutor) {
+ public InMemoryDataStoreStats(final String mBeanType, final QueuedNotificationManager<?, ?> manager,
+ final ExecutorService dataStoreExecutor) {
- this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
+ notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", mBeanType, null);
notificationManagerStatsBean.registerMBean();
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
"notification-executor", mBeanType, null);
- this.notificationExecutorStatsBean.registerMBean();
+ if (notificationExecutorStatsBean != null) {
+ notificationExecutorStatsBean.registerMBean();
+ }
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dataStoreExecutor,
+ dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dataStoreExecutor,
"data-store-executor", mBeanType, null);
- this.dataStoreExecutorStatsBean.registerMBean();
+ if (dataStoreExecutorStatsBean != null) {
+ dataStoreExecutorStatsBean.registerMBean();
+ }
}
@Override
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-config</artifactId>
+ <packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/20-clustering-test-app.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/modules.conf</file>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider">
+ prefix:clustering-it-provider
+ </type>
+ <name>clustering-it-provider</name>
+
+ <rpc-registry>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <name>binding-rpc-broker</name>
+ </rpc-registry>
+ <data-broker>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
+ </data-broker>
+ <notification-service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
+ binding:binding-notification-service
+ </type>
+ <name>binding-notification-broker</name>
+ </notification-service>
+ </module>
+ </modules>
+ </data>
+
+ </configuration>
+
+ <required-capabilities>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
+    <capability>urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider?module=clustering-it-provider&amp;revision=2014-08-19</capability>
+
+ </required-capabilities>
+
+</snapshot>
+
--- /dev/null
+# This file describes which shards live on which members
+# The format for module-shards is as follows:
+# {
+#     name = "<friendly_name_of_the_module>"
+#     shards = [
+#         {
+#             name = "<any_name_that_is_unique_for_the_module>"
+#             replicas = [
+#                 "<name_of_member_on_which_to_run>"
+#             ]
+#         }
+#     ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than one.
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store then
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+ {
+ name = "default"
+ shards = [
+ {
+ name="default"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "topology"
+ shards = [
+ {
+ name="topology"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "inventory"
+ shards = [
+ {
+ name="inventory"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "toaster"
+ shards = [
+ {
+ name="toaster"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+ {
+ name = "car"
+ shards = [
+ {
+ name="car"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+ {
+ name = "people"
+ shards = [
+ {
+ name="people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+ {
+ name = "car-people"
+ shards = [
+ {
+ name="car-people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+
+]
--- /dev/null
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+# name = "<friendly_name_of_module>"
+# namespace = "<the yang namespace of the module>"
+# shard-strategy = "module"
+# }
+#
+# Note that at this time the only shard-strategy we support is "module", which
+# will put all the data of a single module in two shards (one for config and one for
+# operational data).
+
+modules = [
+ {
+ name = "inventory"
+ namespace = "urn:opendaylight:inventory"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "topology"
+ namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "toaster"
+ namespace = "http://netconfcentral.org/ns/toaster"
+ shard-strategy = "module"
+ },
+ {
+ name = "car"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+ shard-strategy = "module"
+ }
+ {
+ name = "people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+ shard-strategy = "module"
+ }
+
+ {
+ name = "car-people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+ shard-strategy = "module"
+ }
+]
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-model</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>org.opendaylight.controller.sal-clustering-it-model</Bundle-Name>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/generated-sources/sal</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!--This plugin's configuration is used to store Eclipse
+ m2e settings only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ <version>${ietf-inet-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ <version>${ietf-yang-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ <version>${yang-ext.version}</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+module car-people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
+
+ prefix car;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+ import car { prefix "c"; revision-date 2014-08-18; }
+ import people { prefix "people"; revision-date 2014-08-18; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ container car-people {
+ description
+ "Top-level container for all people car map";
+
+ list car-person {
+ key "car-id person-id";
+ description "A mapping of cars and people.";
+ leaf car-id {
+ type c:car-id;
+ }
+
+ leaf person-id {
+ type people:person-id;
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car-purchase {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
+
+ prefix cp;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+ import car { prefix "car"; revision-date 2014-08-18; }
+ import people { prefix "person"; revision-date 2014-08-18; }
+ import yang-ext {prefix "ext"; revision-date "2013-07-09";}
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car purchase for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ rpc buy-car {
+ description
+ "buy a new car";
+ input {
+ leaf person {
+ ext:context-reference "person:person-context";
+ type person:person-ref;
+ description "A reference to a particular person.";
+ }
+
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+ }
+
+ notification carBought {
+ description
+ "Indicates that a person bought a car.";
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car";
+
+ prefix car;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ typedef car-id {
+ type inet:uri;
+ description "An identifier for car entry.";
+ }
+
+ grouping car-entry {
+ description "Describes the contents of a car entry -
+ Details of the car manufacturer, model etc";
+ leaf id {
+ type car-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf model {
+ type string;
+ }
+ leaf manufacturer {
+ type string;
+ }
+
+ leaf year {
+ type uint32;
+ }
+
+ leaf category {
+ type string;
+ }
+ }
+
+ container cars {
+ description
+ "Top-level container for all car objects.";
+ list car-entry {
+ key "id";
+ description "A list of cars (as defined by the 'grouping car-entry').";
+ uses car-entry;
+ }
+ }
+
+
+}
\ No newline at end of file
--- /dev/null
+module people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people";
+
+ prefix people;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for person for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ typedef person-id {
+ type inet:uri;
+ description "An identifier for person.";
+ }
+
+ typedef person-ref {
+ type instance-identifier;
+ description "A reference that points to an people:people/person in the data tree.";
+ }
+ identity person-context {
+ description "A person-context is a classifier for person elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
+ }
+
+ grouping person {
+ description "Describes the details of the person";
+
+ leaf id {
+ type person-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf gender {
+ type string;
+ }
+
+ leaf age {
+ type uint32;
+ }
+
+ leaf address {
+ type string;
+ }
+
+ leaf contactNo {
+ type string;
+ }
+ }
+
+ container people {
+ description
+ "Top-level container for all people";
+
+ list person {
+ key "id";
+ description "A list of people (as defined by the 'grouping person').";
+ uses person;
+ }
+ }
+
+ rpc add-person {
+ description
+ "Add a person entry into database";
+ input {
+ uses person;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>sal-samples</artifactId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it</artifactId>
+ <packaging>pom</packaging>
+ <modules>
+ <module>configuration</module>
+ <module>model</module>
+ <module>provider</module>
+ </modules>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-provider</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.controller.config.yang.config.clustering_it_provider</Export-Package>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <version>${version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-util</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>equinoxSDK381</groupId>
+ <artifactId>org.eclipse.osgi</artifactId>
+ <version>3.8.1.v20120830-144521</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.listener;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class PeopleCarListener implements CarPurchaseListener {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleCarListener.class);
+
+ private DataBroker dataProvider;
+
+
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+ @Override
+ public void onCarBought(CarBought notification) {
+ log.info("onCarBought notification : Adding car person entry");
+
+ final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
+ carPersonBuilder.setCarId(notification.getCarId());
+ carPersonBuilder.setPersonId(notification.getPersonId());
+ CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
+ carPersonBuilder.setKey(key);
+ final CarPerson carPerson = carPersonBuilder.build();
+
+ InstanceIdentifier<CarPerson> carPersonIId =
+ InstanceIdentifier.<CarPeople>builder(CarPeople.class).child(CarPerson.class, carPerson.getKey()).build();
+
+
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
+
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("Car bought, entry added to map of people and car [{}]", carPerson);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("Car bought, Failed entry addition to map of people and car [{}]", carPerson);
+ }
+ });
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+public class PeopleProvider implements PeopleService, AutoCloseable {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleProvider.class);
+
+ private DataBroker dataProvider;
+
+ private BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration;
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+
+ public void setRpcRegistration(BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration) {
+ this.rpcRegistration = rpcRegistration;
+ }
+
+ @Override
+ public Future<RpcResult<Void>> addPerson(AddPersonInput input) {
+ log.info("RPC addPerson : adding person [{}]", input);
+
+ PersonBuilder builder = new PersonBuilder(input);
+ final Person person = builder.build();
+ final SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+
+ // Each entry will be identifiable by a unique key, we have to create that identifier
+ final InstanceIdentifier.InstanceIdentifierBuilder<Person> personIdBuilder =
+ InstanceIdentifier.<People>builder(People.class)
+ .child(Person.class, person.getKey());
+    final InstanceIdentifier<Person> personId = personIdBuilder.build();
+ // Place entry in data store tree
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
+
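+ // Register the routed RPC path for this person only after the datastore write succeeds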
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("RPC addPerson : person added successfully [{}]", person);
+ rpcRegistration.registerPath(PersonContext.class, personId);
+ log.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
+ futureResult.set(RpcResultBuilder.<Void>success().build());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("RPC addPerson : person addition failed [{}]", person);
+ futureResult.set(RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, t.getMessage()).build());
+ }
+ });
+ return futureResult;
+ }
+
+ @Override
+ public void close() throws Exception {
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+
+public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable {
+
+ private static final Logger log = LoggerFactory.getLogger(PurchaseCarProvider.class);
+
+ private NotificationProviderService notificationProvider;
+
+
+ public void setNotificationProvider(final NotificationProviderService salService) {
+ this.notificationProvider = salService;
+ }
+
+
+ @Override
+ public Future<RpcResult<Void>> buyCar(BuyCarInput input) {
+ log.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+ SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+ CarBoughtBuilder carBoughtBuilder = new CarBoughtBuilder();
+ carBoughtBuilder.setCarId(input.getCarId());
+ carBoughtBuilder.setPersonId(input.getPersonId());
+ notificationProvider.publish(carBoughtBuilder.build());
+ futureResult.set(RpcResultBuilder.<Void>success().build());
+ return futureResult;
+ }
+
+ @Override
+ public void close() throws Exception {
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+
+
+import org.opendaylight.controller.clustering.it.listener.PeopleCarListener;
+import org.opendaylight.controller.clustering.it.provider.PeopleProvider;
+import org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+public class ClusteringItProviderModule extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModule {
+ public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.clustering_it_provider.ClusteringItProviderModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ DataBroker dataBrokerService = getDataBrokerDependency();
+ NotificationProviderService notificationProvider = getNotificationServiceDependency();
+
+ // Add routed RPC registration for car purchase
+ final PurchaseCarProvider purchaseCar = new PurchaseCarProvider();
+ purchaseCar.setNotificationProvider(notificationProvider);
+
+ final BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> purchaseCarRpc = getRpcRegistryDependency()
+ .addRoutedRpcImplementation(CarPurchaseService.class, purchaseCar);
+
+ // Add people provider registration
+ final PeopleProvider people = new PeopleProvider();
+ people.setDataProvider(dataBrokerService);
+
+ people.setRpcRegistration(purchaseCarRpc);
+
+ final BindingAwareBroker.RpcRegistration<PeopleService> peopleRpcReg = getRpcRegistryDependency()
+ .addRpcImplementation(PeopleService.class, people);
+
+
+
+ final PeopleCarListener peopleCarListener = new PeopleCarListener();
+ peopleCarListener.setDataProvider(dataBrokerService);
+
+ final ListenerRegistration<NotificationListener> listenerReg =
+ getNotificationServiceDependency().registerNotificationListener( peopleCarListener );
+
+ // Wrap the providers and registrations as an AutoCloseable that closes all
+ // md-sal registrations at close()
+ final class AutoCloseableClusteringTestApp implements AutoCloseable {
+
+ @Override
+ public void close() throws Exception {
+ peopleRpcReg.close();
+ purchaseCarRpc.close();
+ people.close();
+ purchaseCar.close();
+ listenerReg.close();
+ }
+ }
+
+ AutoCloseable ret = new AutoCloseableClusteringTestApp();
+ return ret;
+ }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: clustering-it-provider yang module local name: clustering-it-provider
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Aug 19 14:44:46 PDT 2014
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+public class ClusteringItProviderModuleFactory extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModuleFactory {
+
+}
--- /dev/null
+module clustering-it-provider {
+
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider";
+ prefix "clustering-it-provider";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; }
+
+ description
+ "This module contains the base YANG definitions for
+ clustering-it-provider implementation.";
+
+ revision "2014-08-19" {
+ description
+ "Initial revision.";
+ }
+
+ // This is the definition of the service implementation as a module identity.
+ identity clustering-it-provider {
+ base config:module-type;
+
+ // Specifies the prefix for generated java classes.
+ config:java-name-prefix ClusteringItProvider;
+ }
+
+ // Augments the 'configuration' choice node under modules/module.
+ augment "/config:modules/config:module/config:configuration" {
+ case clustering-it-provider {
+ when "/config:modules/config:module/config:type = 'clustering-it-provider'";
+
+ container rpc-registry {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-rpc-registry;
+ }
+ }
+ }
+
+ container notification-service {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-notification-service;
+ }
+ }
+ }
+
+ container data-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity mdsal:binding-async-data-broker;
+ }
+ }
+ }
+ }
+ }
+}
<module>toaster-provider</module>
<module>toaster-config</module>
<module>l2switch</module>
+ <module>clustering-test-app</module>
</modules>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
package org.opendaylight.controller.netconf.client;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import io.netty.channel.Channel;
logger.debug("Netconf session {} should use exi.", session);
NetconfStartExiMessage startExiMessage = (NetconfStartExiMessage) sessionPreferences.getStartExiMessage();
tryToInitiateExi(session, startExiMessage);
- // Exi is not supported, release session immediately
} else {
+ // Exi is not supported, release session immediately
logger.debug("Netconf session {} isn't capable of using exi.", session);
negotiationSuccessful(session);
}
private long extractSessionId(final Document doc) {
final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+ Preconditions.checkState(sessionIdNode != null, "Session id element not found in server response");
String textContent = sessionIdNode.getTextContent();
if (textContent == null || textContent.equals("")) {
throw new IllegalStateException("Session id not received from server");
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+
+import java.net.InetSocketAddress;
+
+public class NetconfClientConfigurationTest {
+ @Test
+ public void testNetconfClientConfiguration() throws Exception {
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategy strategy = Mockito.mock(ReconnectStrategy.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ NetconfClientConfiguration cfg = NetconfClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(strategy).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withAuthHandler(handler).build();
+
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getReconnectStrategy());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+
+public class NetconfClientDispatcherImplTest {
+ @Test
+ public void testNetconfClientDispatcherImpl() throws Exception {
+ EventLoopGroup bossGroup = Mockito.mock(EventLoopGroup.class);
+ EventLoopGroup workerGroup = Mockito.mock(EventLoopGroup.class);
+ Timer timer = new HashedWheelTimer();
+
+ ChannelFuture chf = Mockito.mock(ChannelFuture.class);
+ Channel ch = Mockito.mock(Channel.class);
+ doReturn(ch).when(chf).channel();
+ Throwable thr = Mockito.mock(Throwable.class);
+ doReturn(chf).when(workerGroup).register(any(Channel.class));
+
+ ChannelPromise promise = Mockito.mock(ChannelPromise.class);
+ doReturn(promise).when(chf).addListener(any(GenericFutureListener.class));
+ doReturn(thr).when(chf).cause();
+
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory reconnectStrategyFactory = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ doReturn(5).when(reconnect).getConnectTimeout();
+ doReturn("").when(reconnect).toString();
+ doReturn("").when(handler).toString();
+ doReturn("").when(reconnectStrategyFactory).toString();
+ doReturn(reconnect).when(reconnectStrategyFactory).createReconnectStrategy();
+
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfReconnectingClientConfiguration cfg2 = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.TCP).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfClientDispatcherImpl dispatcher = new NetconfClientDispatcherImpl(bossGroup, workerGroup, timer);
+ Future<NetconfClientSession> sshSession = dispatcher.createClient(cfg);
+ Future<NetconfClientSession> tcpSession = dispatcher.createClient(cfg2);
+
+ Future<Void> sshReconn = dispatcher.createReconnectingClient(cfg);
+ Future<Void> tcpReconn = dispatcher.createReconnectingClient(cfg2);
+
+ assertNotNull(sshSession);
+ assertNotNull(tcpSession);
+ assertNotNull(sshReconn);
+ assertNotNull(tcpReconn);
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.Promise;
+import org.apache.sshd.common.SessionListener;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionNegotiatorFactoryTest {
+ @Test
+ public void testGetSessionNegotiator() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class);
+ doReturn(sessionListener).when(listenerFactory).getSessionListener();
+
+ Channel channel = mock(Channel.class);
+ Promise promise = mock(Promise.class);
+ NetconfClientSessionNegotiatorFactory negotiatorFactory = new NetconfClientSessionNegotiatorFactory(timer,
+ Optional.<NetconfHelloMessageAdditionalHeader>absent(), 200L);
+
+ SessionNegotiator sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
+ assertNotNull(sessionNegotiator);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.*;
+import io.netty.handler.ssl.SslHandler;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.collections.Sets;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import io.netty.util.Timer;
+import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.w3c.dom.Document;
+import java.util.Set;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class NetconfClientSessionNegotiatorTest {
+
+ private NetconfHelloMessage helloMessage;
+ private ChannelPipeline pipeline;
+ private ChannelFuture future;
+ private Channel channel;
+ private ChannelInboundHandlerAdapter channelInboundHandlerAdapter;
+
+ @Before
+ public void setUp() throws Exception {
+ helloMessage = NetconfHelloMessage.createClientHello(Sets.newSet("exi:1.0"), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+ pipeline = mockChannelPipeline();
+ future = mockChannelFuture();
+ channel = mockChannel();
+ System.out.println("setup done");
+ }
+
+ private ChannelHandler mockChannelHandler() {
+ ChannelHandler handler = mock(ChannelHandler.class);
+ return handler;
+ }
+
+ private Channel mockChannel() {
+ Channel channel = mock(Channel.class);
+ ChannelHandler channelHandler = mockChannelHandler();
+ doReturn("").when(channel).toString();
+ doReturn(future).when(channel).close();
+ doReturn(future).when(channel).writeAndFlush(anyObject());
+ doReturn(true).when(channel).isOpen();
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(pipeline).toString();
+ doReturn(pipeline).when(pipeline).remove(any(ChannelHandler.class));
+ doReturn(channelHandler).when(pipeline).remove(anyString());
+ return channel;
+ }
+
+ private ChannelFuture mockChannelFuture() {
+ ChannelFuture future = mock(ChannelFuture.class);
+ doReturn(future).when(future).addListener(any(GenericFutureListener.class));
+ return future;
+ }
+
+ private ChannelPipeline mockChannelPipeline() {
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ ChannelHandler handler = mock(ChannelHandler.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ doReturn(null).when(pipeline).get(SslHandler.class);
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+ doReturn(handler).when(pipeline).replace(anyString(), anyString(), any(ChunkedFramingMechanismEncoder.class));
+
+ NetconfXMLToHelloMessageDecoder messageDecoder = new NetconfXMLToHelloMessageDecoder();
+ doReturn(messageDecoder).when(pipeline).replace(anyString(), anyString(), any(NetconfXMLToMessageDecoder.class));
+ doReturn(pipeline).when(pipeline).replace(any(ChannelHandler.class), anyString(), any(NetconfClientSession.class));
+ return pipeline;
+ }
+
+ private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise,
+ NetconfMessage startExi) {
+ ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class);
+ NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi);
+ doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class));
+
+ long timeout = 10L;
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ return new NetconfClientSessionNegotiator(preferences, promise, channel, timer, sessionListener, timeout);
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiator() throws Exception {
+ Promise promise = mock(Promise.class);
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("a", "b");
+ NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ negotiator.handleMessage(helloServerMessage);
+ verify(promise).setSuccess(anyObject());
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiatorWithEXI() throws Exception {
+ Promise promise = mock(Promise.class);
+ EXIOptions exiOptions = new EXIOptions();
+ NetconfStartExiMessage exiMessage = NetconfStartExiMessage.create(exiOptions, "msg-id");
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("exi:1.0");
+ NetconfHelloMessage helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+
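+ // Capture the handler the negotiator installs via addAfter() to await the start-exi rpc-reply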
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+ channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]);
+ return null;
+ }
+ }).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+
+ ChannelHandlerContext handlerContext = mock(ChannelHandlerContext.class);
+ doReturn(pipeline).when(handlerContext).pipeline();
+ negotiator.handleMessage(helloMessage);
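+ // Feed the server's <ok/> rpc-reply to the captured handler to complete EXI negotiation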
+ Document expectedResult = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ channelInboundHandlerAdapter.channelRead(handlerContext, new NetconfMessage(expectedResult));
+
+ verify(promise).setSuccess(anyObject());
+
+ // two replace calls for the exi handlers, two for the hello message handlers
+ verify(pipeline, times(4)).replace(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.openexi.proc.common.EXIOptions;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionTest {
+
+ @Mock
+ ChannelHandler channelHandler;
+
+ @Mock
+ Channel channel;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @Test
+ public void testNetconfClientSession() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ long sessId = 20L;
+ Collection<String> caps = Lists.newArrayList("cap1", "cap2");
+
+ NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+
+ Mockito.doReturn(pipeline).when(channel).pipeline();
+ Mockito.doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+ Mockito.doReturn("").when(channelHandler).toString();
+
+ NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps);
+ session.addExiHandlers(codec);
+ session.stopExiCommunication();
+
+ assertEquals(caps, session.getServerCapabilities());
+ assertEquals(session, session.thisInstance());
+
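+ // addExiHandlers and stopExiCommunication each swap the message decoder and encoder, hence four replace calls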
+ Mockito.verify(pipeline, Mockito.times(4)).replace(anyString(), anyString(), Mockito.any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.config.yang.protocol.framework.NeverReconnectStrategyFactoryModule;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+
+public class NetconfReconnectingClientConfigurationTest {
+ @Test
+ public void testNetconfReconnectingClientConfiguration() throws Exception {
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory strategy = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).