From: Alessandro Boch Date: Wed, 17 Sep 2014 05:39:50 +0000 (+0000) Subject: Merge "Bug 1805: Fixed 2 bugs in ICMP.computeChecksum()." X-Git-Tag: release/helium~58 X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=commitdiff_plain;h=67534d812ccdfc1d152660165e4c69e972f60671;hp=6d8b00f5a86b543be5d9fa300970383c128250d3;p=controller.git Merge "Bug 1805: Fixed 2 bugs in ICMP.computeChecksum()." --- diff --git a/features/mdsal/pom.xml b/features/mdsal/pom.xml index 9b81f81ae4..960dfb37a1 100644 --- a/features/mdsal/pom.xml +++ b/features/mdsal/pom.xml @@ -242,6 +242,23 @@ xml config + + org.opendaylight.controller.samples + clustering-it-model + ${mdsal.version} + + + org.opendaylight.controller.samples + clustering-it-provider + ${mdsal.version} + + + org.opendaylight.controller.samples + clustering-it-config + ${mdsal.version} + xml + config + org.opendaylight.controller sal-rest-docgen diff --git a/features/mdsal/src/main/resources/features.xml b/features/mdsal/src/main/resources/features.xml index 132337828e..b6091ac161 100644 --- a/features/mdsal/src/main/resources/features.xml +++ b/features/mdsal/src/main/resources/features.xml @@ -10,9 +10,9 @@ mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features odl-mdsal-broker + odl-mdsal-clustering odl-restconf odl-mdsal-xsql - odl-mdsal-clustering odl-toaster @@ -35,6 +35,9 @@ odl-mdsal-broker war + + mvn:org.opendaylight.controller/sal-remote/${project.version} mvn:org.opendaylight.controller/sal-rest-connector/${project.version} mvn:com.google.code.gson/gson/${gson.version} mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version} @@ -47,7 +50,6 @@ mvn:io.netty/netty-common/${netty.version} mvn:io.netty/netty-handler/${netty.version} mvn:io.netty/netty-transport/${netty.version} - mvn:org.opendaylight.controller/sal-remote/${project.version} mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config @@ -108,4 +110,15 @@ mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf + + + odl-mdsal-clustering + odl-restconf + odl-yangtools-models + mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version} + mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version} + mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config + mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf + mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf + diff --git a/features/nsf/pom.xml b/features/nsf/pom.xml index 875ca2ca61..e677d491bc 100644 --- a/features/nsf/pom.xml +++ b/features/nsf/pom.xml @@ -264,6 +264,18 @@ org.opendaylight.controller.thirdparty net.sf.jung2 + + org.eclipse.persistence + org.eclipse.persistence.antlr + + + org.eclipse.persistence + org.eclipse.persistence.core + + + org.eclipse.persistence + org.eclipse.persistence.moxy + diff --git a/features/nsf/src/main/resources/features.xml b/features/nsf/src/main/resources/features.xml index 8dc51f1644..e8f7bc1e5c 100644 --- a/features/nsf/src/main/resources/features.xml +++ b/features/nsf/src/main/resources/features.xml @@ -67,6 +67,9 @@ mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version} mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version} 
mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version} + mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version} + mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version} + mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version} mvn:org.opendaylight.controller/networkconfig.neutron.northbound/${networkconfig.neutron.northbound.version} mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version} mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version} diff --git a/opendaylight/archetypes/opendaylight-configfile-archetype/pom.xml b/opendaylight/archetypes/opendaylight-configfile-archetype/pom.xml index 38c86164e9..56342218a0 100644 --- a/opendaylight/archetypes/opendaylight-configfile-archetype/pom.xml +++ b/opendaylight/archetypes/opendaylight-configfile-archetype/pom.xml @@ -38,15 +38,15 @@ opendaylight-release - http://nexus.opendaylight.org/content/repositories/opendaylight.release/ + ${nexusproxy}/repositories/opendaylight.release/ opendaylight-snapshot - http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/ + ${nexusproxy}/repositories/opendaylight.snapshot/ website - dav:http://nexus.opendaylight.org/content/sites/site/sal-parent + dav:${nexusproxy}/sites/site/sal-parent diff --git a/opendaylight/archetypes/opendaylight-karaf-distro-archetype/src/main/resources/archetype-resources/pom.xml b/opendaylight/archetypes/opendaylight-karaf-distro-archetype/src/main/resources/archetype-resources/pom.xml index 965c619695..fdc60625c8 100644 --- a/opendaylight/archetypes/opendaylight-karaf-distro-archetype/src/main/resources/archetype-resources/pom.xml +++ b/opendaylight/archetypes/opendaylight-karaf-distro-archetype/src/main/resources/archetype-resources/pom.xml @@ -245,6 +245,22 @@ false + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/assembly/system + false + true + true + true + true + true + + diff --git a/opendaylight/commons/opendaylight/pom.xml b/opendaylight/commons/opendaylight/pom.xml index 2e817b97f3..e3ffbf356a 100644 --- a/opendaylight/commons/opendaylight/pom.xml +++ b/opendaylight/commons/opendaylight/pom.xml @@ -180,7 +180,8 @@ java target/code-coverage/jacoco.exec target/code-coverage/jacoco-it.exec - org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages + org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2 + Sonar way with Findbugs 1.0.0 1.2.1 3.1.4.RELEASE @@ -211,7 +212,8 @@ 2013.09.07.4-SNAPSHOT 1.0.0-SNAPSHOT 0.6.2-SNAPSHOT - 0.12.0 + 0.12.0 + 0.9.7 @@ -2021,6 +2023,17 @@ xml runtime + + + org.openjdk.jmh + jmh-core + ${jmh.version} + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + @@ -2568,6 +2581,10 @@ + true @@ -2621,16 +2638,24 @@ http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/ + + opendaylight-release - http://nexus.opendaylight.org/content/repositories/opendaylight.release/ + ${nexusproxy}/repositories/opendaylight.release/ opendaylight-snapshot - http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/ + ${nexusproxy}/repositories/opendaylight.snapshot/ diff --git a/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv 
b/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv new file mode 100755 index 0000000000..947c65f6bd --- /dev/null +++ b/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv @@ -0,0 +1,55 @@ +#!/bin/sh +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf +# script; for example karaf, start, stop, admin, client, ... +# +# if [ "$KARAF_SCRIPT" == "SCRIPT_NAME" ]; then +# Actions go here... +# fi + +# +# general settings which should be applied for all scripts go here; please keep +# in mind that it is possible that scripts might be executed more than once, e.g. +# in example of the start script where the start script is executed first and the +# karaf script afterwards. +# + +# +# The following section shows the possible configuration options for the default +# karaf scripts +# +# export JAVA_HOME # Location of Java installation +# export JAVA_MIN_MEM # Minimum memory for the JVM +# export JAVA_MAX_MEM # Maximum memory for the JVM +# export JAVA_PERM_MEM # Minimum perm memory for the JVM +# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM +# export KARAF_HOME # Karaf home folder +# export KARAF_DATA # Karaf data folder +# export KARAF_BASE # Karaf base folder +# export KARAF_ETC # Karaf etc folder +# export KARAF_OPTS # Additional available Karaf options +# export KARAF_DEBUG # Enable debug mode +if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then + export JAVA_MAX_PERM_MEM="512m" +fi +if [ "x$JAVA_MAX_MEM" = "x" ]; then + export JAVA_MAX_MEM="2048m" +fi + diff --git a/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/etc/custom.properties b/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/etc/custom.properties index e0e2759b37..cdb6542013 100644 --- a/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/etc/custom.properties +++ b/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/etc/custom.properties @@ -127,3 +127,9 @@ java.util.logging.config.file=configuration/tomcat-logging.properties #Hosttracker hostsdb key scheme setting hosttracker.keyscheme=IP +# LISP Flow Mapping configuration +# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings +lisp.mappingOverwrite = true +# Enable the Solicit-Map-Request (SMR) mechanism +lisp.smr = false + diff --git a/opendaylight/md-sal/benchmark-data-store/pom.xml b/opendaylight/md-sal/benchmark-data-store/pom.xml new file mode 100644 index 0000000000..1af2287a10 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/pom.xml @@ -0,0 +1,72 @@ + + + + + sal-parent + org.opendaylight.controller + 1.1-SNAPSHOT + + 4.0.0 + + org.opendaylight.controller + 
benchmark-data-store + + + + org.opendaylight.yangtools + yang-data-impl + + + org.opendaylight.yangtools + yang-parser-impl + + + org.openjdk.jmh + jmh-core + + + org.openjdk.jmh + jmh-generator-annprocess + + + org.opendaylight.controller + sal-inmemory-datastore + + + + + + + org.codehaus.mojo + exec-maven-plugin + + test + java + + -classpath + + org.openjdk.jmh.Main + .* + + + + + run-benchmarks + integration-test + + exec + + + + + + + \ No newline at end of file diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java new file mode 100644 index 0000000000..aa5ef61ce4 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.md.sal.dom.store.benchmark; + +import java.util.concurrent.TimeUnit; +import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore; +import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction; +import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild; +import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode; +import org.opendaylight.yangtools.yang.data.api.schema.MapNode; +import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; +import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder; +import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder; +import org.opendaylight.yangtools.yang.model.api.SchemaContext; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Warmup; + +/** + * @author Lukas Sedlak + */ +public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark { + + private static final int WARMUP_ITERATIONS = 20; + private static final int MEASUREMENT_ITERATIONS = 20; + + private static final int OUTER_LIST_100K = 100000; + private static final int OUTER_LIST_50K = 50000; + private static final int OUTER_LIST_10K = 10000; + + private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K); + private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K); + private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K); + + private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) { + final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount]; + + for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) { + 
paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH) + .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey) + .build(); + } + return paths; + } + + private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1); + private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2); + private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10); + + private static MapNode initInnerListItems(final int count) { + final CollectionNodeBuilder mapEntryBuilder = ImmutableNodes + .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME); + + for (int i = 1; i <= count; ++i) { + mapEntryBuilder + .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i)); + } + return mapEntryBuilder.build(); + } + + private static final NormalizedNode[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST); + private static final NormalizedNode[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST); + private static final NormalizedNode[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST); + + private static NormalizedNode[] initOuterListItems(int outerListItemsCount, MapNode innerList) { + final NormalizedNode[] outerListItems = new NormalizedNode[outerListItemsCount]; + + for (int i = 0; i < outerListItemsCount; ++i) { + int outerListKey = i; + outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey) + .withChild(innerList).build(); + } + return outerListItems; + } + + protected SchemaContext schemaContext; + protected InMemoryDOMDataStore domStore; + + abstract public void setUp() throws Exception; + + abstract public void tearDown(); + + protected void initTestNode() throws Exception { + final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH) + .build(); + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + writeTx.write(testPath, provideOuterListNode()); + + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + + private DataContainerChild provideOuterListNode() { + return ImmutableContainerNodeBuilder + .create() + .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME)) + .withChild( + ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME) + .build()).build(); + } + + @Benchmark + @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception { + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) { + writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]); + } + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + + @Benchmark + @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception { + for (int outerListKey = 0; 
outerListKey < OUTER_LIST_100K; ++outerListKey) { + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]); + + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + } + + @Benchmark + @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception { + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) { + writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]); + } + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + + @Benchmark + @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception { + for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) { + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]); + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + } + + @Benchmark + @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception { + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) { + writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]); + } + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + + @Benchmark + @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS) + public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception { + for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) { + DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction(); + writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]); + DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + cohort.canCommit().get(); + cohort.preCommit().get(); + cohort.commit().get(); + } + } +} diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java new file mode 100644 index 0000000000..024385b2a9 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java @@ -0,0 
+1,55 @@ +/* + * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.md.sal.dom.store.benchmark; + +import java.io.InputStream; +import java.util.Collections; +import java.util.Set; + +import org.opendaylight.yangtools.yang.common.QName; +import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; +import org.opendaylight.yangtools.yang.model.api.Module; +import org.opendaylight.yangtools.yang.model.api.SchemaContext; +import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl; + +/** + * Benchmark Model class loads the odl-datastore-test.yang model from resources. + *
+ * This class serves as facilitator class which holds several references to initialized yang model as static final + * members. + * + * @author Lukas Sedlak + */ +public final class BenchmarkModel { + + public static final QName TEST_QNAME = QName + .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test"); + public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list"); + public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list"); + public static final QName ID_QNAME = QName.create(TEST_QNAME, "id"); + public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name"); + private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang"; + + public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME); + public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build(); + + public static final InputStream getDatastoreBenchmarkInputStream() { + return getInputStream(DATASTORE_TEST_YANG); + } + + private static InputStream getInputStream(final String resourceName) { + return BenchmarkModel.class.getResourceAsStream(resourceName); + } + + public static SchemaContext createTestContext() { + YangParserImpl parser = new YangParserImpl(); + Set modules = parser.parseYangModelsFromStreams(Collections.singletonList( + getDatastoreBenchmarkInputStream())); + return parser.resolveSchemaContext(modules); + } +} diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java new file mode 100644 index 0000000000..4b9d66f4f2 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.md.sal.dom.store.benchmark; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore; +import org.opendaylight.yangtools.util.concurrent.SpecialExecutors; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; + +/** + * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance + * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as BlockingBoundedFastThreadPool + * and DOM Store Executor Service as Blocking Bounded Fast Thread Pool. 
+ * + * @see org.opendaylight.yangtools.util.concurrent.SpecialExecutors + * @see org.opendaylight.controller.md.sal.dom.store.benchmark.AbstractInMemoryDatastoreWriteTransactionBenchmark + * + * @author Lukas Sedlak + */ +@State(Scope.Thread) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@Fork(1) +public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark { + + private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20; + private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000; + private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000; + + @Setup(Level.Trial) + public void setUp() throws Exception { + final String name = "DS_BENCHMARK"; + final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool( + MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL"); + + final ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor( + MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name ); + + domStore = new InMemoryDOMDataStore(name, domStoreExecutor, + dataChangeListenerExecutor); + schemaContext = BenchmarkModel.createTestContext(); + domStore.onGlobalContextUpdated(schemaContext); + initTestNode(); + } + + @TearDown + public void tearDown() { + schemaContext = null; + domStore = null; + } +} diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java new file mode 100644 index 0000000000..6a0ceccd53 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.md.sal.dom.store.benchmark; + +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore; +import org.opendaylight.yangtools.util.concurrent.SpecialExecutors; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; + +/** + * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance + * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Blocking Bounded Fast Thread Pool + * and DOM Store Executor Service as Same Thread Executor. 
+ * + * @author Lukas Sedlak + */ +@State(Scope.Thread) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@Fork(1) +public class InMemoryDataStoreWithSameThreadedExecutorBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark { + + private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20; + private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000; + + @Setup(Level.Trial) + public void setUp() throws Exception { + final String name = "DS_BENCHMARK"; + final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool( + MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL"); + + domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(), + dataChangeListenerExecutor); + schemaContext = BenchmarkModel.createTestContext(); + domStore.onGlobalContextUpdated(schemaContext); + initTestNode(); + } + + @TearDown + public void tearDown() { + schemaContext = null; + domStore = null; + } +} diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java new file mode 100644 index 0000000000..d3dda96af5 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.md.sal.dom.store.benchmark; + +import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.TimeUnit; +import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.TearDown; + +/** + * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance + * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Same Thread Executor + * and DOM Store Executor Service as Same Thread Executor. 
+ * + * @author Lukas Sedlak + */ +@State(Scope.Thread) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@Fork(1) +public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark { + + @Setup(Level.Trial) + public void setUp() throws Exception { + domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(), + MoreExecutors.sameThreadExecutor()); + schemaContext = BenchmarkModel.createTestContext(); + domStore.onGlobalContextUpdated(schemaContext); + initTestNode(); + } + + @TearDown + public void tearDown() { + schemaContext = null; + domStore = null; + } +} diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang b/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang new file mode 100644 index 0000000000..730ca17173 --- /dev/null +++ b/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang @@ -0,0 +1,42 @@ +module odl-datastore-test { + yang-version 1; + namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test"; + prefix "store-test"; + + revision "2014-03-13" { + description "Initial revision."; + } + + container test { + list outer-list { + key id; + leaf id { + type int32; + } + choice outer-choice { + case one { + leaf one { + type string; + } + } + case two-three { + leaf two { + type string; + } + leaf three { + type string; + } + } + } + list inner-list { + key name; + leaf name { + type int32; + } + leaf value { + type string; + } + } + } + } +} \ No newline at end of file diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/FromSalConversionsUtils.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/FromSalConversionsUtils.java index 1b648dc98c..ecf1a94c18 100644 --- a/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/FromSalConversionsUtils.java +++ b/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/FromSalConversionsUtils.java @@ -61,10 +61,16 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026 import com.google.common.net.InetAddresses; -public class FromSalConversionsUtils { +/** + * MD-SAL to AD-SAL conversions collection + */ +public final class FromSalConversionsUtils { - private FromSalConversionsUtils() { + /** http://en.wikipedia.org/wiki/IPv4#Packet_structure (end of octet number 1, bit 14.+15.) 
*/ + public static final int ENC_FIELD_BIT_SIZE = 2; + private FromSalConversionsUtils() { + throw new IllegalAccessError("forcing no instance for factory"); } @SuppressWarnings("unused") @@ -469,5 +475,12 @@ public class FromSalConversionsUtils { return true; } + /** + * @param nwDscp NW-DSCP + * @return shifted to NW-TOS (with empty ECN part) + */ + public static int dscpToTos(int nwDscp) { + return (short) (nwDscp << ENC_FIELD_BIT_SIZE); + } } diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/MDFlowMapping.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/MDFlowMapping.java index 5837e35b3a..00511bc744 100644 --- a/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/MDFlowMapping.java +++ b/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/MDFlowMapping.java @@ -315,7 +315,7 @@ public final class MDFlowMapping { private static SetNwTosActionCase _toAction(final SetNwTos sourceAction) { return new SetNwTosActionCaseBuilder() - .setSetNwTosAction(new SetNwTosActionBuilder().setTos(sourceAction.getNwTos()).build()) + .setSetNwTosAction(new SetNwTosActionBuilder().setTos(FromSalConversionsUtils.dscpToTos(sourceAction.getNwTos())).build()) .build(); } diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ToSalConversionsUtils.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ToSalConversionsUtils.java index 28dd57c3b7..dcc1a4660b 100644 --- a/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ToSalConversionsUtils.java +++ b/opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ToSalConversionsUtils.java @@ -128,7 +128,7 @@ public class ToSalConversionsUtils { private static final Logger LOG = LoggerFactory.getLogger(ToSalConversionsUtils.class); private ToSalConversionsUtils() { - + throw new IllegalAccessError("forcing no instance for factory"); } public static Flow toFlow(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow source, Node node) { @@ -287,7 +287,7 @@ public class ToSalConversionsUtils { } else if (sourceAction instanceof SetNwTosActionCase) { Integer tos = ((SetNwTosActionCase) sourceAction).getSetNwTosAction().getTos(); if (tos != null) { - targetAction.add(new SetNwTos(tos)); + targetAction.add(new SetNwTos(ToSalConversionsUtils.tosToNwDscp(tos))); } } else if (sourceAction instanceof SetTpDstActionCase) { PortNumber port = ((SetTpDstActionCase) sourceAction).getSetTpDstAction().getPort(); @@ -643,4 +643,12 @@ public class ToSalConversionsUtils { return mac; } + + /** + * @param nwTos NW-TOS + * @return shifted to NW-DSCP + */ + public static int tosToNwDscp(int nwTos) { + return (short) (nwTos >>> FromSalConversionsUtils.ENC_FIELD_BIT_SIZE); + } } diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java new file mode 100644 index 0000000000..b09e816f61 --- /dev/null +++ 
b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java @@ -0,0 +1,31 @@ +/** + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.sal.compatibility.test; + +import org.junit.Assert; +import org.junit.Test; +import org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils; + +/** + * test of {@link FromSalConversionsUtils} + */ +public class FromSalConversionsUtilsTest { + + /** + * Test method for {@link org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils#dscpToTos(int)}. + */ + @Test + public void testDscpToTos() { + Assert.assertEquals(0, FromSalConversionsUtils.dscpToTos(0)); + Assert.assertEquals(4, FromSalConversionsUtils.dscpToTos(1)); + Assert.assertEquals(252, FromSalConversionsUtils.dscpToTos(63)); + Assert.assertEquals(256, FromSalConversionsUtils.dscpToTos(64)); + Assert.assertEquals(-4, FromSalConversionsUtils.dscpToTos(-1)); + } + +} diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java index 9f787b7e39..98df90112d 100644 --- a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java +++ b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java @@ -293,7 +293,7 @@ public class TestFromSalConversionsUtils { } assertTrue("Ipv4 address wasn't found.", ipv4AddressFound); } else if (innerAction instanceof SetNwTosActionCase) { - assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 63, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos()); + assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 252, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos()); } else if (innerAction instanceof SetNwDstActionCase) { Address address = ((SetNwDstActionCase) innerAction).getSetNwDstAction().getAddress(); boolean ipv4AddressFound = false; diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestToSalConversionsUtils.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestToSalConversionsUtils.java index 60b77394c1..16d0bb424d 100644 --- a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestToSalConversionsUtils.java +++ b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestToSalConversionsUtils.java @@ -499,7 +499,7 @@ public class TestToSalConversionsUtils { private void prepareActionSetNwTos(SetNwTosActionCaseBuilder wrapper) { SetNwTosActionBuilder setNwTosActionBuilder = new SetNwTosActionBuilder(); - setNwTosActionBuilder.setTos(63); + setNwTosActionBuilder.setTos(252); wrapper.setSetNwTosAction(setNwTosActionBuilder.build()); } 
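
The DSCP/TOS hunks above rely on simple bit arithmetic: the 6-bit DSCP value occupies the upper bits of the IPv4 ToS octet, and the lowest ENC_FIELD_BIT_SIZE (2) bits are the ECN field, so dscpToTos() shifts left by two and tosToNwDscp() shifts right by two (hence the test expectations moving from 63 to 252). A minimal standalone sketch of the same arithmetic; the class name below is illustrative and not part of the patch:

// Sketch of the DSCP <-> ToS conversion introduced by this change.
// DSCP occupies the upper 6 bits of the ToS octet; the lower 2 bits carry ECN.
public final class DscpTosSketch {
    private static final int ECN_FIELD_BITS = 2; // mirrors ENC_FIELD_BIT_SIZE in the patch

    static int dscpToTos(final int dscp) {
        return dscp << ECN_FIELD_BITS;      // DSCP 63 -> ToS 252
    }

    static int tosToDscp(final int tos) {
        return tos >>> ECN_FIELD_BITS;      // ToS 252 -> DSCP 63
    }

    public static void main(final String[] args) {
        System.out.println(dscpToTos(63));  // prints 252
        System.out.println(tosToDscp(252)); // prints 63
    }
}
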
diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java new file mode 100644 index 0000000000..aa25c18317 --- /dev/null +++ b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java @@ -0,0 +1,31 @@ +/** + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ +package org.opendaylight.controller.sal.compatibility.test; + +import org.junit.Assert; +import org.junit.Test; +import org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils; + +/** + * test of {@link ToSalConversionsUtils} + */ +public class ToSalConversionsUtilsTest { + + /** + * Test method for {@link org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils#tosToNwDscp(int)}. + */ + @Test + public void testTosToNwDscp() { + Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(0)); + Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(1)); + Assert.assertEquals(1, ToSalConversionsUtils.tosToNwDscp(4)); + Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(252)); + Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(253)); + Assert.assertEquals(-1, ToSalConversionsUtils.tosToNwDscp(-1)); + } +} diff --git a/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/FlowForwarder.java b/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/FlowForwarder.java index e0c16a0806..9951bf7448 100644 --- a/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/FlowForwarder.java +++ b/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/FlowForwarder.java @@ -77,7 +77,7 @@ public class FlowForwarder extends AbstractListeningCommiter { if (tableIdValidationPrecondition(tableKey, removeDataObj)) { final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj); builder.setFlowRef(new FlowRef(identifier)); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey))); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); this.provider.getSalFlowService().removeFlow(builder.build()); @@ -93,7 +93,7 @@ public class FlowForwarder extends AbstractListeningCommiter { if (tableIdValidationPrecondition(tableKey, update)) { final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder(); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setFlowRef(new FlowRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build()); @@ -112,7 +112,7 @@ public class FlowForwarder extends AbstractListeningCommiter { if (tableIdValidationPrecondition(tableKey, addDataObj)) { final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj); - 
builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setFlowRef(new FlowRef(identifier)); builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey))); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); diff --git a/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/GroupForwarder.java b/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/GroupForwarder.java index 72e35ce8db..1b2c532323 100644 --- a/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/GroupForwarder.java +++ b/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/GroupForwarder.java @@ -78,7 +78,7 @@ public class GroupForwarder extends AbstractListeningCommiter { final Group group = (removeDataObj); final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setGroupRef(new GroupRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); this.provider.getSalGroupService().removeGroup(builder.build()); @@ -93,7 +93,7 @@ public class GroupForwarder extends AbstractListeningCommiter { final Group updatedGroup = (update); final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder(); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setGroupRef(new GroupRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build()); @@ -109,7 +109,7 @@ public class GroupForwarder extends AbstractListeningCommiter { final Group group = (addDataObj); final AddGroupInputBuilder builder = new AddGroupInputBuilder(group); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setGroupRef(new GroupRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); this.provider.getSalGroupService().addGroup(builder.build()); diff --git a/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/MeterForwarder.java b/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/MeterForwarder.java index 8a805b0297..2f3de2a171 100644 --- a/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/MeterForwarder.java +++ b/opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/MeterForwarder.java @@ -77,7 +77,7 @@ public class MeterForwarder extends AbstractListeningCommiter { final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setMeterRef(new MeterRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); this.provider.getSalMeterService().removeMeter(builder.build()); @@ -90,7 +90,7 @@ public class MeterForwarder extends AbstractListeningCommiter { final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder(); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); 
builder.setMeterRef(new MeterRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build()); @@ -105,7 +105,7 @@ public class MeterForwarder extends AbstractListeningCommiter { final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj); - builder.setNode(new NodeRef(nodeIdent)); + builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class))); builder.setMeterRef(new MeterRef(identifier)); builder.setTransactionUri(new Uri(provider.getNewTransactionId())); this.provider.getSalMeterService().addMeter(builder.build()); diff --git a/opendaylight/md-sal/pom.xml b/opendaylight/md-sal/pom.xml index ce830eaa62..71a0de9939 100644 --- a/opendaylight/md-sal/pom.xml +++ b/opendaylight/md-sal/pom.xml @@ -216,5 +216,14 @@ sal-binding-dom-it + + benchmarks + + false + + + benchmark-data-store + + \ No newline at end of file diff --git a/opendaylight/md-sal/sal-akka-raft/pom.xml b/opendaylight/md-sal/sal-akka-raft/pom.xml index 98c81c267f..e68e781525 100644 --- a/opendaylight/md-sal/sal-akka-raft/pom.xml +++ b/opendaylight/md-sal/sal-akka-raft/pom.xml @@ -99,6 +99,7 @@ ${project.groupId}.${project.artifactId} org.opendaylight.cluster.raft * + *
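
The recurring edit in the FlowForwarder, GroupForwarder and MeterForwarder hunks above replaces new NodeRef(nodeIdent) with new NodeRef(nodeIdent.firstIdentifierOf(Node.class)): the identifier handed to the commit listener can point below the node (table, flow, group, meter), while the RPC input is expected to reference the Node itself, so the path is trimmed to its first Node path argument. A small sketch of that idiom, assuming the usual MD-SAL inventory imports (the package paths and class name below are assumptions, not taken from the patch):

import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class NodeRefSketch {
    // Trim whatever identifier the data change listener delivered down to the Node
    // level before wrapping it in a NodeRef, mirroring the forwarder changes above.
    static NodeRef toNodeRef(final InstanceIdentifier<? extends DataObject> nodeIdent) {
        final InstanceIdentifier<Node> nodePath = nodeIdent.firstIdentifierOf(Node.class);
        return new NodeRef(nodePath);
    }
}

Separately, the md-sal pom change above introduces a "benchmarks" profile (disabled by default) that pulls in the new benchmark-data-store module, whose exec-maven-plugin execution launches org.openjdk.jmh.Main during the integration-test phase to run the JMH write-transaction benchmarks added in this commit.
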
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java index c4ff108611..3bfdf732cf 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java @@ -67,11 +67,15 @@ public class ExampleActor extends RaftActor { } } else if (message instanceof PrintState) { - LOG.debug("State of the node:{} has entries={}, {}", - getId(), state.size(), getReplicatedLogState()); + if(LOG.isDebugEnabled()) { + LOG.debug("State of the node:{} has entries={}, {}", + getId(), state.size(), getReplicatedLogState()); + } } else if (message instanceof PrintRole) { - LOG.debug("{} = {}, Peers={}", getId(), getRaftState(),getPeers()); + if(LOG.isDebugEnabled()) { + LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers()); + } } else { super.onReceiveCommand(message); @@ -106,7 +110,9 @@ public class ExampleActor extends RaftActor { } catch (Exception e) { LOG.error("Exception in applying snapshot", e); } - LOG.debug("Snapshot applied to state :" + ((HashMap) state).size()); + if(LOG.isDebugEnabled()) { + LOG.debug("Snapshot applied to state :" + ((HashMap) state).size()); + } } private ByteString fromObject(Object snapshot) throws Exception { diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java index 75c237f503..9d06f63604 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java @@ -18,7 +18,7 @@ import java.util.concurrent.TimeUnit; */ public class DefaultConfigParamsImpl implements ConfigParams { - private static final int SNAPSHOT_BATCH_COUNT = 100000; + private static final int SNAPSHOT_BATCH_COUNT = 20000; /** * The maximum election time variance diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java index c8cbcca6e8..8270f2949a 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java @@ -96,7 +96,7 @@ public abstract class RaftActor extends UntypedPersistentActor { * This context should NOT be passed directly to any other actor it is * only to be consumed by the RaftActorBehaviors */ - private RaftActorContext context; + protected RaftActorContext context; /** * The in-memory journal @@ -123,7 +123,7 @@ public abstract class RaftActor extends UntypedPersistentActor { @Override public void onReceiveRecover(Object message) { if (message instanceof SnapshotOffer) { - LOG.debug("SnapshotOffer called.."); + LOG.info("SnapshotOffer called.."); SnapshotOffer offer = (SnapshotOffer) message; Snapshot snapshot = (Snapshot) offer.snapshot(); @@ -134,11 +134,13 @@ public abstract class RaftActor extends UntypedPersistentActor { 
context.setReplicatedLog(replicatedLog); context.setLastApplied(snapshot.getLastAppliedIndex()); + context.setCommitIndex(snapshot.getLastAppliedIndex()); - LOG.debug("Applied snapshot to replicatedLog. " + - "snapshotIndex={}, snapshotTerm={}, journal-size={}", + LOG.info("Applied snapshot to replicatedLog. " + + "snapshotIndex={}, snapshotTerm={}, journal-size={}", replicatedLog.snapshotIndex, replicatedLog.snapshotTerm, - replicatedLog.size()); + replicatedLog.size() + ); // Apply the snapshot to the actors state applySnapshot(ByteString.copyFrom(snapshot.getState())); @@ -151,12 +153,16 @@ public abstract class RaftActor extends UntypedPersistentActor { applyState(null, "recovery", logEntry.getData()); context.setLastApplied(logEntry.getIndex()); context.setCommitIndex(logEntry.getIndex()); + } else if (message instanceof DeleteEntries) { replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex()); + } else if (message instanceof UpdateElectionTerm) { - context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor()); + context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), + ((UpdateElectionTerm) message).getVotedFor()); + } else if (message instanceof RecoveryCompleted) { - LOG.debug( + LOG.info( "RecoveryCompleted - Switching actor to Follower - " + "Persistence Id = " + persistenceId() + " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " + @@ -172,9 +178,11 @@ public abstract class RaftActor extends UntypedPersistentActor { if (message instanceof ApplyState){ ApplyState applyState = (ApplyState) message; - LOG.debug("Applying state for log index {} data {}", - applyState.getReplicatedLogEntry().getIndex(), - applyState.getReplicatedLogEntry().getData()); + if(LOG.isDebugEnabled()) { + LOG.debug("Applying state for log index {} data {}", + applyState.getReplicatedLogEntry().getIndex(), + applyState.getReplicatedLogEntry().getData()); + } applyState(applyState.getClientActor(), applyState.getIdentifier(), applyState.getReplicatedLogEntry().getData()); @@ -182,9 +190,12 @@ public abstract class RaftActor extends UntypedPersistentActor { } else if(message instanceof ApplySnapshot ) { Snapshot snapshot = ((ApplySnapshot) message).getSnapshot(); - LOG.debug("ApplySnapshot called on Follower Actor " + - "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(), - snapshot.getLastAppliedTerm()); + if(LOG.isDebugEnabled()) { + LOG.debug("ApplySnapshot called on Follower Actor " + + "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(), + snapshot.getLastAppliedTerm() + ); + } applySnapshot(ByteString.copyFrom(snapshot.getState())); //clears the followers log, sets the snapshot index to ensure adjusted-index works @@ -236,23 +247,25 @@ public abstract class RaftActor extends UntypedPersistentActor { context.removePeer(rrp.getName()); } else if (message instanceof CaptureSnapshot) { - LOG.debug("CaptureSnapshot received by actor"); + LOG.info("CaptureSnapshot received by actor"); CaptureSnapshot cs = (CaptureSnapshot)message; captureSnapshot = cs; createSnapshot(); } else if (message instanceof CaptureSnapshotReply){ - LOG.debug("CaptureSnapshotReply received by actor"); + LOG.info("CaptureSnapshotReply received by actor"); CaptureSnapshotReply csr = (CaptureSnapshotReply) message; ByteString stateInBytes = csr.getSnapshot(); - LOG.debug("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size()); + LOG.info("CaptureSnapshotReply stateInBytes size:{}", 
stateInBytes.size()); handleCaptureSnapshotReply(stateInBytes); } else { if (!(message instanceof AppendEntriesMessages.AppendEntries) && !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) { - LOG.debug("onReceiveCommand: message:" + message.getClass()); + if(LOG.isDebugEnabled()) { + LOG.debug("onReceiveCommand: message:" + message.getClass()); + } } RaftState state = @@ -262,6 +275,8 @@ public abstract class RaftActor extends UntypedPersistentActor { if(oldBehavior != currentBehavior){ onStateChanged(); } + + onLeaderChanged(oldBehavior.getLeaderId(), currentBehavior.getLeaderId()); } } @@ -291,7 +306,9 @@ public abstract class RaftActor extends UntypedPersistentActor { context.getReplicatedLog().lastIndex() + 1, context.getTermInformation().getCurrentTerm(), data); - LOG.debug("Persist data {}", replicatedLogEntry); + if(LOG.isDebugEnabled()) { + LOG.debug("Persist data {}", replicatedLogEntry); + } replicatedLog .appendAndPersist(clientActor, identifier, replicatedLogEntry); @@ -426,6 +443,8 @@ public abstract class RaftActor extends UntypedPersistentActor { */ protected abstract void onStateChanged(); + protected void onLeaderChanged(String oldLeader, String newLeader){}; + private RaftActorBehavior switchBehavior(RaftState state) { if (currentBehavior != null) { if (currentBehavior.state() == state) { @@ -478,8 +497,10 @@ public abstract class RaftActor extends UntypedPersistentActor { return null; } String peerAddress = context.getPeerAddress(leaderId); - LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = " - + peerAddress); + if(LOG.isDebugEnabled()) { + LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = " + + peerAddress); + } return peerAddress; } @@ -579,10 +600,13 @@ public abstract class RaftActor extends UntypedPersistentActor { lastAppliedTerm = lastAppliedEntry.getTerm(); } - LOG.debug("Snapshot Capture logSize: {}", journal.size()); - LOG.debug("Snapshot Capture lastApplied:{} ", context.getLastApplied()); - LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex); - LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm); + if(LOG.isDebugEnabled()) { + LOG.debug("Snapshot Capture logSize: {}", journal.size()); + LOG.debug("Snapshot Capture lastApplied:{} ", + context.getLastApplied()); + LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex); + LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm); + } // send a CaptureSnapshot to self to make the expensive operation async. 
getSelf().tell(new CaptureSnapshot( @@ -634,8 +658,9 @@ public abstract class RaftActor extends UntypedPersistentActor { } @Override public void update(long currentTerm, String votedFor) { - LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor); - + if(LOG.isDebugEnabled()) { + LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor); + } this.currentTerm = currentTerm; this.votedFor = votedFor; } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java index 7e896fed29..35d563b784 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java @@ -272,6 +272,17 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { return null; } + /** + * Find the client request tracker for a specific logIndex + * + * @param logIndex + * @return + */ + protected ClientRequestTracker removeClientRequestTracker(long logIndex) { + return null; + } + + /** * Find the log index from the previous to last entry in the log * @@ -311,7 +322,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { i < index + 1; i++) { ActorRef clientActor = null; String identifier = null; - ClientRequestTracker tracker = findClientRequestTracker(i); + ClientRequestTracker tracker = removeClientRequestTracker(i); if (tracker != null) { clientActor = tracker.getClientActor(); @@ -321,19 +332,19 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior { context.getReplicatedLog().get(i); if (replicatedLogEntry != null) { + // Send a local message to the local RaftActor (it's derived class to be + // specific to apply the log to it's index) actor().tell(new ApplyState(clientActor, identifier, replicatedLogEntry), actor()); newLastApplied = i; } else { //if one index is not present in the log, no point in looping // around as the rest wont be present either - context.getLogger().error( + context.getLogger().warning( "Missing index {} from log. Cannot apply state. 
Ignoring {} to {}", i, i, index ); break; } } - // Send a local message to the local RaftActor (it's derived class to be - // specific to apply the log to it's index) context.getLogger().debug("Setting last applied to {}", newLastApplied); context.setLastApplied(newLastApplied); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java index 610fdc987f..1cfdf9dba8 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java @@ -9,6 +9,7 @@ package org.opendaylight.controller.cluster.raft.behaviors; import akka.actor.ActorRef; +import akka.event.LoggingAdapter; import com.google.protobuf.ByteString; import org.opendaylight.controller.cluster.raft.RaftActorContext; import org.opendaylight.controller.cluster.raft.RaftState; @@ -38,9 +39,13 @@ import java.util.ArrayList; public class Follower extends AbstractRaftActorBehavior { private ByteString snapshotChunksCollected = ByteString.EMPTY; + private final LoggingAdapter LOG; + public Follower(RaftActorContext context) { super(context); + LOG = context.getLogger(); + scheduleElection(electionDuration()); } @@ -48,8 +53,9 @@ public class Follower extends AbstractRaftActorBehavior { AppendEntries appendEntries) { if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) { - context.getLogger() - .debug(appendEntries.toString()); + if(LOG.isDebugEnabled()) { + LOG.debug(appendEntries.toString()); + } } // TODO : Refactor this method into a bunch of smaller methods @@ -79,9 +85,10 @@ public class Follower extends AbstractRaftActorBehavior { // an entry at prevLogIndex and this follower has no entries in // it's log. 
- context.getLogger().debug( - "The followers log is empty and the senders prevLogIndex is {}", - appendEntries.getPrevLogIndex()); + if(LOG.isDebugEnabled()) { + LOG.debug("The followers log is empty and the senders prevLogIndex is {}", + appendEntries.getPrevLogIndex()); + } } else if (lastIndex() > -1 && appendEntries.getPrevLogIndex() != -1 @@ -90,9 +97,10 @@ public class Follower extends AbstractRaftActorBehavior { // The follower's log is out of sync because the Leader's // prevLogIndex entry was not found in it's log - context.getLogger().debug( - "The log is not empty but the prevLogIndex {} was not found in it", - appendEntries.getPrevLogIndex()); + if(LOG.isDebugEnabled()) { + LOG.debug("The log is not empty but the prevLogIndex {} was not found in it", + appendEntries.getPrevLogIndex()); + } } else if (lastIndex() > -1 && previousEntry != null @@ -102,10 +110,12 @@ public class Follower extends AbstractRaftActorBehavior { // prevLogIndex entry does exist in the follower's log but it has // a different term in it - context.getLogger().debug( - "Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}" - , previousEntry.getTerm() - , appendEntries.getPrevLogTerm()); + if(LOG.isDebugEnabled()) { + LOG.debug( + "Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}" + , previousEntry.getTerm() + , appendEntries.getPrevLogTerm()); + } } else { outOfSync = false; } @@ -113,9 +123,12 @@ public class Follower extends AbstractRaftActorBehavior { if (outOfSync) { // We found that the log was out of sync so just send a negative // reply and return - context.getLogger().debug("Follower is out-of-sync, " + - "so sending negative reply, lastIndex():{}, lastTerm():{}", - lastIndex(), lastTerm()); + if(LOG.isDebugEnabled()) { + LOG.debug("Follower is out-of-sync, " + + "so sending negative reply, lastIndex():{}, lastTerm():{}", + lastIndex(), lastTerm() + ); + } sender.tell( new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex(), lastTerm()), actor() @@ -125,10 +138,12 @@ public class Follower extends AbstractRaftActorBehavior { if (appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) { - context.getLogger().debug( - "Number of entries to be appended = " + appendEntries - .getEntries().size() - ); + if(LOG.isDebugEnabled()) { + LOG.debug( + "Number of entries to be appended = " + appendEntries + .getEntries().size() + ); + } // 3. If an existing entry conflicts with a new one (same index // but different terms), delete the existing entry and all that @@ -151,10 +166,12 @@ public class Follower extends AbstractRaftActorBehavior { continue; } - context.getLogger().debug( - "Removing entries from log starting at " - + matchEntry.getIndex() - ); + if(LOG.isDebugEnabled()) { + LOG.debug( + "Removing entries from log starting at " + + matchEntry.getIndex() + ); + } // Entries do not match so remove all subsequent entries context.getReplicatedLog() @@ -163,10 +180,12 @@ public class Follower extends AbstractRaftActorBehavior { } } - context.getLogger().debug( - "After cleanup entries to be added from = " + (addEntriesFrom - + lastIndex()) - ); + if(LOG.isDebugEnabled()) { + context.getLogger().debug( + "After cleanup entries to be added from = " + (addEntriesFrom + + lastIndex()) + ); + } // 4. 
Append any new entries not already in the log for (int i = addEntriesFrom; @@ -181,8 +200,9 @@ public class Follower extends AbstractRaftActorBehavior { .appendAndPersist(appendEntries.getEntries().get(i)); } - context.getLogger().debug( - "Log size is now " + context.getReplicatedLog().size()); + if(LOG.isDebugEnabled()) { + LOG.debug("Log size is now " + context.getReplicatedLog().size()); + } } @@ -195,8 +215,9 @@ public class Follower extends AbstractRaftActorBehavior { context.getReplicatedLog().lastIndex())); if (prevCommitIndex != context.getCommitIndex()) { - context.getLogger() - .debug("Commit index set to " + context.getCommitIndex()); + if(LOG.isDebugEnabled()) { + LOG.debug("Commit index set to " + context.getCommitIndex()); + } } // If commitIndex > lastApplied: increment lastApplied, apply @@ -204,10 +225,14 @@ public class Follower extends AbstractRaftActorBehavior { // check if there are any entries to be applied. last-applied can be equal to last-index if (appendEntries.getLeaderCommit() > context.getLastApplied() && context.getLastApplied() < lastIndex()) { - context.getLogger().debug("applyLogToStateMachine, " + - "appendEntries.getLeaderCommit():{}," + - "context.getLastApplied():{}, lastIndex():{}", - appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()); + if(LOG.isDebugEnabled()) { + LOG.debug("applyLogToStateMachine, " + + "appendEntries.getLeaderCommit():{}," + + "context.getLastApplied():{}, lastIndex():{}", + appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex() + ); + } + applyLogToStateMachine(appendEntries.getLeaderCommit()); } @@ -259,9 +284,13 @@ public class Follower extends AbstractRaftActorBehavior { } private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) { - context.getLogger().debug("InstallSnapshot received by follower " + - "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(), - installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()); + + if(LOG.isDebugEnabled()) { + LOG.debug("InstallSnapshot received by follower " + + "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(), + installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks() + ); + } try { if (installSnapshot.getChunkIndex() == installSnapshot.getTotalChunks()) { @@ -283,8 +312,11 @@ public class Follower extends AbstractRaftActorBehavior { } else { // we have more to go snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData()); - context.getLogger().debug("Chunk={},snapshotChunksCollected.size:{}", - installSnapshot.getChunkIndex(), snapshotChunksCollected.size()); + + if(LOG.isDebugEnabled()) { + LOG.debug("Chunk={},snapshotChunksCollected.size:{}", + installSnapshot.getChunkIndex(), snapshotChunksCollected.size()); + } } sender.tell(new InstallSnapshotReply( diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java index 90948ffef7..199d2d61cf 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java @@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.raft.behaviors; import akka.actor.ActorRef; import akka.actor.ActorSelection; import akka.actor.Cancellable; +import akka.event.LoggingAdapter; import 
com.google.common.base.Preconditions; import com.google.protobuf.ByteString; import org.opendaylight.controller.cluster.raft.ClientRequestTracker; @@ -80,9 +81,13 @@ public class Leader extends AbstractRaftActorBehavior { private final int minReplicationCount; + private final LoggingAdapter LOG; + public Leader(RaftActorContext context) { super(context); + LOG = context.getLogger(); + if (lastIndex() >= 0) { context.setCommitIndex(lastIndex()); } @@ -98,7 +103,9 @@ public class Leader extends AbstractRaftActorBehavior { followerToLog.put(followerId, followerLogInformation); } - context.getLogger().debug("Election:Leader has following peers:"+ followers); + if(LOG.isDebugEnabled()) { + LOG.debug("Election:Leader has following peers:" + followers); + } if (followers.size() > 0) { minReplicationCount = (followers.size() + 1) / 2 + 1; @@ -123,7 +130,9 @@ public class Leader extends AbstractRaftActorBehavior { @Override protected RaftState handleAppendEntries(ActorRef sender, AppendEntries appendEntries) { - context.getLogger().debug(appendEntries.toString()); + if(LOG.isDebugEnabled()) { + LOG.debug(appendEntries.toString()); + } return state(); } @@ -132,8 +141,9 @@ public class Leader extends AbstractRaftActorBehavior { AppendEntriesReply appendEntriesReply) { if(! appendEntriesReply.isSuccess()) { - context.getLogger() - .debug(appendEntriesReply.toString()); + if(LOG.isDebugEnabled()) { + LOG.debug(appendEntriesReply.toString()); + } } // Update the FollowerLogInformation @@ -142,7 +152,7 @@ public class Leader extends AbstractRaftActorBehavior { followerToLog.get(followerId); if(followerLogInformation == null){ - context.getLogger().error("Unknown follower {}", followerId); + LOG.error("Unknown follower {}", followerId); return state(); } @@ -196,6 +206,16 @@ public class Leader extends AbstractRaftActorBehavior { return state(); } + protected ClientRequestTracker removeClientRequestTracker(long logIndex) { + + ClientRequestTracker toRemove = findClientRequestTracker(logIndex); + if(toRemove != null) { + trackerList.remove(toRemove); + } + + return toRemove; + } + protected ClientRequestTracker findClientRequestTracker(long logIndex) { for (ClientRequestTracker tracker : trackerList) { if (tracker.getIndex() == logIndex) { @@ -260,10 +280,13 @@ public class Leader extends AbstractRaftActorBehavior { if (reply.isSuccess()) { if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) { //this was the last chunk reply - context.getLogger().debug("InstallSnapshotReply received, " + - "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}", - reply.getChunkIndex(), followerId, - context.getReplicatedLog().getSnapshotIndex() + 1); + if(LOG.isDebugEnabled()) { + LOG.debug("InstallSnapshotReply received, " + + "last chunk received, Chunk:{}. 
Follower:{} Setting nextIndex:{}", + reply.getChunkIndex(), followerId, + context.getReplicatedLog().getSnapshotIndex() + 1 + ); + } FollowerLogInformation followerLogInformation = followerToLog.get(followerId); @@ -272,31 +295,38 @@ public class Leader extends AbstractRaftActorBehavior { followerLogInformation.setNextIndex( context.getReplicatedLog().getSnapshotIndex() + 1); mapFollowerToSnapshot.remove(followerId); - context.getLogger().debug("followerToLog.get(followerId).getNextIndex().get()=" + - followerToLog.get(followerId).getNextIndex().get()); + + if(LOG.isDebugEnabled()) { + LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" + + followerToLog.get(followerId).getNextIndex().get()); + } } else { followerToSnapshot.markSendStatus(true); } } else { - context.getLogger().info("InstallSnapshotReply received, " + - "sending snapshot chunk failed, Will retry, Chunk:{}", - reply.getChunkIndex()); + LOG.info("InstallSnapshotReply received, " + + "sending snapshot chunk failed, Will retry, Chunk:{}", + reply.getChunkIndex() + ); followerToSnapshot.markSendStatus(false); } } else { - context.getLogger().error("ERROR!!" + - "FollowerId in InstallSnapshotReply not known to Leader" + - " or Chunk Index in InstallSnapshotReply not matching {} != {}", - followerToSnapshot.getChunkIndex(), reply.getChunkIndex() ); + LOG.error("ERROR!!" + + "FollowerId in InstallSnapshotReply not known to Leader" + + " or Chunk Index in InstallSnapshotReply not matching {} != {}", + followerToSnapshot.getChunkIndex(), reply.getChunkIndex() + ); } } private void replicate(Replicate replicate) { long logIndex = replicate.getReplicatedLogEntry().getIndex(); - context.getLogger().debug("Replicate message " + logIndex); + if(LOG.isDebugEnabled()) { + LOG.debug("Replicate message " + logIndex); + } // Create a tracker entry we will use this later to notify the // client actor @@ -350,10 +380,13 @@ public class Leader extends AbstractRaftActorBehavior { if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) { // if the follower is just not starting and leader's index // is more than followers index - context.getLogger().debug("SendInstallSnapshot to follower:{}," + - "follower-nextIndex:{}, leader-snapshot-index:{}, " + - "leader-last-index:{}", followerId, - followerNextIndex, leaderSnapShotIndex, leaderLastIndex); + if(LOG.isDebugEnabled()) { + LOG.debug("SendInstallSnapshot to follower:{}," + + "follower-nextIndex:{}, leader-snapshot-index:{}, " + + "leader-last-index:{}", followerId, + followerNextIndex, leaderSnapShotIndex, leaderLastIndex + ); + } actor().tell(new SendInstallSnapshot(), actor()); } else { @@ -412,11 +445,11 @@ public class Leader extends AbstractRaftActorBehavior { ).toSerializable(), actor() ); - context.getLogger().info("InstallSnapshot sent to follower {}, Chunk: {}/{}", + LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}", followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(), mapFollowerToSnapshot.get(followerId).getTotalChunks()); } catch (IOException e) { - context.getLogger().error("InstallSnapshot failed for Leader.", e); + LOG.error("InstallSnapshot failed for Leader.", e); } } @@ -431,7 +464,9 @@ public class Leader extends AbstractRaftActorBehavior { mapFollowerToSnapshot.put(followerId, followerToSnapshot); } ByteString nextChunk = followerToSnapshot.getNextChunk(); - context.getLogger().debug("Leader's snapshot nextChunk size:{}", nextChunk.size()); + if(LOG.isDebugEnabled()) { + LOG.debug("Leader's snapshot nextChunk 
size:{}", nextChunk.size()); + } return nextChunk; } @@ -526,8 +561,10 @@ public class Leader extends AbstractRaftActorBehavior { int size = snapshotBytes.size(); totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) + ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0); - context.getLogger().debug("Snapshot {} bytes, total chunks to send:{}", - size, totalChunks); + if(LOG.isDebugEnabled()) { + LOG.debug("Snapshot {} bytes, total chunks to send:{}", + size, totalChunks); + } } public ByteString getSnapshotBytes() { @@ -591,8 +628,10 @@ public class Leader extends AbstractRaftActorBehavior { } } - context.getLogger().debug("length={}, offset={},size={}", - snapshotLength, start, size); + if(LOG.isDebugEnabled()) { + LOG.debug("length={}, offset={},size={}", + snapshotLength, start, size); + } return getSnapshotBytes().substring(start, start + size); } diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java index 6665d7549b..5149cf9f34 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java @@ -132,7 +132,7 @@ public class AppendEntries extends AbstractRaftRPC { try { if(leProtoBuff.getData() != null && leProtoBuff.getData().getClientPayloadClassName() != null) { String clientPayloadClassName = leProtoBuff.getData().getClientPayloadClassName(); - payload = (Payload)Class.forName(clientPayloadClassName).newInstance(); + payload = (Payload) Class.forName(clientPayloadClassName).newInstance(); payload = payload.decode(leProtoBuff.getData()); payload.setClientPayloadClassName(clientPayloadClassName); } else { diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java index 9d40fa3d9e..c084cba822 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java +++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages; import com.google.protobuf.ByteString; -import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages; +import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages; public class InstallSnapshot extends AbstractRaftRPC { diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java index 12123db129..9b099c2aba 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java @@ -2,18 +2,24 @@ package org.opendaylight.controller.cluster.raft; import akka.actor.ActorRef; import akka.actor.ActorSystem; +import akka.actor.PoisonPill; import akka.actor.Props; import 
akka.event.Logging; import akka.japi.Creator; import akka.testkit.JavaTestKit; +import akka.testkit.TestActorRef; import com.google.protobuf.ByteString; import org.junit.Test; import org.opendaylight.controller.cluster.raft.client.messages.FindLeader; import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply; +import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; +import static junit.framework.Assert.assertTrue; import static junit.framework.TestCase.assertEquals; public class RaftActorTest extends AbstractActorTest { @@ -21,11 +27,21 @@ public class RaftActorTest extends AbstractActorTest { public static class MockRaftActor extends RaftActor { + boolean applySnapshotCalled = false; + public MockRaftActor(String id, Map peerAddresses) { super(id, peerAddresses); } + public RaftActorContext getRaftActorContext() { + return context; + } + + public boolean isApplySnapshotCalled() { + return applySnapshotCalled; + } + public static Props props(final String id, final Map peerAddresses){ return Props.create(new Creator(){ @@ -45,7 +61,7 @@ public class RaftActorTest extends AbstractActorTest { } @Override protected void applySnapshot(ByteString snapshot) { - throw new UnsupportedOperationException("applySnapshot"); + applySnapshotCalled = true; } @Override protected void onStateChanged() { @@ -134,5 +150,56 @@ public class RaftActorTest extends AbstractActorTest { kit.findLeader(kit.getRaftActor().path().toString()); } + @Test + public void testActorRecovery() { + new JavaTestKit(getSystem()) {{ + new Within(duration("1 seconds")) { + protected void run() { + + String persistenceId = "follower10"; + + ActorRef followerActor = getSystem().actorOf( + MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId); + + + List entries = new ArrayList<>(); + ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E")); + ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F")); + entries.add(entry1); + entries.add(entry2); + + int lastApplied = 3; + int lastIndex = 5; + Snapshot snapshot = Snapshot.create("A B C D".getBytes(), entries, lastIndex, 1 , lastApplied, 1); + MockSnapshotStore.setMockSnapshot(snapshot); + MockSnapshotStore.setPersistenceId(persistenceId); + + followerActor.tell(PoisonPill.getInstance(), null); + try { + // give some time for actor to die + Thread.sleep(200); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + TestActorRef ref = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId, Collections.EMPTY_MAP)); + try { + //give some time for snapshot offer to get called. 
+ Thread.sleep(200); + } catch (InterruptedException e) { + e.printStackTrace(); + } + RaftActorContext context = ref.underlyingActor().getRaftActorContext(); + assertEquals(entries.size(), context.getReplicatedLog().size()); + assertEquals(lastApplied, context.getLastApplied()); + assertEquals(lastApplied, context.getCommitIndex()); + assertTrue(ref.underlyingActor().isApplySnapshotCalled()); + } + + }; + }}; + + } + } diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java index 73c9f96b82..c4ef51d968 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java @@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapsho import org.opendaylight.controller.cluster.raft.messages.AppendEntries; import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot; import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply; -import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages; import org.opendaylight.controller.cluster.raft.utils.DoNothingActor; +import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages; import java.io.ByteArrayOutputStream; import java.io.IOException; diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java new file mode 100644 index 0000000000..d70bf920ae --- /dev/null +++ b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.cluster.raft.utils; + +import akka.dispatch.Futures; +import akka.japi.Option; +import akka.persistence.SelectedSnapshot; +import akka.persistence.SnapshotMetadata; +import akka.persistence.SnapshotSelectionCriteria; +import akka.persistence.snapshot.japi.SnapshotStore; +import org.opendaylight.controller.cluster.raft.Snapshot; +import scala.concurrent.Future; + + +public class MockSnapshotStore extends SnapshotStore { + + private static Snapshot mockSnapshot; + private static String persistenceId; + + public static void setMockSnapshot(Snapshot s) { + mockSnapshot = s; + } + + public static void setPersistenceId(String pId) { + persistenceId = pId; + } + + @Override + public Future> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) { + if (mockSnapshot == null) { + return Futures.successful(Option.none()); + } + + SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345); + SelectedSnapshot selectedSnapshot = + new SelectedSnapshot(smd, mockSnapshot); + return Futures.successful(Option.some(selectedSnapshot)); + } + + @Override + public Future doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) { + return null; + } + + @Override + public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception { + + } + + @Override + public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception { + + } + + @Override + public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception { + + } +} diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf b/opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf index 2b753004c4..6b2cc22038 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf +++ b/opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf @@ -1,4 +1,6 @@ akka { + persistence.snapshot-store.plugin = "mock-snapshot-store" + loglevel = "DEBUG" loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"] @@ -19,3 +21,10 @@ akka { } } } + +mock-snapshot-store { + # Class name of the plugin. + class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore" + # Dispatcher for the plugin actor. + plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" +} diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java index ef56d02a2e..cf37cbdd00 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java @@ -17,7 +17,9 @@ public abstract class AbstractUntypedActor extends UntypedActor { Logging.getLogger(getContext().system(), this); public AbstractUntypedActor() { - LOG.debug("Actor created {}", getSelf()); + if(LOG.isDebugEnabled()) { + LOG.debug("Actor created {}", getSelf()); + } getContext(). system(). actorSelection("user/termination-monitor"). 
@@ -27,11 +29,13 @@ public abstract class AbstractUntypedActor extends UntypedActor { @Override public void onReceive(Object message) throws Exception { final String messageType = message.getClass().getSimpleName(); - LOG.debug("Received message {}", messageType); - + if(LOG.isDebugEnabled()) { + LOG.debug("Received message {}", messageType); + } handleReceive(message); - - LOG.debug("Done handling message {}", messageType); + if(LOG.isDebugEnabled()) { + LOG.debug("Done handling message {}", messageType); + } } protected abstract void handleReceive(Object message) throws Exception; @@ -41,7 +45,9 @@ public abstract class AbstractUntypedActor extends UntypedActor { } protected void unknownMessage(Object message) throws Exception { - LOG.debug("Received unhandled message {}", message); + if(LOG.isDebugEnabled()) { + LOG.debug("Received unhandled message {}", message); + } unhandled(message); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayload.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/CompositeModificationPayload.java similarity index 95% rename from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayload.java rename to opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/CompositeModificationPayload.java index 2e822f1d43..8822ac83a6 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayload.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/CompositeModificationPayload.java @@ -6,13 +6,12 @@ * and is available at http://www.eclipse.org/legal/epl-v10.html */ -package org.opendaylight.controller.cluster.datastore; +package org.opendaylight.controller.cluster.raft.protobuff.client.messages; import com.google.common.base.Preconditions; import com.google.protobuf.GeneratedMessage; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.UnknownFieldSet; -import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages; import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages; diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java similarity index 100% rename from opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java rename to opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/messages/InstallSnapshotMessages.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/cluster/raft/InstallSnapshotMessages.java similarity index 87% rename from 
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/messages/InstallSnapshotMessages.java rename to opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/cluster/raft/InstallSnapshotMessages.java index e801ae1c10..b93be3e009 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/messages/InstallSnapshotMessages.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/cluster/raft/InstallSnapshotMessages.java @@ -1,7 +1,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: InstallSnapshot.proto -package org.opendaylight.controller.cluster.raft.protobuff.messages; +package org.opendaylight.controller.protobuff.messages.cluster.raft; public final class InstallSnapshotMessages { private InstallSnapshotMessages() {} @@ -186,14 +186,14 @@ public final class InstallSnapshotMessages { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor; + return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable + return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class); + org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class); } public static com.google.protobuf.Parser PARSER = @@ -245,7 +245,7 @@ public final class InstallSnapshotMessages { if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { @@ -261,7 +261,7 @@ public final class InstallSnapshotMessages { getLeaderIdBytes() { java.lang.Object ref = leaderId_; if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); leaderId_ = b; @@ -442,53 +442,53 @@ public final class InstallSnapshotMessages { return super.writeReplace(); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException 
{ return PARSER.parseFrom(data); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data) + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input) + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input) + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom( + public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot 
parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -497,7 +497,7 @@ public final class InstallSnapshotMessages { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot prototype) { + public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -513,20 +513,20 @@ public final class InstallSnapshotMessages { */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshotOrBuilder { + implements org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshotOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor; + return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable + return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class); + org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class); } - // Construct using org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.newBuilder() + // Construct using org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -569,23 +569,23 @@ public final class InstallSnapshotMessages { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor; + return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor; } - public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() { - return 
org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance(); + public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() { + return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance(); } - public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot build() { - org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = buildPartial(); + public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot build() { + org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot buildPartial() { - org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot(this); + public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot buildPartial() { + org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -622,16 +622,16 @@ public final class InstallSnapshotMessages { } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) { - return mergeFrom((org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot)other); + if (other instanceof org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) { + return mergeFrom((org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot other) { - if (other == org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this; + public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot other) { + if (other == org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this; if (other.hasTerm()) { setTerm(other.getTerm()); } @@ -667,11 +667,11 @@ public final class InstallSnapshotMessages { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parsedMessage = null; + org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parsedMessage = null; 
try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage(); + parsedMessage = (org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -744,7 +744,7 @@ public final class InstallSnapshotMessages { getLeaderIdBytes() { java.lang.Object ref = leaderId_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); leaderId_ = b; @@ -988,8 +988,8 @@ public final class InstallSnapshotMessages { "\021lastIncludedIndex\030\003 \001(\003\022\030\n\020lastIncluded" + "Term\030\004 \001(\003\022\014\n\004data\030\005 \001(\014\022\022\n\nchunkIndex\030\006" + " \001(\005\022\023\n\013totalChunks\030\007 \001(\005BX\n;org.openday" + - "light.controller.cluster.raft.protobuff." + - "messagesB\027InstallSnapshotMessagesH\001" + "light.controller.protobuff.messages.clus" + + "ter.raftB\027InstallSnapshotMessagesH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlStreamUtils.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlStreamUtils.java index c9d5e89ae1..0f93f43c56 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlStreamUtils.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlStreamUtils.java @@ -100,7 +100,9 @@ public class XmlStreamUtils { for (Entry e: prefixes.getPrefixes()) { writer.writeNamespace(e.getValue(), e.getKey().toString()); } - LOG.debug("Instance identifier with Random prefix is now {}", str); + if(LOG.isDebugEnabled()) { + LOG.debug("Instance identifier with Random prefix is now {}", str); + } writer.writeCharacters(str); } @@ -169,7 +171,7 @@ public class XmlStreamUtils { DataSchemaNode childSchema = null; if (schema instanceof DataNodeContainer) { childSchema = SchemaUtils.findFirstSchema(child.getNodeType(), ((DataNodeContainer) schema).getChildNodes()).orNull(); - if (childSchema == null) { + if (childSchema == null && LOG.isDebugEnabled()) { LOG.debug("Probably the data node \"{}\" does not conform to schema", child == null ? 
"" : child.getNodeType().getLocalName()); } } @@ -192,7 +194,9 @@ public class XmlStreamUtils { */ public void writeValue(final @Nonnull XMLStreamWriter writer, final @Nonnull TypeDefinition type, final Object value) throws XMLStreamException { if (value == null) { - LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName()); + if(LOG.isDebugEnabled()){ + LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName()); + } return; } @@ -232,18 +236,24 @@ public class XmlStreamUtils { writer.writeNamespace(prefix, qname.getNamespace().toString()); writer.writeCharacters(prefix + ':' + qname.getLocalName()); } else { - LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass()); + if(LOG.isDebugEnabled()) { + LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass()); + } writer.writeCharacters(String.valueOf(value)); } } private static void write(final @Nonnull XMLStreamWriter writer, final @Nonnull InstanceIdentifierTypeDefinition type, final @Nonnull Object value) throws XMLStreamException { if (value instanceof YangInstanceIdentifier) { - LOG.debug("Writing InstanceIdentifier object {}", value); + if(LOG.isDebugEnabled()) { + LOG.debug("Writing InstanceIdentifier object {}", value); + } write(writer, (YangInstanceIdentifier)value); } else { - LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass()); - writer.writeCharacters(String.valueOf(value)); + if(LOG.isDebugEnabled()) { + LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass()); + } + writer.writeCharacters(String.valueOf(value)); } } } diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlUtils.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlUtils.java index ea8f4a3ef1..d0cc2adb5f 100644 --- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlUtils.java +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlUtils.java @@ -74,7 +74,9 @@ public class XmlUtils { * @return xml String */ public static String inputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){ - LOG.debug("Converting input composite node to xml {}", cNode); + if(LOG.isDebugEnabled()) { + LOG.debug("Converting input composite node to xml {}", cNode); + } if (cNode == null) { return BLANK; } @@ -88,12 +90,14 @@ public class XmlUtils { Set rpcs = schemaContext.getOperations(); for(RpcDefinition rpc : rpcs) { if(rpc.getQName().equals(cNode.getNodeType())){ - LOG.debug("Found the rpc definition from schema context matching with input composite node {}", rpc.getQName()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Found the rpc definition from schema context matching with input composite node {}", rpc.getQName()); + } CompositeNode inputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "input")); domTree = XmlDocumentUtils.toDocument(inputContainer, rpc.getInput(), XmlDocumentUtils.defaultValueCodecProvider()); - - LOG.debug("input composite node to document conversion complete, document is {}", 
domTree); + if(LOG.isDebugEnabled()) { + LOG.debug("input composite node to document conversion complete, document is {}", domTree); + } break; } } @@ -111,7 +115,9 @@ public class XmlUtils { * @return xml string */ public static String outputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){ - LOG.debug("Converting output composite node to xml {}", cNode); + if(LOG.isDebugEnabled()) { + LOG.debug("Converting output composite node to xml {}", cNode); + } if (cNode == null) { return BLANK; } @@ -125,12 +131,14 @@ public class XmlUtils { Set rpcs = schemaContext.getOperations(); for(RpcDefinition rpc : rpcs) { if(rpc.getQName().equals(cNode.getNodeType())){ - LOG.debug("Found the rpc definition from schema context matching with output composite node {}", rpc.getQName()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Found the rpc definition from schema context matching with output composite node {}", rpc.getQName()); + } CompositeNode outputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "output")); domTree = XmlDocumentUtils.toDocument(outputContainer, rpc.getOutput(), XmlDocumentUtils.defaultValueCodecProvider()); - - LOG.debug("output composite node to document conversion complete, document is {}", domTree); + if(LOG.isDebugEnabled()) { + LOG.debug("output composite node to document conversion complete, document is {}", domTree); + } break; } } @@ -152,8 +160,9 @@ public class XmlUtils { LOG.error("Error during translation of Document to OutputStream", e); } - LOG.debug("Document to string conversion complete, xml string is {} ", writer.toString()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Document to string conversion complete, xml string is {} ", writer.toString()); + } return writer.toString(); } @@ -188,7 +197,9 @@ public class XmlUtils { * @return CompositeNode object based on the input, if any of the input parameter is null, a null object is returned */ public static CompositeNode inputXmlToCompositeNode(QName rpc, String xml, SchemaContext schemaContext){ - LOG.debug("Converting input xml to composite node {}", xml); + if(LOG.isDebugEnabled()) { + LOG.debug("Converting input xml to composite node {}", xml); + } if (xml==null || xml.length()==0) { return null; } @@ -208,8 +219,9 @@ public class XmlUtils { Set rpcs = schemaContext.getOperations(); for(RpcDefinition rpcDef : rpcs) { if(rpcDef.getQName().equals(rpc)){ - LOG.debug("found the rpc definition from schema context matching rpc {}", rpc); - + if(LOG.isDebugEnabled()) { + LOG.debug("found the rpc definition from schema context matching rpc {}", rpc); + } if(rpcDef.getInput() == null) { LOG.warn("found rpc definition's input is null"); return null; @@ -225,9 +237,9 @@ public class XmlUtils { List> dataNodes = XmlDocumentUtils.toDomNodes(xmlData, Optional.of(rpcDef.getInput().getChildNodes()), schemaContext); - - LOG.debug("Converted xml input to list of nodes {}", dataNodes); - + if(LOG.isDebugEnabled()) { + LOG.debug("Converted xml input to list of nodes {}", dataNodes); + } final CompositeNodeBuilder it = ImmutableCompositeNode.builder(); it.setQName(rpc); it.add(ImmutableCompositeNode.create(input, dataNodes)); @@ -240,8 +252,9 @@ public class XmlUtils { } catch (IOException e) { LOG.error("Error during building data tree from XML", e); } - - LOG.debug("Xml to composite node conversion complete {} ", compositeNode); + if(LOG.isDebugEnabled()) { + LOG.debug("Xml to composite node conversion complete {} ", compositeNode); + } return compositeNode; } diff --git 
a/opendaylight/md-sal/sal-akka-raft/src/main/resources/InstallSnapshot.proto b/opendaylight/md-sal/sal-clustering-commons/src/main/resources/InstallSnapshot.proto similarity index 82% rename from opendaylight/md-sal/sal-akka-raft/src/main/resources/InstallSnapshot.proto rename to opendaylight/md-sal/sal-clustering-commons/src/main/resources/InstallSnapshot.proto index 14f821b5e2..4198644b13 100644 --- a/opendaylight/md-sal/sal-akka-raft/src/main/resources/InstallSnapshot.proto +++ b/opendaylight/md-sal/sal-clustering-commons/src/main/resources/InstallSnapshot.proto @@ -1,6 +1,6 @@ package org.opendaylight.controller.cluster.raft; -option java_package = "org.opendaylight.controller.cluster.raft.protobuff.messages"; +option java_package = "org.opendaylight.controller.protobuff.messages.cluster.raft"; option java_outer_classname = "InstallSnapshotMessages"; option optimize_for = SPEED; diff --git a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf index f632b9cc83..f196ad1644 100644 --- a/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf +++ b/opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf @@ -9,7 +9,11 @@ odl-cluster-data { metric-capture-enabled = true akka { + loglevel = "INFO" + loggers = ["akka.event.slf4j.Slf4jLogger"] + actor { + provider = "akka.cluster.ClusterActorRefProvider" serializers { java = "akka.serialization.JavaSerializer" @@ -55,6 +59,9 @@ odl-cluster-rpc { metric-capture-enabled = true akka { + loglevel = "INFO" + loggers = ["akka.event.slf4j.Slf4jLogger"] + actor { provider = "akka.cluster.ClusterActorRefProvider" diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java index 60313bf109..50952eaaf1 100644 --- a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java +++ b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java @@ -8,11 +8,10 @@ package org.opendaylight.controller.md.sal.common.api.data; +import com.google.common.base.Supplier; import org.opendaylight.yangtools.yang.common.RpcError; -import org.opendaylight.yangtools.yang.common.RpcResultBuilder; import org.opendaylight.yangtools.yang.common.RpcError.ErrorType; - -import com.google.common.base.Function; +import org.opendaylight.yangtools.yang.common.RpcResultBuilder; /** * A type of TransactionCommitFailedException that indicates a situation that would result in a @@ -24,23 +23,21 @@ import com.google.common.base.Function; * @author Thomas Pantelis */ public class TransactionCommitDeadlockException extends TransactionCommitFailedException { - private static final long serialVersionUID = 1L; - private static final String DEADLOCK_MESSAGE = "An attempt to block on a ListenableFuture via a get method from a write " + "transaction submit was detected that would result in deadlock. The commit " + "result must be obtained asynchronously, e.g. 
via Futures#addCallback, to avoid deadlock."; + private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE); - public static Function DEADLOCK_EXECUTOR_FUNCTION = new Function() { + public static final Supplier DEADLOCK_EXCEPTION_SUPPLIER = new Supplier() { @Override - public Exception apply(Void notUsed) { - return new TransactionCommitDeadlockException( DEADLOCK_MESSAGE, - RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE)); + public Exception get() { + return new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR); } }; - public TransactionCommitDeadlockException(String message, final RpcError... errors) { + public TransactionCommitDeadlockException(final String message, final RpcError... errors) { super(message, errors); } } diff --git a/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/service/AbstractDataTransaction.java b/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/service/AbstractDataTransaction.java index d544c4b371..b2a03c2987 100644 --- a/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/service/AbstractDataTransaction.java +++ b/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/service/AbstractDataTransaction.java @@ -7,9 +7,13 @@ */ package org.opendaylight.controller.md.sal.common.impl.service; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.CheckedFuture; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; - import org.opendaylight.controller.md.sal.common.api.TransactionStatus; import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException; import org.opendaylight.controller.md.sal.common.impl.AbstractDataModification; @@ -19,15 +23,11 @@ import org.opendaylight.yangtools.yang.common.RpcResultBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.AsyncFunction; -import com.google.common.util.concurrent.CheckedFuture; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; - public abstract class AbstractDataTransaction

, D extends Object> extends AbstractDataModification { - private final static Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class); + private static final Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class); + private static final ListenableFuture> SUCCESS_FUTURE = + Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build()); private final Object identifier; private final long allocationTime; @@ -55,9 +55,10 @@ public abstract class AbstractDataTransaction

, D extends Objec @Override public Future> commit() { readyTime = System.nanoTime(); - LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime)); + if (LOG.isDebugEnabled()) { + LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime)); + } changeStatus(TransactionStatus.SUBMITED); - return this.broker.commit(this); } @@ -88,7 +89,7 @@ public abstract class AbstractDataTransaction

, D extends Objec } @Override - public boolean equals(Object obj) { + public boolean equals(final Object obj) { if (this == obj) { return true; } @@ -118,13 +119,18 @@ public abstract class AbstractDataTransaction

, D extends Objec public void succeeded() { this.completeTime = System.nanoTime(); - LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime)); + if (LOG.isDebugEnabled()) { + LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime)); + } changeStatus(TransactionStatus.COMMITED); } public void failed() { this.completeTime = System.nanoTime(); - LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime)); + + if (LOG.isDebugEnabled()) { + LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime)); + } changeStatus(TransactionStatus.FAILED); } @@ -134,14 +140,12 @@ public abstract class AbstractDataTransaction

, D extends Objec this.onStatusChange(status); } - public static ListenableFuture> convertToLegacyCommitFuture( - CheckedFuture from ) { + public static ListenableFuture> convertToLegacyCommitFuture(final CheckedFuture from) { return Futures.transform(from, new AsyncFunction>() { @Override - public ListenableFuture> apply(Void input) throws Exception { - return Futures.immediateFuture(RpcResultBuilder. - success(TransactionStatus.COMMITED).build()); + public ListenableFuture> apply(final Void input) { + return SUCCESS_FUTURE; } - } ); + }); } } diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStatsMXBeanImpl.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStatsMXBeanImpl.java index b67855d731..3de49ae296 100644 --- a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStatsMXBeanImpl.java +++ b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStatsMXBeanImpl.java @@ -16,6 +16,8 @@ import java.util.concurrent.ThreadPoolExecutor; import javax.annotation.Nullable; import org.opendaylight.yangtools.util.concurrent.CountingRejectedExecutionHandler; import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * MXBean implementation of the ThreadExecutorStatsMXBean interface that retrieves statistics @@ -25,7 +27,7 @@ import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue; */ public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean implements ThreadExecutorStatsMXBean { - + private static final Logger LOG = LoggerFactory.getLogger(ThreadExecutorStatsMXBeanImpl.class); private final ThreadPoolExecutor executor; /** @@ -36,14 +38,53 @@ public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean * @param mBeanType Used as the type property in the bean's ObjectName. * @param mBeanCategory Used as the Category property in the bean's ObjectName. */ - public ThreadExecutorStatsMXBeanImpl(Executor executor, String mBeanName, - String mBeanType, @Nullable String mBeanCategory) { + public ThreadExecutorStatsMXBeanImpl(final ThreadPoolExecutor executor, final String mBeanName, + final String mBeanType, @Nullable final String mBeanCategory) { super(mBeanName, mBeanType, mBeanCategory); + this.executor = Preconditions.checkNotNull(executor); + } + + private static ThreadExecutorStatsMXBeanImpl createInternal(final Executor executor, + final String mBeanName, final String mBeanType, final String mBeanCategory) { + if (executor instanceof ThreadPoolExecutor) { + final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl( + (ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory); + return ret; + } + + LOG.info("Executor {} is not supported", executor); + return null; + } + + /** + * Creates a new bean if the backing executor is a ThreadPoolExecutor and registers it. + * + * @param executor the backing {@link Executor} + * @param mBeanName Used as the name property in the bean's ObjectName. + * @param mBeanType Used as the type property in the bean's ObjectName. + * @param mBeanCategory Used as the Category property in the bean's ObjectName. + * @return a registered ThreadExecutorStatsMXBeanImpl instance if the backing executor + * is a ThreadPoolExecutor, otherwise null. 
+ */ + public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName, + final String mBeanType, @Nullable final String mBeanCategory) { + ThreadExecutorStatsMXBeanImpl ret = createInternal(executor, mBeanName, mBeanType, mBeanCategory); + if(ret != null) { + ret.registerMBean(); + } - Preconditions.checkArgument(executor instanceof ThreadPoolExecutor, - "The ExecutorService of type {} is not an instanceof ThreadPoolExecutor", - executor.getClass()); - this.executor = (ThreadPoolExecutor)executor; + return ret; + } + + /** + * Creates a new bean if the backing executor is a ThreadPoolExecutor. + * + * @param executor the backing {@link Executor} + * @return a ThreadExecutorStatsMXBeanImpl instance if the backing executor + * is a ThreadPoolExecutor, otherwise null. + */ + public static ThreadExecutorStatsMXBeanImpl create(final Executor executor) { + return createInternal(executor, "", "", null); } @Override diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxy.java index e3cdbb4ee1..acf630e2e9 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxy.java @@ -25,9 +25,10 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; *

*/ public class DataChangeListenerRegistrationProxy implements ListenerRegistration { - private final ActorSelection listenerRegistrationActor; + private volatile ActorSelection listenerRegistrationActor; private final AsyncDataChangeListener listener; private final ActorRef dataChangeListenerActor; + private boolean closed = false; public >> DataChangeListenerRegistrationProxy( @@ -38,14 +39,51 @@ public class DataChangeListenerRegistrationProxy implements ListenerRegistration this.dataChangeListenerActor = dataChangeListenerActor; } + public >> + DataChangeListenerRegistrationProxy( + L listener, ActorRef dataChangeListenerActor) { + this(null, listener, dataChangeListenerActor); + } + @Override public Object getInstance() { return listener; } + public void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) { + boolean sendCloseMessage = false; + synchronized(this) { + if(closed) { + sendCloseMessage = true; + } else { + this.listenerRegistrationActor = listenerRegistrationActor; + } + } + if(sendCloseMessage) { + listenerRegistrationActor.tell(new + CloseDataChangeListenerRegistration().toSerializable(), null); + } + + this.listenerRegistrationActor = listenerRegistrationActor; + } + + public ActorSelection getListenerRegistrationActor() { + return listenerRegistrationActor; + } + @Override public void close() { - listenerRegistrationActor.tell(new CloseDataChangeListenerRegistration().toSerializable(), null); + + boolean sendCloseMessage; + synchronized(this) { + sendCloseMessage = !closed && listenerRegistrationActor != null; + closed = true; + } + if(sendCloseMessage) { + listenerRegistrationActor.tell(new + CloseDataChangeListenerRegistration().toSerializable(), null); + } + dataChangeListenerActor.tell(PoisonPill.getInstance(), null); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java index db01d51535..c780881a2f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java @@ -10,9 +10,9 @@ package org.opendaylight.controller.cluster.datastore; import akka.actor.ActorRef; import akka.actor.ActorSystem; - +import akka.dispatch.OnComplete; +import akka.util.Timeout; import com.google.common.base.Preconditions; - import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier; import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener; import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply; @@ -32,6 +32,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext; import org.opendaylight.yangtools.yang.model.api.SchemaContextListener; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import scala.concurrent.Future; /** * @@ -39,6 +40,7 @@ import org.slf4j.LoggerFactory; public class DistributedDataStore implements DOMStore, SchemaContextListener, AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class); + public static final int REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR = 24; // 24 times the usual operation timeout private final ActorContext actorContext; @@ -69,33 +71,48 @@ 
public class DistributedDataStore implements DOMStore, SchemaContextListener, Au @Override public >> ListenerRegistration registerChangeListener( - YangInstanceIdentifier path, L listener, + final YangInstanceIdentifier path, L listener, AsyncDataBroker.DataChangeScope scope) { Preconditions.checkNotNull(path, "path should not be null"); Preconditions.checkNotNull(listener, "listener should not be null"); - - LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope); - + if(LOG.isDebugEnabled()) { + LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope); + } ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf( DataChangeListener.props(listener )); String shardName = ShardStrategyFactory.getStrategy(path).findShard(path); - Object result = actorContext.executeLocalShardOperation(shardName, - new RegisterChangeListener(path, dataChangeListenerActor.path(), scope)); - - if (result != null) { - RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result; - return new DataChangeListenerRegistrationProxy(actorContext - .actorSelection(reply.getListenerRegistrationPath()), listener, - dataChangeListenerActor); + Future future = actorContext.executeLocalShardOperationAsync(shardName, + new RegisterChangeListener(path, dataChangeListenerActor.path(), scope), + new Timeout(actorContext.getOperationDuration().$times( + REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR))); + + if (future != null) { + final DataChangeListenerRegistrationProxy listenerRegistrationProxy = + new DataChangeListenerRegistrationProxy(listener, dataChangeListenerActor); + + future.onComplete(new OnComplete(){ + + @Override public void onComplete(Throwable failure, Object result) + throws Throwable { + if(failure != null){ + LOG.error("Failed to register listener at path " + path.toString(), failure); + return; + } + RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result; + listenerRegistrationProxy.setListenerRegistrationActor(actorContext + .actorSelection(reply.getListenerRegistrationPath())); + } + }, actorContext.getActorSystem().dispatcher()); + return listenerRegistrationProxy; + } + if(LOG.isDebugEnabled()) { + LOG.debug( + "No local shard for shardName {} was found so returning a noop registration", + shardName); } - - LOG.debug( - "No local shard for shardName {} was found so returning a noop registration", - shardName); - return new NoOpDataChangeListenerRegistration(listener); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreProperties.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreProperties.java deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java index 0737d2020b..0fa27706e1 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java @@ -35,7 +35,6 @@ import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats import 
org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain; import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply; import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction; -import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply; import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply; import org.opendaylight.controller.cluster.datastore.messages.EnableNotification; import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction; @@ -53,6 +52,7 @@ import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl; import org.opendaylight.controller.cluster.raft.RaftActor; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply; +import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload; import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener; import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException; import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore; @@ -171,8 +171,11 @@ public class Shard extends RaftActor { } @Override public void onReceiveRecover(Object message) { - LOG.debug("onReceiveRecover: Received message {} from {}", message.getClass().toString(), - getSender()); + if(LOG.isDebugEnabled()) { + LOG.debug("onReceiveRecover: Received message {} from {}", + message.getClass().toString(), + getSender()); + } if (message instanceof RecoveryFailure){ LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause"); @@ -182,8 +185,11 @@ public class Shard extends RaftActor { } @Override public void onReceiveCommand(Object message) { - LOG.debug("onReceiveCommand: Received message {} from {}", message.getClass().toString(), - getSender()); + if(LOG.isDebugEnabled()) { + LOG.debug("onReceiveCommand: Received message {} from {}", + message.getClass().toString(), + getSender()); + } if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) { // This must be for install snapshot. Don't want to open this up and trigger @@ -192,6 +198,7 @@ public class Shard extends RaftActor { .tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)), self()); + createSnapshotTransaction = null; // Send a PoisonPill instead of sending close transaction because we do not really need // a response getSender().tell(PoisonPill.getInstance(), self()); @@ -297,7 +304,9 @@ public class Shard extends RaftActor { ShardTransactionIdentifier.builder() .remoteTransactionId(remoteTransactionId) .build(); - LOG.debug("Creating transaction : {} ", transactionId); + if(LOG.isDebugEnabled()) { + LOG.debug("Creating transaction : {} ", transactionId); + } ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId, transactionChainId); @@ -324,13 +333,19 @@ public class Shard extends RaftActor { DOMStoreThreePhaseCommitCohort cohort = modificationToCohort.remove(serialized); if (cohort == null) { - LOG.debug( - "Could not find cohort for modification : {}. Writing modification using a new transaction", - modification); + + if(LOG.isDebugEnabled()) { + LOG.debug( + "Could not find cohort for modification : {}. 
Writing modification using a new transaction", + modification); + } + DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction(); - LOG.debug("Created new transaction {}", transaction.getIdentifier().toString()); + if(LOG.isDebugEnabled()) { + LOG.debug("Created new transaction {}", transaction.getIdentifier().toString()); + } modification.apply(transaction); try { @@ -351,13 +366,12 @@ public class Shard extends RaftActor { return; } - final ListenableFuture future = cohort.commit(); - final ActorRef self = getSelf(); + ListenableFuture future = cohort.commit(); Futures.addCallback(future, new FutureCallback() { @Override public void onSuccess(Void v) { - sender.tell(new CommitTransactionReply().toSerializable(), self); + sender.tell(new CommitTransactionReply().toSerializable(), getSelf()); shardMBean.incrementCommittedTransactionCount(); shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis()); } @@ -366,7 +380,7 @@ public class Shard extends RaftActor { public void onFailure(Throwable t) { LOG.error(t, "An exception happened during commit"); shardMBean.incrementFailedTransactionsCount(); - sender.tell(new akka.actor.Status.Failure(t), self); + sender.tell(new akka.actor.Status.Failure(t), getSelf()); } }); @@ -400,8 +414,10 @@ public class Shard extends RaftActor { private void registerChangeListener( RegisterChangeListener registerChangeListener) { - LOG.debug("registerDataChangeListener for {}", registerChangeListener - .getPath()); + if(LOG.isDebugEnabled()) { + LOG.debug("registerDataChangeListener for {}", registerChangeListener + .getPath()); + } ActorSelection dataChangeListenerPath = getContext() @@ -429,23 +445,17 @@ public class Shard extends RaftActor { getContext().actorOf( DataChangeListenerRegistration.props(registration)); - LOG.debug( - "registerDataChangeListener sending reply, listenerRegistrationPath = {} " - , listenerRegistration.path().toString()); + if(LOG.isDebugEnabled()) { + LOG.debug( + "registerDataChangeListener sending reply, listenerRegistrationPath = {} " + , listenerRegistration.path().toString()); + } getSender() .tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf()); } - private void createTransactionChain() { - DOMStoreTransactionChain chain = store.createTransactionChain(); - ActorRef transactionChain = getContext().actorOf( - ShardTransactionChain.props(chain, schemaContext, datastoreContext, shardMBean)); - getSender().tell(new CreateTransactionChainReply(transactionChain.path()).toSerializable(), - getSelf()); - } - private boolean isMetricsCaptureEnabled(){ CommonConfig config = new CommonConfig(getContext().system().settings().config()); return config.isMetricCaptureEnabled(); @@ -467,7 +477,7 @@ public class Shard extends RaftActor { } } else { - LOG.error("Unknown state received {}", data); + LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader()); } // Update stats @@ -502,6 +512,8 @@ public class Shard extends RaftActor { // Since this will be done only on Recovery or when this actor is a Follower // we can safely commit everything in here. 
We not need to worry about event notifications // as they would have already been disabled on the follower + + LOG.info("Applying snapshot"); try { DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction(); NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot); @@ -516,6 +528,8 @@ public class Shard extends RaftActor { syncCommitTransaction(transaction); } catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) { LOG.error(e, "An exception occurred when applying snapshot"); + } finally { + LOG.info("Done applying snapshot"); } } @@ -525,17 +539,17 @@ public class Shard extends RaftActor { .tell(new EnableNotification(isLeader()), getSelf()); } - if (getLeaderId() != null) { - shardMBean.setLeader(getLeaderId()); - } - shardMBean.setRaftState(getRaftState().name()); shardMBean.setCurrentTerm(getCurrentTerm()); // If this actor is no longer the leader close all the transaction chains if(!isLeader()){ for(Map.Entry entry : transactionChains.entrySet()){ - LOG.debug("onStateChanged: Closing transaction chain {} because shard {} is no longer the leader", entry.getKey(), getId()); + if(LOG.isDebugEnabled()) { + LOG.debug( + "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader", + entry.getKey(), getId()); + } entry.getValue().close(); } @@ -543,6 +557,10 @@ public class Shard extends RaftActor { } } + @Override protected void onLeaderChanged(String oldLeader, String newLeader) { + shardMBean.setLeader(newLeader); + } + @Override public String persistenceId() { return this.name.toString(); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java index 13ecaa5619..a97c00f1d8 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java @@ -337,11 +337,11 @@ public class ShardManager extends AbstractUntypedActorWithMetering { peerAddress); if(peerAddresses.containsKey(peerId)){ peerAddresses.put(peerId, peerAddress); - - LOG.debug( - "Sending PeerAddressResolved for peer {} with address {} to {}", - peerId, peerAddress, actor.path()); - + if(LOG.isDebugEnabled()) { + LOG.debug( + "Sending PeerAddressResolved for peer {} with address {} to {}", + peerId, peerAddress, actor.path()); + } actor .tell(new PeerAddressResolved(peerId, peerAddress), getSelf()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java index b810ed9575..f5ca6e3c5a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java @@ -105,7 +105,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor { getSender().tell(new GetCompositeModificationReply( new ImmutableCompositeModification(modification)), getSelf()); } else if (message instanceof ReceiveTimeout) { - 
LOG.debug("Got ReceiveTimeout for inactivity - closing Tx"); + if(LOG.isDebugEnabled()) { + LOG.debug("Got ReceiveTimeout for inactivity - closing Tx"); + } closeTransaction(false); } else { throw new UnknownMessageException(message); @@ -163,8 +165,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor { protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) { modification.addModification( new WriteModification(message.getPath(), message.getData(),schemaContext)); - LOG.debug("writeData at path : " + message.getPath().toString()); - + if(LOG.isDebugEnabled()) { + LOG.debug("writeData at path : " + message.getPath().toString()); + } try { transaction.write(message.getPath(), message.getData()); getSender().tell(new WriteDataReply().toSerializable(), getSelf()); @@ -176,7 +179,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor { protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) { modification.addModification( new MergeModification(message.getPath(), message.getData(), schemaContext)); - LOG.debug("mergeData at path : " + message.getPath().toString()); + if(LOG.isDebugEnabled()) { + LOG.debug("mergeData at path : " + message.getPath().toString()); + } try { transaction.merge(message.getPath(), message.getData()); getSender().tell(new MergeDataReply().toSerializable(), getSelf()); @@ -186,7 +191,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor { } protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) { - LOG.debug("deleteData at path : " + message.getPath().toString()); + if(LOG.isDebugEnabled()) { + LOG.debug("deleteData at path : " + message.getPath().toString()); + } modification.addModification(new DeleteModification(message.getPath())); try { transaction.delete(message.getPath()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java index e6ac7f8dbc..0c3d33a78c 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java @@ -25,7 +25,9 @@ public class TerminationMonitor extends UntypedActor{ @Override public void onReceive(Object message) throws Exception { if(message instanceof Terminated){ Terminated terminated = (Terminated) message; - LOG.debug("Actor terminated : {}", terminated.actor()); + if(LOG.isDebugEnabled()) { + LOG.debug("Actor terminated : {}", terminated.actor()); + } } else if(message instanceof Monitor){ Monitor monitor = (Monitor) message; getContext().watch(monitor.getActorRef()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java index e3ae5dac7b..df85bb136a 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java @@ -101,7 +101,9 @@ 
public class ThreePhaseCommitCohort extends AbstractUntypedActor { private void commit(CommitTransaction message) { // Forward the commit to the shard - log.debug("Forward commit transaction to Shard {} ", shardActor); + if(log.isDebugEnabled()) { + log.debug("Forward commit transaction to Shard {} ", shardActor); + } shardActor.forward(new ForwardedCommitTransaction(cohort, modification), getContext()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java index a5be69531d..a7a5b31b17 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java @@ -65,9 +65,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho @Override public Void apply(Iterable paths) { cohortPaths = Lists.newArrayList(paths); - - LOG.debug("Tx {} successfully built cohort path list: {}", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} successfully built cohort path list: {}", transactionId, cohortPaths); + } return null; } }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher()); @@ -75,8 +76,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho @Override public ListenableFuture canCommit() { - LOG.debug("Tx {} canCommit", transactionId); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} canCommit", transactionId); + } final SettableFuture returnFuture = SettableFuture.create(); // The first phase of canCommit is to gather the list of cohort actor paths that will @@ -89,7 +91,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho @Override public void onComplete(Throwable failure, Void notUsed) throws Throwable { if(failure != null) { - LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure); + } returnFuture.setException(failure); } else { finishCanCommit(returnFuture); @@ -101,9 +105,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho } private void finishCanCommit(final SettableFuture returnFuture) { - - LOG.debug("Tx {} finishCanCommit", transactionId); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} finishCanCommit", transactionId); + } // The last phase of canCommit is to invoke all the cohort actors asynchronously to perform // their canCommit processing. If any one fails then we'll fail canCommit. 
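Nearly every hunk in this commit applies the same change: wrap a LOG.debug call in an explicit isDebugEnabled() check so that the argument expressions (toString() on transactions, paths, and composite nodes) are not evaluated when DEBUG is disabled. A minimal standalone sketch of that SLF4J pattern, with an illustrative class name:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class GuardedDebugLogging {
        private static final Logger LOG = LoggerFactory.getLogger(GuardedDebugLogging.class);

        void logCohortPaths(final String transactionId, final Object cohortPaths) {
            // Parameterized logging defers message formatting, but the arguments
            // themselves (here, potentially a large path list) are still evaluated.
            // The guard skips that work entirely when DEBUG is disabled.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Tx {} successfully built cohort path list: {}", transactionId, cohortPaths);
            }
        }
    }

The trade-off is a little boilerplate in exchange for avoiding needless object construction on hot paths such as read, write, and commit.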
@@ -114,7 +118,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho @Override public void onComplete(Throwable failure, Iterable responses) throws Throwable { if(failure != null) { - LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure); + } returnFuture.setException(failure); return; } @@ -135,9 +141,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho return; } } - - LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result); + } returnFuture.set(Boolean.valueOf(result)); } }, actorContext.getActorSystem().dispatcher()); @@ -146,9 +152,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho private Future> invokeCohorts(Object message) { List> futureList = Lists.newArrayListWithCapacity(cohortPaths.size()); for(ActorPath actorPath : cohortPaths) { - - LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath); + } ActorSelection cohort = actorContext.actorSelection(actorPath); futureList.add(actorContext.executeRemoteOperationAsync(cohort, message)); @@ -184,8 +190,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho private ListenableFuture voidOperation(final String operationName, final Object message, final Class expectedResponseClass, final boolean propagateException) { - LOG.debug("Tx {} {}", transactionId, operationName); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} {}", transactionId, operationName); + } final SettableFuture returnFuture = SettableFuture.create(); // The cohort actor list should already be built at this point by the canCommit phase but, @@ -199,9 +206,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho @Override public void onComplete(Throwable failure, Void notUsed) throws Throwable { if(failure != null) { - LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId, + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId, operationName, failure); - + } if(propagateException) { returnFuture.setException(failure); } else { @@ -221,9 +229,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho private void finishVoidOperation(final String operationName, final Object message, final Class expectedResponseClass, final boolean propagateException, final SettableFuture returnFuture) { - - LOG.debug("Tx {} finish {}", transactionId, operationName); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} finish {}", transactionId, operationName); + } Future> combinedFuture = invokeCohorts(message); combinedFuture.onComplete(new OnComplete>() { @@ -243,9 +251,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho } if(exceptionToPropagate != null) { - LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId, + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId, operationName, exceptionToPropagate); - + } if(propagateException) { // We don't log the exception here to avoid redundant logging since we're // propagating to the caller in MD-SAL core who will log it. 
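The propagateException flag in voidOperation above controls whether a cohort failure reaches the caller or is only logged, since abort is treated as best-effort. A small illustrative sketch of that decision, assuming the Guava SettableFuture used throughout this class (the helper name is invented):

    import com.google.common.util.concurrent.SettableFuture;

    // Illustrative only: mirrors the propagateException handling in finishVoidOperation.
    static void completeVoidOperation(final SettableFuture<Void> returnFuture,
            final Throwable failure, final boolean propagateException) {
        if (failure != null && propagateException) {
            // e.g. commit: the caller (MD-SAL core) logs and handles the failure.
            returnFuture.setException(failure);
        } else {
            // e.g. abort: the failure, if any, was already logged at debug level,
            // and the caller still sees a completed future.
            returnFuture.set(null);
        }
    }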
@@ -254,12 +263,16 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho // Since the caller doesn't want us to propagate the exception we'll also // not log it normally. But it's usually not good to totally silence // exceptions so we'll log it to debug level. - LOG.debug(String.format("%s failed", message.getClass().getSimpleName()), + if(LOG.isDebugEnabled()) { + LOG.debug(String.format("%s failed", message.getClass().getSimpleName()), exceptionToPropagate); + } returnFuture.set(null); } } else { - LOG.debug("Tx {}: {} succeeded", transactionId, operationName); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {}: {} succeeded", transactionId, operationName); + } returnFuture.set(null); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java index 97a9ff0bf3..6cf16b4426 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java @@ -224,8 +224,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { new TransactionProxyCleanupPhantomReference(this); phantomReferenceCache.put(cleanup, cleanup); } - - LOG.debug("Created txn {} of type {}", identifier, transactionType); + if(LOG.isDebugEnabled()) { + LOG.debug("Created txn {} of type {}", identifier, transactionType); + } } @Override @@ -235,8 +236,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY, "Read operation on write-only transaction is not allowed"); - LOG.debug("Tx {} read {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} read {}", identifier, path); + } createTransactionIfMissing(actorContext, path); return transactionContext(path).readData(path); @@ -248,8 +250,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY, "Exists operation on write-only transaction is not allowed"); - LOG.debug("Tx {} exists {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} exists {}", identifier, path); + } createTransactionIfMissing(actorContext, path); return transactionContext(path).dataExists(path); @@ -267,8 +270,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { checkModificationState(); - LOG.debug("Tx {} write {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} write {}", identifier, path); + } createTransactionIfMissing(actorContext, path); transactionContext(path).writeData(path, data); @@ -279,8 +283,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { checkModificationState(); - LOG.debug("Tx {} merge {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} merge {}", identifier, path); + } createTransactionIfMissing(actorContext, path); transactionContext(path).mergeData(path, data); @@ -290,9 +295,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { public void delete(YangInstanceIdentifier path) { checkModificationState(); - - LOG.debug("Tx {} delete {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx 
{} delete {}", identifier, path); + } createTransactionIfMissing(actorContext, path); transactionContext(path).deleteData(path); @@ -305,16 +310,18 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { inReadyState = true; - LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier, + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier, remoteTransactionPaths.size()); - + } List> cohortPathFutures = Lists.newArrayList(); for(TransactionContext transactionContext : remoteTransactionPaths.values()) { - LOG.debug("Tx {} Readying transaction for shard {}", identifier, + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} Readying transaction for shard {}", identifier, transactionContext.getShardName()); - + } cohortPathFutures.add(transactionContext.readyTransaction()); } @@ -381,8 +388,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { String transactionPath = reply.getTransactionPath(); - LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath); + } ActorSelection transactionActor = actorContext.actorSelection(transactionPath); if (transactionType == TransactionType.READ_ONLY) { @@ -404,7 +412,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { "Invalid reply type {} for CreateTransaction", response.getClass())); } } catch (Exception e) { - LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage()); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage()); + } remoteTransactionPaths .put(shardName, new NoOpTransactionContext(shardName, e, identifier)); } @@ -489,15 +499,18 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { @Override public void closeTransaction() { - LOG.debug("Tx {} closeTransaction called", identifier); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} closeTransaction called", identifier); + } actorContext.sendRemoteOperationAsync(getActor(), new CloseTransaction().toSerializable()); } @Override public Future readyTransaction() { - LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending", identifier, recordedOperationFutures.size()); - + } // Send the ReadyTransaction message to the Tx actor. final Future replyFuture = actorContext.executeRemoteOperationAsync(getActor(), @@ -522,10 +535,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { return combinedFutures.transform(new AbstractFunction1, ActorPath>() { @Override public ActorPath apply(Iterable notUsed) { - - LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded", identifier); - + } // At this point all the Futures succeeded and we need to extract the cohort // actor path from the ReadyTransactionReply. 
For the recorded operations, they // don't return any data so we're only interested that they completed @@ -543,9 +556,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { String resolvedCohortPath = getResolvedCohortPath( reply.getCohortPath().toString()); - LOG.debug("Tx {} readyTransaction: resolved cohort path {}", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readyTransaction: resolved cohort path {}", identifier, resolvedCohortPath); - + } return actorContext.actorFor(resolvedCohortPath); } else { // Throwing an exception here will fail the Future. @@ -559,21 +573,27 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { @Override public void deleteData(YangInstanceIdentifier path) { - LOG.debug("Tx {} deleteData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} deleteData called path = {}", identifier, path); + } recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(), new DeleteData(path).toSerializable() )); } @Override public void mergeData(YangInstanceIdentifier path, NormalizedNode data) { - LOG.debug("Tx {} mergeData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} mergeData called path = {}", identifier, path); + } recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(), new MergeData(path, data, schemaContext).toSerializable())); } @Override public void writeData(YangInstanceIdentifier path, NormalizedNode data) { - LOG.debug("Tx {} writeData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} writeData called path = {}", identifier, path); + } recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(), new WriteData(path, data, schemaContext).toSerializable())); } @@ -582,8 +602,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { public CheckedFuture>, ReadFailedException> readData( final YangInstanceIdentifier path) { - LOG.debug("Tx {} readData called path = {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readData called path = {}", identifier, path); + } final SettableFuture>> returnFuture = SettableFuture.create(); // If there were any previous recorded put/merge/delete operation reply Futures then we @@ -593,9 +614,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { if(recordedOperationFutures.isEmpty()) { finishReadData(path, returnFuture); } else { - LOG.debug("Tx {} readData: verifying {} previous recorded operations", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readData: verifying {} previous recorded operations", identifier, recordedOperationFutures.size()); - + } // Note: we make a copy of recordedOperationFutures to be on the safe side in case // Futures#sequence accesses the passed List on a different thread, as // recordedOperationFutures is not synchronized. 
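The defensive copy described in the note above can be shown on its own. A rough sketch, with an invented method name, of combining the recorded operation futures via Akka's Futures.sequence after copying the list:

    import akka.dispatch.Futures;
    import java.util.ArrayList;
    import java.util.List;
    import scala.concurrent.ExecutionContext;
    import scala.concurrent.Future;

    // Illustrative helper: copy first, because Futures.sequence may iterate the
    // collection on another thread while the transaction keeps recording operations.
    static Future<Iterable<Object>> combineRecordedOperations(
            final List<Future<Object>> recordedOperationFutures, final ExecutionContext dispatcher) {
        List<Future<Object>> copy = new ArrayList<>(recordedOperationFutures);
        return Futures.sequence(copy, dispatcher);
    }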
@@ -608,9 +630,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { public void onComplete(Throwable failure, Iterable notUsed) throws Throwable { if(failure != null) { - LOG.debug("Tx {} readData: a recorded operation failed: {}", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readData: a recorded operation failed: {}", identifier, failure); - + } returnFuture.setException(new ReadFailedException( "The read could not be performed because a previous put, merge," + "or delete operation failed", failure)); @@ -629,20 +652,23 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { private void finishReadData(final YangInstanceIdentifier path, final SettableFuture>> returnFuture) { - LOG.debug("Tx {} finishReadData called path = {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} finishReadData called path = {}", identifier, path); + } OnComplete onComplete = new OnComplete() { @Override public void onComplete(Throwable failure, Object readResponse) throws Throwable { if(failure != null) { - LOG.debug("Tx {} read operation failed: {}", identifier, failure); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} read operation failed: {}", identifier, failure); + } returnFuture.setException(new ReadFailedException( "Error reading data for path " + path, failure)); } else { - LOG.debug("Tx {} read operation succeeded", identifier, failure); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} read operation succeeded", identifier, failure); + } if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) { ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext, path, readResponse); @@ -669,8 +695,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { public CheckedFuture dataExists( final YangInstanceIdentifier path) { - LOG.debug("Tx {} dataExists called path = {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} dataExists called path = {}", identifier, path); + } final SettableFuture returnFuture = SettableFuture.create(); // If there were any previous recorded put/merge/delete operation reply Futures then we @@ -681,9 +708,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { if(recordedOperationFutures.isEmpty()) { finishDataExists(path, returnFuture); } else { - LOG.debug("Tx {} dataExists: verifying {} previous recorded operations", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} dataExists: verifying {} previous recorded operations", identifier, recordedOperationFutures.size()); - + } // Note: we make a copy of recordedOperationFutures to be on the safe side in case // Futures#sequence accesses the passed List on a different thread, as // recordedOperationFutures is not synchronized. 
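The dataExists path that follows completes the Guava SettableFuture handed back to MD-SAL from inside an Akka OnComplete callback. A simplified, illustrative bridge (reply decoding elided, names invented):

    import akka.dispatch.OnComplete;
    import com.google.common.util.concurrent.SettableFuture;
    import scala.concurrent.ExecutionContext;
    import scala.concurrent.Future;

    // Illustrative bridge: complete the Guava future from the Akka callback so the
    // caller never blocks on the remote shard reply.
    static void bridgeExistsReply(final Future<Object> replyFuture,
            final SettableFuture<Boolean> returnFuture, final ExecutionContext dispatcher) {
        replyFuture.onComplete(new OnComplete<Object>() {
            @Override
            public void onComplete(final Throwable failure, final Object response) {
                if (failure != null) {
                    returnFuture.setException(failure);
                } else {
                    // The real code checks the reply against DataExistsReply.SERIALIZABLE_CLASS here.
                    returnFuture.set(Boolean.TRUE);
                }
            }
        }, dispatcher);
    }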
@@ -696,9 +724,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { public void onComplete(Throwable failure, Iterable notUsed) throws Throwable { if(failure != null) { - LOG.debug("Tx {} dataExists: a recorded operation failed: {}", + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} dataExists: a recorded operation failed: {}", identifier, failure); - + } returnFuture.setException(new ReadFailedException( "The data exists could not be performed because a previous " + "put, merge, or delete operation failed", failure)); @@ -717,19 +746,22 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { private void finishDataExists(final YangInstanceIdentifier path, final SettableFuture returnFuture) { - LOG.debug("Tx {} finishDataExists called path = {}", identifier, path); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} finishDataExists called path = {}", identifier, path); + } OnComplete onComplete = new OnComplete() { @Override public void onComplete(Throwable failure, Object response) throws Throwable { if(failure != null) { - LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure); + } returnFuture.setException(new ReadFailedException( "Error checking data exists for path " + path, failure)); } else { - LOG.debug("Tx {} dataExists operation succeeded", identifier, failure); - + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} dataExists operation succeeded", identifier, failure); + } if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) { returnFuture.set(Boolean.valueOf(DataExistsReply. fromSerializable(response).exists())); @@ -761,34 +793,46 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { @Override public void closeTransaction() { - LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier); + if(LOG.isDebugEnabled()) { + LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier); + } } @Override public Future readyTransaction() { - LOG.debug("Tx {} readyTransaction called", identifier); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readyTransaction called", identifier); + } return akka.dispatch.Futures.failed(failure); } @Override public void deleteData(YangInstanceIdentifier path) { - LOG.debug("Tx {} deleteData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} deleteData called path = {}", identifier, path); + } } @Override public void mergeData(YangInstanceIdentifier path, NormalizedNode data) { - LOG.debug("Tx {} mergeData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} mergeData called path = {}", identifier, path); + } } @Override public void writeData(YangInstanceIdentifier path, NormalizedNode data) { - LOG.debug("Tx {} writeData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} writeData called path = {}", identifier, path); + } } @Override public CheckedFuture>, ReadFailedException> readData( YangInstanceIdentifier path) { - LOG.debug("Tx {} readData called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} readData called path = {}", identifier, path); + } return Futures.immediateFailedCheckedFuture(new ReadFailedException( "Error reading data for path " + path, failure)); } @@ -796,7 +840,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction { @Override public CheckedFuture dataExists( 
YangInstanceIdentifier path) { - LOG.debug("Tx {} dataExists called path = {}", identifier, path); + if(LOG.isDebugEnabled()) { + LOG.debug("Tx {} dataExists called path = {}", identifier, path); + } return Futures.immediateFailedCheckedFuture(new ReadFailedException( "Error checking exists for path " + path, failure)); } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/AbstractBaseMBean.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/AbstractBaseMBean.java deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java index 0a1964b053..0959c2a959 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java @@ -74,16 +74,14 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean { } public void setDataStoreExecutor(ExecutorService dsExecutor) { - this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dsExecutor, - "notification-executor", getMBeanType(), getMBeanCategory()); + this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor); } public void setNotificationManager(QueuedNotificationManager manager) { this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager, "notification-manager", getMBeanType(), getMBeanCategory()); - this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(), - "data-store-executor", getMBeanType(), getMBeanCategory()); + this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor()); } @Override @@ -230,7 +228,8 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean { @Override public ThreadExecutorStats getDataStoreExecutorStats() { - return dataStoreExecutorStatsBean.toThreadExecutorStats(); + return dataStoreExecutorStatsBean == null ? 
null : + dataStoreExecutorStatsBean.toThreadExecutorStats(); } @Override diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMBean.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMBean.java deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java index c989b275df..8ba333d279 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java @@ -13,8 +13,8 @@ import akka.actor.ActorRef; import akka.actor.ActorSelection; import akka.actor.ActorSystem; import akka.actor.PoisonPill; +import akka.pattern.Patterns; import akka.util.Timeout; - import org.opendaylight.controller.cluster.datastore.ClusterWrapper; import org.opendaylight.controller.cluster.datastore.Configuration; import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException; @@ -27,7 +27,6 @@ import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContex import org.opendaylight.yangtools.yang.model.api.SchemaContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import scala.concurrent.Await; import scala.concurrent.Future; import scala.concurrent.duration.Duration; @@ -126,8 +125,9 @@ public class ActorContext { if (result instanceof LocalShardFound) { LocalShardFound found = (LocalShardFound) result; - LOG.debug("Local shard found {}", found.getPath()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Local shard found {}", found.getPath()); + } return found.getPath(); } @@ -142,8 +142,9 @@ public class ActorContext { if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) { PrimaryFound found = PrimaryFound.fromSerializable(result); - LOG.debug("Primary found {}", found.getPrimaryPath()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Primary found {}", found.getPrimaryPath()); + } return found.getPrimaryPath(); } throw new PrimaryNotFoundException("Could not find primary for shardName " + shardName); @@ -176,9 +177,10 @@ public class ActorContext { */ public Object executeRemoteOperation(ActorSelection actor, Object message) { - LOG.debug("Sending remote message {} to {}", message.getClass().toString(), - actor.toString()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Sending remote message {} to {}", message.getClass().toString(), + actor.toString()); + } Future future = ask(actor, message, operationTimeout); try { @@ -198,8 +200,9 @@ public class ActorContext { */ public Future executeRemoteOperationAsync(ActorSelection actor, Object message) { - LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString()); + } return ask(actor, message, operationTimeout); } @@ -266,6 +269,30 @@ public class ActorContext { } + /** + * Execute an operation on the the local shard only asynchronously + * + *
+ * This method first finds the address of the local shard if any. It then + * executes the operation on it. + *
+ * + * @param shardName the name of the shard on which the operation needs to be executed + * @param message the message that needs to be sent to the shard + * @param timeout the amount of time that this method should wait for a response before timing out + * @return null if the shard could not be located else a future on which the caller can wait + * + */ + public Future executeLocalShardOperationAsync(String shardName, Object message, Timeout timeout) { + ActorRef local = findLocalShard(shardName); + if(local == null){ + return null; + } + return Patterns.ask(local, message, timeout); + } + + + public void shutdown() { shardManager.tell(PoisonPill.getInstance(), null); actorSystem.shutdown(); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractActorTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractActorTest.java index 4c550a768c..022ef9bbaf 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractActorTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractActorTest.java @@ -25,12 +25,16 @@ public abstract class AbstractActorTest { System.setProperty("shard.persistent", "false"); system = ActorSystem.create("test"); + + deletePersistenceFiles(); } @AfterClass public static void tearDownClass() throws IOException { JavaTestKit.shutdownActorSystem(system); system = null; + + deletePersistenceFiles(); } protected static void deletePersistenceFiles() throws IOException { diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayloadTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayloadTest.java index be43911fe1..04d889fbe0 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayloadTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayloadTest.java @@ -7,9 +7,10 @@ import org.opendaylight.controller.cluster.datastore.modification.MutableComposi import org.opendaylight.controller.cluster.datastore.modification.WriteModification; import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.messages.AppendEntries; +import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload; import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; -import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages; import org.opendaylight.controller.md.cluster.datastore.model.TestModel; +import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages; import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; import java.io.File; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxyTest.java index 3d0aaa0082..ab3ff795d3 100644 --- 
a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxyTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxyTest.java @@ -17,6 +17,10 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import java.util.List; +import static junit.framework.TestCase.assertEquals; +import static junit.framework.TestCase.assertNotNull; +import static junit.framework.TestCase.assertTrue; + public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{ private ActorRef dataChangeListenerActor = getSystem().actorOf(Props.create(DoNothingActor.class)); @@ -64,14 +68,41 @@ public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{ Object messages = testContext .executeLocalOperation(actorRef, "messages"); - Assert.assertNotNull(messages); + assertNotNull(messages); - Assert.assertTrue(messages instanceof List); + assertTrue(messages instanceof List); List listMessages = (List) messages; - Assert.assertEquals(1, listMessages.size()); + assertEquals(1, listMessages.size()); + + assertTrue(listMessages.get(0).getClass() + .equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS)); + } + + @Test + public void testCloseWhenRegistrationIsNull() throws Exception { + final Props props = Props.create(MessageCollectorActor.class); + final ActorRef actorRef = getSystem().actorOf(props); + + DataChangeListenerRegistrationProxy proxy = + new DataChangeListenerRegistrationProxy( + new MockDataChangeListener(), dataChangeListenerActor); + + proxy.close(); + + //Check if it was received by the remote actor + ActorContext + testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration()); + Object messages = testContext + .executeLocalOperation(actorRef, "messages"); + + assertNotNull(messages); + + assertTrue(messages instanceof List); + + List listMessages = (List) messages; - Assert.assertTrue(listMessages.get(0).getClass().equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS)); + assertEquals(0, listMessages.size()); } } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java index aeb47de888..08c3ea9602 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java @@ -1,14 +1,20 @@ package org.opendaylight.controller.cluster.datastore; +import akka.actor.ActorPath; import akka.actor.ActorRef; +import akka.actor.ActorSelection; import akka.actor.ActorSystem; import akka.actor.Props; - +import akka.dispatch.ExecutionContexts; +import akka.dispatch.Futures; +import akka.util.Timeout; +import com.google.common.util.concurrent.MoreExecutors; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply; import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory; +import 
org.opendaylight.controller.cluster.datastore.utils.ActorContext; import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor; import org.opendaylight.controller.cluster.datastore.utils.MockActorContext; import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration; @@ -24,13 +30,23 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction; import org.opendaylight.yangtools.concepts.ListenerRegistration; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; +import scala.concurrent.ExecutionContextExecutor; +import scala.concurrent.Future; +import scala.concurrent.duration.FiniteDuration; + +import java.util.concurrent.TimeUnit; +import static junit.framework.TestCase.assertEquals; +import static junit.framework.TestCase.assertNull; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DistributedDataStoreTest extends AbstractActorTest{ @@ -95,20 +111,108 @@ public class DistributedDataStoreTest extends AbstractActorTest{ @Test public void testRegisterChangeListenerWhenShardIsLocal() throws Exception { + ActorContext actorContext = mock(ActorContext.class); + + distributedDataStore = new DistributedDataStore(actorContext); + distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext()); - mockActorContext.setExecuteLocalShardOperationResponse(new RegisterChangeListenerReply(doNothingActorRef.path())); + Future future = mock(Future.class); + when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS)); + when(actorContext.getActorSystem()).thenReturn(getSystem()); + when(actorContext + .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(future); ListenerRegistration registration = - distributedDataStore.registerChangeListener(TestModel.TEST_PATH, new AsyncDataChangeListener>() { - @Override - public void onDataChanged(AsyncDataChangeEvent> change) { - throw new UnsupportedOperationException("onDataChanged"); - } - }, AsyncDataBroker.DataChangeScope.BASE); + distributedDataStore.registerChangeListener(TestModel.TEST_PATH, + mock(AsyncDataChangeListener.class), + AsyncDataBroker.DataChangeScope.BASE); - assertTrue(registration instanceof DataChangeListenerRegistrationProxy); + assertNotNull(registration); + + assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass()); + } + + @Test + public void testRegisterChangeListenerWhenSuccessfulReplyReceived() throws Exception { + ActorContext actorContext = mock(ActorContext.class); + + distributedDataStore = new DistributedDataStore(actorContext); + distributedDataStore.onGlobalContextUpdated( + TestModel.createTestContext()); + + ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor()); + + // Make Future successful + Future f = Futures.successful(new RegisterChangeListenerReply(doNothingActorRef.path())); + + // Setup the mocks + ActorSystem actorSystem = mock(ActorSystem.class); + ActorSelection actorSelection = mock(ActorSelection.class); + + when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, 
TimeUnit.SECONDS)); + when(actorSystem.dispatcher()).thenReturn(executor); + when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef); + when(actorContext.getActorSystem()).thenReturn(actorSystem); + when(actorContext + .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f); + when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection); + + ListenerRegistration registration = + distributedDataStore.registerChangeListener(TestModel.TEST_PATH, + mock(AsyncDataChangeListener.class), + AsyncDataBroker.DataChangeScope.BASE); assertNotNull(registration); + + assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass()); + + ActorSelection listenerRegistrationActor = + ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor(); + + assertNotNull(listenerRegistrationActor); + + assertEquals(actorSelection, listenerRegistrationActor); + } + + @Test + public void testRegisterChangeListenerWhenSuccessfulReplyFailed() throws Exception { + ActorContext actorContext = mock(ActorContext.class); + + distributedDataStore = new DistributedDataStore(actorContext); + distributedDataStore.onGlobalContextUpdated( + TestModel.createTestContext()); + + ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor()); + + // Make Future fail + Future f = Futures.failed(new IllegalArgumentException()); + + // Setup the mocks + ActorSystem actorSystem = mock(ActorSystem.class); + ActorSelection actorSelection = mock(ActorSelection.class); + + when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS)); + when(actorSystem.dispatcher()).thenReturn(executor); + when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef); + when(actorContext.getActorSystem()).thenReturn(actorSystem); + when(actorContext + .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f); + when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection); + + ListenerRegistration registration = + distributedDataStore.registerChangeListener(TestModel.TEST_PATH, + mock(AsyncDataChangeListener.class), + AsyncDataBroker.DataChangeScope.BASE); + + assertNotNull(registration); + + assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass()); + + ActorSelection listenerRegistrationActor = + ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor(); + + assertNull(listenerRegistrationActor); + } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java index 06bcac8d78..deb71c2df4 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java @@ -343,11 +343,16 @@ public class ShardTest extends AbstractActorTest { subject.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef()); - waitForLogMessage(Logging.Debug.class, subject, "CaptureSnapshotReply received by actor"); + waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor"); + + subject.tell(new CaptureSnapshot(-1,-1,-1,-1), + getRef()); + + waitForLogMessage(Logging.Info.class, 
subject, "CaptureSnapshotReply received by actor"); + } }; - Thread.sleep(2000); deletePersistenceFiles(); }}; } diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java new file mode 100644 index 0000000000..0e492f0fbb --- /dev/null +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.cluster.datastore.utils; + +import akka.dispatch.Futures; +import akka.japi.Option; +import akka.persistence.SelectedSnapshot; +import akka.persistence.SnapshotMetadata; +import akka.persistence.SnapshotSelectionCriteria; +import akka.persistence.snapshot.japi.SnapshotStore; +import com.google.common.collect.Iterables; +import scala.concurrent.Future; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class InMemorySnapshotStore extends SnapshotStore { + + Map> snapshots = new HashMap<>(); + + @Override public Future> doLoadAsync(String s, + SnapshotSelectionCriteria snapshotSelectionCriteria) { + List snapshotList = snapshots.get(s); + if(snapshotList == null){ + return Futures.successful(Option.none()); + } + + Snapshot snapshot = Iterables.getLast(snapshotList); + SelectedSnapshot selectedSnapshot = + new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData()); + return Futures.successful(Option.some(selectedSnapshot)); + } + + @Override public Future doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) { + List snapshotList = snapshots.get(snapshotMetadata.persistenceId()); + + if(snapshotList == null){ + snapshotList = new ArrayList<>(); + snapshots.put(snapshotMetadata.persistenceId(), snapshotList); + } + snapshotList.add(new Snapshot(snapshotMetadata, o)); + + return Futures.successful(null); + } + + @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception { + } + + @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception { + List snapshotList = snapshots.get(snapshotMetadata.persistenceId()); + + if(snapshotList == null){ + return; + } + + int deleteIndex = -1; + + for(int i=0;i snapshotList = snapshots.get(s); + + if(snapshotList == null){ + return; + } + + // TODO : This is a quick and dirty implementation. Do actual match later. 
+ snapshotList.clear(); + snapshots.remove(s); + } + + private static class Snapshot { + private final SnapshotMetadata metadata; + private final Object data; + + private Snapshot(SnapshotMetadata metadata, Object data) { + this.metadata = metadata; + this.data = data; + } + + public SnapshotMetadata getMetadata() { + return metadata; + } + + public Object getData() { + return data; + } + } +} diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Client.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Client.java index 2671be80bb..a2b78c6c15 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Client.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Client.java @@ -13,7 +13,7 @@ import akka.actor.ActorSystem; import akka.actor.Props; import akka.actor.UntypedActor; import com.typesafe.config.ConfigFactory; -import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload; +import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload; import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification; import org.opendaylight.controller.cluster.datastore.modification.WriteModification; import org.opendaylight.controller.cluster.example.messages.KeyValue; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Server.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Server.java index 0e6d535301..e6bdf5aac3 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Server.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Server.java @@ -12,7 +12,7 @@ import akka.actor.ActorSystem; import akka.actor.Props; import akka.actor.UntypedActor; import com.typesafe.config.ConfigFactory; -import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload; +import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload; import org.opendaylight.controller.cluster.example.messages.KeyValue; import org.opendaylight.controller.cluster.raft.messages.AppendEntries; import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload; diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf index 794b376af8..f0dadc618b 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf @@ -1,4 +1,6 @@ akka { + persistence.snapshot-store.plugin = "in-memory-snapshot-store" + loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"] actor { @@ -14,6 +16,14 @@ akka { } } } + +in-memory-snapshot-store { + # Class name of the plugin. + class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore" + # Dispatcher for the plugin actor. 
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher" +} + bounded-mailbox { mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox" mailbox-capacity = 1000 diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/DomInmemoryDataBrokerModule.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/DomInmemoryDataBrokerModule.java index b423bbd0e5..ac62974d29 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/DomInmemoryDataBrokerModule.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/DomInmemoryDataBrokerModule.java @@ -7,10 +7,12 @@ */ package org.opendaylight.controller.config.yang.md.sal.dom.impl; +import java.util.EnumMap; +import java.util.Map; import java.util.concurrent.ExecutorService; - import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException; +import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean; import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl; import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl; import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl; @@ -18,7 +20,6 @@ import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFac import org.opendaylight.controller.sal.core.spi.data.DOMStore; import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService; import org.opendaylight.yangtools.util.concurrent.SpecialExecutors; -import com.google.common.collect.ImmutableMap; /** * @@ -59,9 +60,10 @@ public final class DomInmemoryDataBrokerModule extends //we will default to InMemoryDOMDataStore creation configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency()); } - ImmutableMap datastores = ImmutableMap - . builder().put(LogicalDatastoreType.OPERATIONAL, operStore) - .put(LogicalDatastoreType.CONFIGURATION, configStore).build(); + + final Map datastores = new EnumMap<>(LogicalDatastoreType.class); + datastores.put(LogicalDatastoreType.OPERATIONAL, operStore); + datastores.put(LogicalDatastoreType.CONFIGURATION, configStore); /* * We use a single-threaded executor for commits with a bounded queue capacity. 
If the @@ -88,29 +90,30 @@ public final class DomInmemoryDataBrokerModule extends DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores, new DeadlockDetectingListeningExecutorService(commitExecutor, - TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, + TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, listenableFutureExecutor)); final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl( newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE); commitStatsMXBean.registerMBean(); - final ThreadExecutorStatsMXBeanImpl commitExecutorStatsMXBean = - new ThreadExecutorStatsMXBeanImpl(commitExecutor, "CommitExecutorStats", + final AbstractMXBean commitExecutorStatsMXBean = + ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats", JMX_BEAN_TYPE, null); - commitExecutorStatsMXBean.registerMBean(); - - final ThreadExecutorStatsMXBeanImpl commitFutureStatsMXBean = - new ThreadExecutorStatsMXBeanImpl(listenableFutureExecutor, + final AbstractMXBean commitFutureStatsMXBean = + ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor, "CommitFutureExecutorStats", JMX_BEAN_TYPE, null); - commitFutureStatsMXBean.registerMBean(); newDataBroker.setCloseable(new AutoCloseable() { @Override public void close() { commitStatsMXBean.unregisterMBean(); - commitExecutorStatsMXBean.unregisterMBean(); - commitFutureStatsMXBean.unregisterMBean(); + if (commitExecutorStatsMXBean != null) { + commitExecutorStatsMXBean.unregisterMBean(); + } + if (commitFutureStatsMXBean != null) { + commitFutureStatsMXBean.unregisterMBean(); + } } }); diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java index d3791a0878..15d53f5310 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java @@ -6,14 +6,14 @@ */ package org.opendaylight.controller.md.sal.dom.broker.impl; +import com.google.common.base.Preconditions; +import java.util.Collection; +import java.util.Map; import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction; import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; - /** * Composite DOM Transaction backed by {@link DOMStoreTransaction}. 
* @@ -29,7 +29,7 @@ import com.google.common.collect.ImmutableMap; abstract class AbstractDOMForwardedCompositeTransaction implements AsyncTransaction> { - private final ImmutableMap backingTxs; + private final Map backingTxs; private final Object identifier; /** @@ -41,7 +41,7 @@ abstract class AbstractDOMForwardedCompositeTransaction backingTxs) { + protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map backingTxs) { this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null"); this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null"); } @@ -58,15 +58,17 @@ abstract class AbstractDOMForwardedCompositeTransaction getSubtransactions() { + protected Collection getSubtransactions() { return backingTxs.values(); } @@ -77,9 +79,8 @@ abstract class AbstractDOMForwardedCompositeTransaction * Type of {@link DOMStoreTransactionFactory} factory. */ -public abstract class AbstractDOMForwardedTransactionFactory implements DOMDataCommitImplementation, AutoCloseable { - - private final ImmutableMap storeTxFactories; - - private boolean closed; +abstract class AbstractDOMForwardedTransactionFactory implements DOMDataCommitImplementation, AutoCloseable { + @SuppressWarnings("rawtypes") + private static final AtomicIntegerFieldUpdater UPDATER = + AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed"); + private final Map storeTxFactories; + private volatile int closed = 0; protected AbstractDOMForwardedTransactionFactory(final Map txFactories) { - this.storeTxFactories = ImmutableMap.copyOf(txFactories); + this.storeTxFactories = new EnumMap<>(txFactories); } /** @@ -74,17 +72,16 @@ public abstract class AbstractDOMForwardedTransactionFactory builder = ImmutableMap.builder(); + + final Map txns = new EnumMap<>(LogicalDatastoreType.class); for (Entry store : storeTxFactories.entrySet()) { - builder.put(store.getKey(), store.getValue().newReadOnlyTransaction()); + txns.put(store.getKey(), store.getValue().newReadOnlyTransaction()); } - return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), builder.build()); + return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns); } - - /** * Creates a new composite write-only transaction * @@ -124,14 +121,14 @@ public abstract class AbstractDOMForwardedTransactionFactory builder = ImmutableMap.builder(); + + final Map txns = new EnumMap<>(LogicalDatastoreType.class); for (Entry store : storeTxFactories.entrySet()) { - builder.put(store.getKey(), store.getValue().newWriteOnlyTransaction()); + txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction()); } - return new DOMForwardedWriteTransaction(newTransactionIdentifier(), builder.build(), - this); + return new DOMForwardedWriteTransaction(newTransactionIdentifier(), txns, this); } /** @@ -177,15 +174,15 @@ public abstract class AbstractDOMForwardedTransactionFactory builder = ImmutableMap.builder(); + + final Map txns = new EnumMap<>(LogicalDatastoreType.class); for (Entry store : storeTxFactories.entrySet()) { - builder.put(store.getKey(), store.getValue().newReadWriteTransaction()); + txns.put(store.getKey(), store.getValue().newReadWriteTransaction()); } - return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), builder.build(), this); + return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this); } /** @@ -203,21 +200,19 @@ public abstract class AbstractDOMForwardedTransactionFactory implements 
DOMDataBroker, AutoCloseable { @@ -43,13 +41,13 @@ public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory datastores, + public DOMDataBrokerImpl(final Map datastores, final ListeningExecutorService executor) { super(datastores); this.coordinator = new DOMDataCommitCoordinatorImpl(executor); } - public void setCloseable(AutoCloseable closeable) { + public void setCloseable(final AutoCloseable closeable) { this.closeable = closeable; } @@ -86,13 +84,14 @@ public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory backingChainsBuilder = ImmutableMap - .builder(); + checkNotClosed(); + + final Map backingChains = new EnumMap<>(LogicalDatastoreType.class); for (Entry entry : getTxFactories().entrySet()) { - backingChainsBuilder.put(entry.getKey(), entry.getValue().createTransactionChain()); + backingChains.put(entry.getKey(), entry.getValue().createTransactionChain()); } - long chainId = chainNum.getAndIncrement(); - ImmutableMap backingChains = backingChainsBuilder.build(); + + final long chainId = chainNum.getAndIncrement(); LOG.debug("Transactoin chain {} created with listener {}, backing store chains {}", chainId, listener, backingChains); return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, coordinator, listener); diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java index 227693ca4d..7cd6afa466 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java @@ -6,10 +6,11 @@ */ package org.opendaylight.controller.md.sal.dom.broker.impl; +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.CheckedFuture; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; - -import javax.annotation.concurrent.GuardedBy; - import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener; import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException; @@ -20,11 +21,6 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.CheckedFuture; - /** * NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which is backed * by several {@link DOMStoreTransactionChain} differentiated by provided @@ -35,12 +31,12 @@ public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTrans implements DOMTransactionChain, DOMDataCommitErrorListener { private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class); + private final AtomicLong txNum = new AtomicLong(); private final DOMDataCommitExecutor coordinator; private final TransactionChainListener listener; private final long chainId; - private final AtomicLong txNum = new 
AtomicLong(); - @GuardedBy("this") - private boolean failed = false; + + private volatile boolean failed = false; /** * @@ -58,7 +54,7 @@ public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTrans * If any of arguments is null. */ public DOMDataBrokerTransactionChainImpl(final long chainId, - final ImmutableMap chains, + final Map chains, final DOMDataCommitExecutor coordinator, final TransactionChainListener listener) { super(chains); this.chainId = chainId; @@ -72,26 +68,30 @@ public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTrans } @Override - public synchronized CheckedFuture submit( + public CheckedFuture submit( final DOMDataWriteTransaction transaction, final Iterable cohorts) { + checkNotClosed(); + return coordinator.submit(transaction, cohorts, Optional. of(this)); } @Override - public synchronized void close() { + public void close() { super.close(); + for (DOMStoreTransactionChain subChain : getTxFactories().values()) { subChain.close(); } if (!failed) { LOG.debug("Transaction chain {} successfully finished.", this); + // FIXME: this event should be emitted once all operations complete listener.onTransactionChainSuccessful(this); } } @Override - public synchronized void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) { + public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) { failed = true; LOG.debug("Transaction chain {} failed.", this, cause); listener.onTransactionChainFailed(this, tx, cause); diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataCommitCoordinatorImpl.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataCommitCoordinatorImpl.java index 3fde8d360f..77cf105ed6 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataCommitCoordinatorImpl.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataCommitCoordinatorImpl.java @@ -6,13 +6,18 @@ */ package org.opendaylight.controller.md.sal.dom.broker.impl; -import java.util.List; +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import com.google.common.base.Throwables; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.CheckedFuture; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.RejectedExecutionException; - -import javax.annotation.concurrent.GuardedBy; - +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException; import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction; import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort; @@ -21,17 +26,6 @@ import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Function; -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableList; -import 
com.google.common.collect.ImmutableList.Builder; -import com.google.common.util.concurrent.CheckedFuture; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; - /** * * Implementation of blocking three phase commit coordinator, which which @@ -49,28 +43,8 @@ import com.google.common.util.concurrent.ListeningExecutorService; public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { private static final Logger LOG = LoggerFactory.getLogger(DOMDataCommitCoordinatorImpl.class); - - /** - * Runs AND binary operation between all booleans in supplied iteration of booleans. - * - * This method will stop evaluating iterables if first found is false. - */ - private static final Function, Boolean> AND_FUNCTION = new Function, Boolean>() { - - @Override - public Boolean apply(final Iterable input) { - for(boolean value : input) { - if(!value) { - return Boolean.FALSE; - } - } - return Boolean.TRUE; - } - }; - - private final ListeningExecutorService executor; - private final DurationStatsTracker commitStatsTracker = new DurationStatsTracker(); + private final ListeningExecutorService executor; /** * @@ -153,19 +127,17 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { } /** - * * Implementation of blocking three-phase commit-coordination tasks without - * support of cancelation. - * + * support of cancellation. */ - private static class CommitCoordinationTask implements Callable { - + private static final class CommitCoordinationTask implements Callable { + private static final AtomicReferenceFieldUpdater PHASE_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase"); private final DOMDataWriteTransaction tx; private final Iterable cohorts; private final DurationStatsTracker commitStatTracker; - - @GuardedBy("this") - private CommitPhase currentPhase; + private final int cohortSize; + private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED; public CommitCoordinationTask(final DOMDataWriteTransaction transaction, final Iterable cohorts, @@ -173,25 +145,26 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { final DurationStatsTracker commitStatTracker) { this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null"); this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null"); - this.currentPhase = CommitPhase.SUBMITTED; this.commitStatTracker = commitStatTracker; + this.cohortSize = Iterables.size(cohorts); } @Override public Void call() throws TransactionCommitFailedException { + final long startTime = commitStatTracker != null ? 
System.nanoTime() : 0; - long startTime = System.nanoTime(); try { canCommitBlocking(); preCommitBlocking(); commitBlocking(); return null; } catch (TransactionCommitFailedException e) { - LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, e); - abortBlocking(e); + final CommitPhase phase = currentPhase; + LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e); + abortBlocking(e, phase); throw e; } finally { - if(commitStatTracker != null) { + if (commitStatTracker != null) { commitStatTracker.addDuration(System.nanoTime() - startTime); } } @@ -210,78 +183,63 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { * */ private void canCommitBlocking() throws TransactionCommitFailedException { - final Boolean canCommitResult = canCommitAll().checkedGet(); - if (!canCommitResult) { - throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available."); + for (ListenableFuture canCommit : canCommitAll()) { + try { + final Boolean result = (Boolean)canCommit.get(); + if (result == null || !result) { + throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available."); + } + } catch (InterruptedException | ExecutionException e) { + throw TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e); + } } } /** * - * Invokes preCommit on underlying cohorts and blocks till - * all results are returned. + * Invokes canCommit on underlying cohorts and returns composite future + * which will contains {@link Boolean#TRUE} only and only if + * all cohorts returned true. * - * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current - * state is not CAN_COMMIT - * throws IllegalStateException. + * Valid state transition is from SUBMITTED to CAN_COMMIT, + * if currentPhase is not SUBMITTED throws IllegalStateException. * - * @throws TransactionCommitFailedException - * If one of cohorts failed preCommit + * @return List of all cohorts futures from can commit phase. * */ - private void preCommitBlocking() throws TransactionCommitFailedException { - preCommitAll().checkedGet(); + private ListenableFuture[] canCommitAll() { + changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT); + + final ListenableFuture[] ops = new ListenableFuture[cohortSize]; + int i = 0; + for (DOMStoreThreePhaseCommitCohort cohort : cohorts) { + ops[i++] = cohort.canCommit(); + } + return ops; } /** * - * Invokes commit on underlying cohorts and blocks till + * Invokes preCommit on underlying cohorts and blocks till * all results are returned. * - * Valid state transition is from PRE_COMMIT to COMMIT, if not throws - * IllegalStateException. + * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current + * state is not CAN_COMMIT + * throws IllegalStateException. * * @throws TransactionCommitFailedException * If one of cohorts failed preCommit * */ - private void commitBlocking() throws TransactionCommitFailedException { - commitAll().checkedGet(); - } - - /** - * Aborts transaction. - * - * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all - * cohorts, blocks - * for all results. If any of the abort failed throws - * IllegalStateException, - * which will contains originalCause as suppressed Exception. - * - * If aborts we're successful throws supplied exception - * - * @param originalCause - * Exception which should be used to fail transaction for - * consumers of transaction - * future and listeners of transaction failure. 
- * @throws TransactionCommitFailedException - * on invocation of this method. - * originalCa - * @throws IllegalStateException - * if abort failed. - */ - private void abortBlocking(final TransactionCommitFailedException originalCause) - throws TransactionCommitFailedException { - LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, originalCause); - Exception cause = originalCause; + private void preCommitBlocking() throws TransactionCommitFailedException { + final ListenableFuture[] preCommitFutures = preCommitAll(); try { - abortAsyncAll().get(); + for(ListenableFuture future : preCommitFutures) { + future.get(); + } } catch (InterruptedException | ExecutionException e) { - LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e); - cause = new IllegalStateException("Abort failed.", e); - cause.addSuppressed(e); + throw TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER.apply(e); } - Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class); } /** @@ -295,27 +253,41 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { * state is not CAN_COMMIT * throws IllegalStateException. * - * @return Future which will complete once all cohorts completed - * preCommit. - * Future throws TransactionCommitFailedException - * If any of cohorts failed preCommit + * @return List of all cohorts futures from can commit phase. * */ - private CheckedFuture preCommitAll() { + private ListenableFuture[] preCommitAll() { changeStateFrom(CommitPhase.CAN_COMMIT, CommitPhase.PRE_COMMIT); - Builder> ops = ImmutableList.builder(); + + final ListenableFuture[] ops = new ListenableFuture[cohortSize]; + int i = 0; for (DOMStoreThreePhaseCommitCohort cohort : cohorts) { - ops.add(cohort.preCommit()); + ops[i++] = cohort.preCommit(); + } + return ops; + } + + /** + * + * Invokes commit on underlying cohorts and blocks till + * all results are returned. + * + * Valid state transition is from PRE_COMMIT to COMMIT, if not throws + * IllegalStateException. + * + * @throws TransactionCommitFailedException + * If one of cohorts failed preCommit + * + */ + private void commitBlocking() throws TransactionCommitFailedException { + final ListenableFuture[] commitFutures = commitAll(); + try { + for(ListenableFuture future : commitFutures) { + future.get(); + } + } catch (InterruptedException | ExecutionException e) { + throw TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e); } - /* - * We are returing all futures as list, not only succeeded ones in - * order to fail composite future if any of them failed. - * See Futures.allAsList for this description. - */ - @SuppressWarnings({ "unchecked", "rawtypes" }) - ListenableFuture compositeResult = (ListenableFuture) Futures.allAsList(ops.build()); - return MappingCheckedFuture.create(compositeResult, - TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER); } /** @@ -327,80 +299,80 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { * Valid state transition is from PRE_COMMIT to COMMIT, if not throws * IllegalStateException * - * @return Future which will complete once all cohorts completed - * commit. - * Future throws TransactionCommitFailedException - * If any of cohorts failed preCommit + * @return List of all cohorts futures from can commit phase. 
* */ - private CheckedFuture commitAll() { + private ListenableFuture[] commitAll() { changeStateFrom(CommitPhase.PRE_COMMIT, CommitPhase.COMMIT); - Builder> ops = ImmutableList.builder(); + + final ListenableFuture[] ops = new ListenableFuture[cohortSize]; + int i = 0; for (DOMStoreThreePhaseCommitCohort cohort : cohorts) { - ops.add(cohort.commit()); + ops[i++] = cohort.commit(); } - /* - * We are returing all futures as list, not only succeeded ones in - * order to fail composite future if any of them failed. - * See Futures.allAsList for this description. - */ - @SuppressWarnings({ "unchecked", "rawtypes" }) - ListenableFuture compositeResult = (ListenableFuture) Futures.allAsList(ops.build()); - return MappingCheckedFuture.create(compositeResult, - TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER); + return ops; } /** + * Aborts transaction. * - * Invokes canCommit on underlying cohorts and returns composite future - * which will contains {@link Boolean#TRUE} only and only if - * all cohorts returned true. - * - * Valid state transition is from SUBMITTED to CAN_COMMIT, - * if currentPhase is not SUBMITTED throws IllegalStateException. + * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all + * cohorts, blocks + * for all results. If any of the abort failed throws + * IllegalStateException, + * which will contains originalCause as suppressed Exception. * - * @return Future which will complete once all cohorts completed - * preCommit. - * Future throws TransactionCommitFailedException - * If any of cohorts failed preCommit + * If aborts we're successful throws supplied exception * + * @param originalCause + * Exception which should be used to fail transaction for + * consumers of transaction + * future and listeners of transaction failure. + * @param phase phase in which the problem ensued + * @throws TransactionCommitFailedException + * on invocation of this method. + * originalCa + * @throws IllegalStateException + * if abort failed. */ - private CheckedFuture canCommitAll() { - changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT); - Builder> canCommitOperations = ImmutableList.builder(); - for (DOMStoreThreePhaseCommitCohort cohort : cohorts) { - canCommitOperations.add(cohort.canCommit()); + private void abortBlocking(final TransactionCommitFailedException originalCause, final CommitPhase phase) + throws TransactionCommitFailedException { + LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, originalCause); + Exception cause = originalCause; + try { + abortAsyncAll(phase).get(); + } catch (InterruptedException | ExecutionException e) { + LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e); + cause = new IllegalStateException("Abort failed.", e); + cause.addSuppressed(e); } - ListenableFuture> allCanCommits = Futures.allAsList(canCommitOperations.build()); - ListenableFuture allSuccessFuture = Futures.transform(allCanCommits, AND_FUNCTION); - return MappingCheckedFuture.create(allSuccessFuture, - TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER); - + Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class); } /** - * * Invokes abort on underlying cohorts and returns future which - * completes - * once all abort on cohorts are completed. + * completes once all abort on cohorts are completed. * + * @param phase phase in which the problem ensued * @return Future which will complete once all cohorts completed * abort. 
- * */ - private ListenableFuture abortAsyncAll() { - changeStateFrom(currentPhase, CommitPhase.ABORT); - Builder> ops = ImmutableList.builder(); + private ListenableFuture abortAsyncAll(final CommitPhase phase) { + changeStateFrom(phase, CommitPhase.ABORT); + + final ListenableFuture[] ops = new ListenableFuture[cohortSize]; + int i = 0; for (DOMStoreThreePhaseCommitCohort cohort : cohorts) { - ops.add(cohort.abort()); + ops[i++] = cohort.abort(); } + /* - * We are returing all futures as list, not only succeeded ones in + * We are returning all futures as list, not only succeeded ones in * order to fail composite future if any of them failed. * See Futures.allAsList for this description. */ @SuppressWarnings({ "unchecked", "rawtypes" }) - ListenableFuture compositeResult = (ListenableFuture) Futures.allAsList(ops.build()); + ListenableFuture compositeResult = (ListenableFuture) Futures.allAsList(ops); return compositeResult; } @@ -423,14 +395,13 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor { * @throws IllegalStateException * If currentState of task does not match expected state */ - private synchronized void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) { - Preconditions.checkState(currentPhase.equals(currentExpected), - "Invalid state transition: Tx: %s current state: %s new state: %s", tx.getIdentifier(), - currentPhase, newState); - LOG.debug("Transaction {}: Phase {} Started ", tx.getIdentifier(), newState); - currentPhase = newState; - }; + private void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) { + final boolean success = PHASE_UPDATER.compareAndSet(this, currentExpected, newState); + Preconditions.checkState(success, "Invalid state transition: Tx: %s expected: %s current: %s target: %s", + tx.getIdentifier(), currentExpected, currentPhase, newState); + LOG.debug("Transaction {}: Phase {} Started", tx.getIdentifier(), newState); + }; } } diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java index 5e2a417d28..124bf9f0be 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java @@ -7,6 +7,9 @@ */ package org.opendaylight.controller.md.sal.dom.broker.impl; +import com.google.common.base.Optional; +import com.google.common.util.concurrent.CheckedFuture; +import java.util.Map; import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException; import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction; @@ -14,12 +17,7 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import com.google.common.base.Optional; -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.CheckedFuture; - /** - * * Read Only Transaction, which is composed of several * {@link DOMStoreReadTransaction} transactions. 
Subtransaction is selected by * {@link LogicalDatastoreType} type parameter in @@ -30,7 +28,7 @@ class DOMForwardedReadOnlyTransaction extends DOMDataReadOnlyTransaction { protected DOMForwardedReadOnlyTransaction(final Object identifier, - final ImmutableMap backingTxs) { + final Map backingTxs) { super(identifier, backingTxs); } @@ -40,9 +38,10 @@ class DOMForwardedReadOnlyTransaction extends return getSubtransaction(store).read(path); } - @Override public CheckedFuture exists( - LogicalDatastoreType store, - YangInstanceIdentifier path) { + @Override + public CheckedFuture exists( + final LogicalDatastoreType store, + final YangInstanceIdentifier path) { return getSubtransaction(store).exists(path); } @@ -50,5 +49,4 @@ class DOMForwardedReadOnlyTransaction extends public void close() { closeSubtransactions(); } - } diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java index 67351ec945..662d48afdb 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java @@ -6,6 +6,9 @@ * and is available at http://www.eclipse.org/legal/epl-v10.html */package org.opendaylight.controller.md.sal.dom.broker.impl; +import com.google.common.base.Optional; +import com.google.common.util.concurrent.CheckedFuture; +import java.util.Map; import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException; import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction; @@ -13,10 +16,6 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransactio import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import com.google.common.base.Optional; -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.CheckedFuture; - /** * * Read-Write Transaction, which is composed of several @@ -35,12 +34,9 @@ import com.google.common.util.concurrent.CheckedFuture; * transactions. 
* */ - -class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction implements - DOMDataReadWriteTransaction { - +final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction implements DOMDataReadWriteTransaction { protected DOMForwardedReadWriteTransaction(final Object identifier, - final ImmutableMap backingTxs, + final Map backingTxs, final DOMDataCommitImplementation commitImpl) { super(identifier, backingTxs, commitImpl); } @@ -51,9 +47,10 @@ class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction exists( - LogicalDatastoreType store, - YangInstanceIdentifier path) { + @Override + public CheckedFuture exists( + final LogicalDatastoreType store, + final YangInstanceIdentifier path) { return getSubtransaction(store).exists(path); } } diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java index 5d4ad4d803..8c84af11ff 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java +++ b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java @@ -7,10 +7,15 @@ */ package org.opendaylight.controller.md.sal.dom.broker.impl; -import static com.google.common.base.Preconditions.checkState; - -import javax.annotation.concurrent.GuardedBy; - +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.CheckedFuture; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import org.opendaylight.controller.md.sal.common.api.TransactionStatus; import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException; @@ -21,18 +26,12 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction; import org.opendaylight.yangtools.yang.common.RpcResult; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; - -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.CheckedFuture; -import com.google.common.util.concurrent.ListenableFuture; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * - * * Read-Write Transaction, which is composed of several - * {@link DOMStoreWriteTransaction} transactions. Subtransaction is selected by + * {@link DOMStoreWriteTransaction} transactions. A sub-transaction is selected by * {@link LogicalDatastoreType} type parameter in: * *
    @@ -46,114 +45,106 @@ import com.google.common.util.concurrent.ListenableFuture; * invocation with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} for underlying * transactions. * - * @param - * Subtype of {@link DOMStoreWriteTransaction} which is used as + * @param Subtype of {@link DOMStoreWriteTransaction} which is used as * subtransaction. */ class DOMForwardedWriteTransaction extends AbstractDOMForwardedCompositeTransaction implements DOMDataWriteTransaction { + @SuppressWarnings("rawtypes") + private static final AtomicReferenceFieldUpdater IMPL_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, DOMDataCommitImplementation.class, "commitImpl"); + @SuppressWarnings("rawtypes") + private static final AtomicReferenceFieldUpdater FUTURE_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture"); + private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class); + private static final Future CANCELLED_FUTURE = Futures.immediateCancelledFuture(); /** - * Implementation of real commit. - * - * Transaction can not be commited if commitImpl is null, - * so this seting this property to null is also used to - * prevent write to - * already commited / canceled transaction {@link #checkNotCanceled() - * - * + * Implementation of real commit. It also acts as an indication that + * the transaction is running -- which we flip atomically using + * {@link #IMPL_UPDATER}. */ - @GuardedBy("this") private volatile DOMDataCommitImplementation commitImpl; /** + * Future task of transaction commit. It starts off as null, but is + * set appropriately on {@link #submit()} and {@link #cancel()} via + * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}. * - * Future task of transaction commit. - * - * This value is initially null, and is once updated if transaction - * is commited {@link #commit()}. - * If this future exists, transaction MUST not be commited again - * and all modifications should fail. See {@link #checkNotCommited()}. - * + * Lazy set is safe for use because it is only referenced to in the + * {@link #cancel()} slow path, where we will busy-wait for it. The + * fast path gets the benefit of a store-store barrier instead of the + * usual store-load barrier. 
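The Javadoc just above argues that publishing commitFuture with AtomicReferenceFieldUpdater.lazySet is safe because the only reader that must see the value, the cancel() slow path, busy-waits for it. Below is a self-contained sketch of that publish-and-spin pairing; the names are invented rather than the actual broker classes, and the barrier comment paraphrases the Javadoc above.

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    public class LazyPublishSketch {
        private static final AtomicReferenceFieldUpdater<LazyPublishSketch, String> RESULT_UPDATER =
                AtomicReferenceFieldUpdater.newUpdater(LazyPublishSketch.class, String.class, "result");

        private volatile String result;

        void fastPath() {
            // lazySet only needs a store-store barrier: this thread's later writes cannot
            // be reordered before it, but readers may observe the value slightly later.
            RESULT_UPDATER.lazySet(this, "done");
        }

        String slowPath() {
            // The reader tolerates the delayed publication by spinning until the value
            // appears, mirroring how cancel() busy-waits for commitFuture.
            String r;
            do {
                r = result;
            } while (r == null);
            return r;
        }
    }

In the transaction itself the publication is guaranteed to be in flight before any spin starts, which is what makes the cheaper ordering acceptable.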
*/ - @GuardedBy("this") - private volatile CheckedFuture commitFuture; + private volatile Future commitFuture; protected DOMForwardedWriteTransaction(final Object identifier, - final ImmutableMap backingTxs, final DOMDataCommitImplementation commitImpl) { + final Map backingTxs, final DOMDataCommitImplementation commitImpl) { super(identifier, backingTxs); this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null."); } @Override public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode data) { - checkNotReady(); + checkRunning(commitImpl); getSubtransaction(store).write(path, data); } @Override public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) { - checkNotReady(); + checkRunning(commitImpl); getSubtransaction(store).delete(path); } @Override public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode data) { - checkNotReady(); + checkRunning(commitImpl); getSubtransaction(store).merge(path, data); } @Override - public synchronized boolean cancel() { - // Transaction is already canceled, we are safe to return true - final boolean cancelationResult; - if (commitImpl == null && commitFuture != null) { - // Transaction is submitted, we try to cancel future. - cancelationResult = commitFuture.cancel(false); - } else if(commitImpl == null) { + public boolean cancel() { + final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null); + if (impl != null) { + LOG.trace("Transaction {} cancelled before submit", getIdentifier()); + FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE); return true; - } else { - cancelationResult = true; - commitImpl = null; } - return cancelationResult; + // The transaction is in process of being submitted or cancelled. Busy-wait + // for the corresponding future. 
+ Future future; + do { + future = commitFuture; + } while (future == null); + + return future.cancel(false); } @Override - public synchronized ListenableFuture> commit() { + public ListenableFuture> commit() { return AbstractDataTransaction.convertToLegacyCommitFuture(submit()); } @Override - public CheckedFuture submit() { - checkNotReady(); + public CheckedFuture submit() { + final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null); + checkRunning(impl); - ImmutableList.Builder cohortsBuilder = ImmutableList.builder(); - for (DOMStoreWriteTransaction subTx : getSubtransactions()) { - cohortsBuilder.add(subTx.ready()); - } - ImmutableList cohorts = cohortsBuilder.build(); - commitFuture = commitImpl.submit(this, cohorts); - - /* - *We remove reference to Commit Implementation in order - *to prevent memory leak - */ - commitImpl = null; - return commitFuture; - } + final Collection txns = getSubtransactions(); + final Collection cohorts = new ArrayList<>(txns.size()); - private void checkNotReady() { - checkNotCommited(); - checkNotCanceled(); - } + // FIXME: deal with errors thrown by backed (ready and submit can fail in theory) + for (DOMStoreWriteTransaction txn : txns) { + cohorts.add(txn.ready()); + } - private void checkNotCanceled() { - Preconditions.checkState(commitImpl != null, "Transaction was canceled."); + final CheckedFuture ret = impl.submit(this, cohorts); + FUTURE_UPDATER.lazySet(this, ret); + return ret; } - private void checkNotCommited() { - checkState(commitFuture == null, "Transaction was already submited."); + private void checkRunning(final DOMDataCommitImplementation impl) { + Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier()); } -} \ No newline at end of file +} diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java index e57d08f173..674d2ff44a 100644 --- a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java +++ b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java @@ -1,12 +1,19 @@ package org.opendaylight.controller.md.sal.dom.broker.impl; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION; import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL; - +import com.google.common.base.Optional; +import com.google.common.collect.ImmutableMap; +import com.google.common.util.concurrent.ForwardingExecutorService; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -15,7 +22,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; 
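Returning to the DOMForwardedWriteTransaction hunks above: submit() and cancel() both claim the transaction by atomically swapping the commit implementation to null, so exactly one of them proceeds and no synchronized block is needed. A simplified sketch of that hand-off follows; the real cancel() additionally busy-waits for and cancels the in-flight commit future, which is omitted here, and all names are invented.

    import java.util.concurrent.atomic.AtomicReference;

    public class SingleWinnerSketch {
        // Non-null while the "transaction" is open; whoever swaps it to null owns shutdown.
        private final AtomicReference<Runnable> open = new AtomicReference<Runnable>(new Runnable() {
            @Override
            public void run() {
                System.out.println("commit work runs exactly once");
            }
        });

        boolean submit() {
            final Runnable claimed = open.getAndSet(null);
            if (claimed == null) {
                return false;   // already submitted or cancelled by someone else
            }
            claimed.run();
            return true;
        }

        boolean cancel() {
            // Same swap: a non-null result means the transaction never reached submit().
            return open.getAndSet(null) != null;
        }

        public static void main(final String[] args) {
            final SingleWinnerSketch tx = new SingleWinnerSketch();
            System.out.println("submit: " + tx.submit());   // true, runs the work
            System.out.println("cancel: " + tx.cancel());   // false, too late to cancel
        }
    }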
import java.util.concurrent.atomic.AtomicReference; - import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -39,15 +45,6 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; import org.opendaylight.yangtools.yang.model.api.SchemaContext; -import com.google.common.base.Optional; -import com.google.common.collect.ImmutableMap; -import com.google.common.util.concurrent.ForwardingExecutorService; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; - public class DOMBrokerTest { private SchemaContext schemaContext; @@ -76,7 +73,7 @@ public class DOMBrokerTest { commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor()); futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB"); executor = new DeadlockDetectingListeningExecutorService(commitExecutor, - TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor); + TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor); domBroker = new DOMDataBrokerImpl(stores, executor); } @@ -215,19 +212,19 @@ public class DOMBrokerTest { TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() { @Override - public void onDataChanged( AsyncDataChangeEvent> change ) { + public void onDataChanged( final AsyncDataChangeEvent> change ) { DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction(); writeTx.put( OPERATIONAL, TestModel.TEST2_PATH, ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) ); Futures.addCallback( writeTx.submit(), new FutureCallback() { @Override - public void onSuccess( Void result ) { + public void onSuccess( final Void result ) { commitCompletedLatch.countDown(); } @Override - public void onFailure( Throwable t ) { + public void onFailure( final Throwable t ) { caughtCommitEx.set( t ); commitCompletedLatch.countDown(); } @@ -271,7 +268,7 @@ public class DOMBrokerTest { TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() { @Override - public void onDataChanged( AsyncDataChangeEvent> change ) { + public void onDataChanged( final AsyncDataChangeEvent> change ) { DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction(); writeTx.put( OPERATIONAL, TestModel.TEST2_PATH, ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) ); @@ -333,7 +330,7 @@ public class DOMBrokerTest { private final CountDownLatch latch = new CountDownLatch( 1 ); @Override - public void onDataChanged( AsyncDataChangeEvent> change ) { + public void onDataChanged( final AsyncDataChangeEvent> change ) { this.change = change; latch.countDown(); } @@ -347,7 +344,7 @@ public class DOMBrokerTest { ExecutorService delegate; - public CommitExecutorService( ExecutorService delegate ) { + public CommitExecutorService( final ExecutorService delegate ) { this.delegate = delegate; } diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImplTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImplTest.java index d796930302..613b7a60ab 100644 --- 
a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImplTest.java +++ b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImplTest.java @@ -9,6 +9,7 @@ package org.opendaylight.controller.md.sal.dom.broker.impl.jmx; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import org.junit.Test; import org.opendaylight.yangtools.util.DurationStatsTracker; @@ -29,13 +30,9 @@ public class CommitStatsMXBeanImplTest { commitStatsTracker.addDuration(100); - String prefix = "100.0 ns"; assertEquals("getTotalCommits", 1L, bean.getTotalCommits()); - assertEquals("getLongestCommitTime starts with \"" + prefix + "\"", true, - bean.getLongestCommitTime().startsWith("100.0 ns")); - assertEquals("getShortestCommitTime starts with \"" + prefix + "\"", true, - bean.getShortestCommitTime().startsWith(prefix)); - assertEquals("getAverageCommitTime starts with \"" + prefix + "\"", true, - bean.getAverageCommitTime().startsWith(prefix)); + assertNotNull(bean.getLongestCommitTime()); + assertNotNull(bean.getShortestCommitTime()); + assertNotNull(bean.getAverageCommitTime()); } } diff --git a/opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLAdapter.java b/opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLAdapter.java index 496f27ecaa..d1f11ba9a3 100644 --- a/opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLAdapter.java +++ b/opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLAdapter.java @@ -126,7 +126,7 @@ public class XSQLAdapter extends Thread implements SchemaContextListener { return this.bluePrint; } - public List collectModuleRoots(XSQLBluePrintNode table) { + public List collectModuleRoots(XSQLBluePrintNode table,LogicalDatastoreType type) { if (table.getParent().isModule()) { try { List result = new LinkedList(); @@ -136,8 +136,9 @@ public class XSQLAdapter extends Thread implements SchemaContextListener { .toInstance(); DOMDataReadTransaction t = this.domDataBroker .newReadOnlyTransaction(); - Object node = t.read(LogicalDatastoreType.OPERATIONAL, + Object node = t.read(type, instanceIdentifier).get(); + node = XSQLODLUtils.get(node, "reference"); if (node == null) { return result; @@ -157,14 +158,18 @@ public class XSQLAdapter extends Thread implements SchemaContextListener { XSQLAdapter.log(err); } } else { - return collectModuleRoots(table.getParent()); + return collectModuleRoots(table.getParent(),type); } return null; } public void execute(JDBCResultSet rs) { List tables = rs.getTables(); - List roots = collectModuleRoots(tables.get(0)); + List roots = collectModuleRoots(tables.get(0),LogicalDatastoreType.OPERATIONAL); + roots.addAll(collectModuleRoots(tables.get(0),LogicalDatastoreType.CONFIGURATION)); + if(roots.isEmpty()){ + rs.setFinished(true); + } XSQLBluePrintNode main = rs.getMainTable(); List tasks = new LinkedList(); @@ -487,13 +492,15 @@ public class XSQLAdapter extends Thread implements SchemaContextListener { out.print(prompt); char c = 0; byte data[] = new byte[1]; - while (c != '\n') { + while (!socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && c != '\n') { try { in.read(data); c = (char) data[0]; inputString.append(c); } catch (Exception err) { err.printStackTrace(out); + stopped = true; + break; } } diff 
--git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java index 3e74861816..74fa73afb9 100644 --- a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java +++ b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java @@ -8,14 +8,12 @@ package org.opendaylight.controller.md.sal.dom.store.impl; import static com.google.common.base.Preconditions.checkState; - import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -62,8 +60,7 @@ import org.slf4j.LoggerFactory; * to implement {@link DOMStore} contract. * */ -public class InMemoryDOMDataStore implements DOMStore, Identifiable, SchemaContextListener, - TransactionReadyPrototype,AutoCloseable { +public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable, SchemaContextListener, AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class); private static final ListenableFuture SUCCESSFUL_FUTURE = Futures.immediateFuture(null); @@ -82,29 +79,26 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create(); private final ListenerTree listenerTree = ListenerTree.create(); private final AtomicLong txCounter = new AtomicLong(0); - private final ListeningExecutorService listeningExecutor; private final QueuedNotificationManager, DOMImmutableDataChangeEvent> dataChangeListenerNotificationManager; private final ExecutorService dataChangeListenerExecutor; - - private final ExecutorService domStoreExecutor; + private final ListeningExecutorService commitExecutor; private final boolean debugTransactions; private final String name; private volatile AutoCloseable closeable; - public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor, + public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor, final ExecutorService dataChangeListenerExecutor) { - this(name, domStoreExecutor, dataChangeListenerExecutor, + this(name, commitExecutor, dataChangeListenerExecutor, InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false); } - public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor, + public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor, final ExecutorService dataChangeListenerExecutor, final int maxDataChangeListenerQueueSize, final boolean debugTransactions) { this.name = Preconditions.checkNotNull(name); - this.domStoreExecutor = Preconditions.checkNotNull(domStoreExecutor); - this.listeningExecutor = MoreExecutors.listeningDecorator(this.domStoreExecutor); + this.commitExecutor = Preconditions.checkNotNull(commitExecutor); 
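The InMemoryDOMDataStore constructors above now require a ListeningExecutorService instead of wrapping a plain ExecutorService with MoreExecutors.listeningDecorator internally. A short sketch of what that shifts onto the caller, using only the Guava API; the class name is illustrative.

    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.concurrent.Executors;

    final class StoreSketch {
        private final ListeningExecutorService commitExecutor;

        // The store no longer decorates; whoever constructs it chooses and owns the executor.
        StoreSketch(final ListeningExecutorService commitExecutor) {
            this.commitExecutor = commitExecutor;
        }

        static StoreSketch create() {
            return new StoreSketch(MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()));
        }
    }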
this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor); this.debugTransactions = debugTransactions; @@ -114,7 +108,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch "DataChangeListenerQueueMgr"); } - public void setCloseable(AutoCloseable closeable) { + public void setCloseable(final AutoCloseable closeable) { this.closeable = closeable; } @@ -123,7 +117,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch } public ExecutorService getDomStoreExecutor() { - return domStoreExecutor; + return commitExecutor; } @Override @@ -158,7 +152,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch @Override public void close() { - ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS); + ExecutorServiceUtil.tryGracefulShutdown(commitExecutor, 30, TimeUnit.SECONDS); ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS); if(closeable != null) { @@ -215,80 +209,95 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch } @Override - public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction writeTx) { - LOG.debug("Tx: {} is submitted. Modifications: {}", writeTx.getIdentifier(), writeTx.getMutatedView()); - return new ThreePhaseCommitImpl(writeTx); + protected void transactionAborted(final SnapshotBackedWriteTransaction tx) { + LOG.debug("Tx: {} is closed.", tx.getIdentifier()); + } + + @Override + protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) { + LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree); + return new ThreePhaseCommitImpl(tx, tree); } private Object nextIdentifier() { return name + "-" + txCounter.getAndIncrement(); } - private class DOMStoreTransactionChainImpl implements DOMStoreTransactionChain, TransactionReadyPrototype { - + private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain { + @GuardedBy("this") + private SnapshotBackedWriteTransaction allocatedTransaction; + @GuardedBy("this") + private DataTreeSnapshot readySnapshot; @GuardedBy("this") - private SnapshotBackedWriteTransaction latestOutstandingTx; - private boolean chainFailed = false; + @GuardedBy("this") private void checkFailed() { Preconditions.checkState(!chainFailed, "Transaction chain is failed."); } - @Override - public synchronized DOMStoreReadTransaction newReadOnlyTransaction() { - final DataTreeSnapshot snapshot; + @GuardedBy("this") + private DataTreeSnapshot getSnapshot() { checkFailed(); - if (latestOutstandingTx != null) { - checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready."); - snapshot = latestOutstandingTx.getMutatedView(); + + if (allocatedTransaction != null) { + Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier()); + return readySnapshot; } else { - snapshot = dataTree.takeSnapshot(); + return dataTree.takeSnapshot(); } + } + + @GuardedBy("this") + private T recordTransaction(final T transaction) { + allocatedTransaction = transaction; + readySnapshot = null; + return transaction; + } + + @Override + public synchronized DOMStoreReadTransaction newReadOnlyTransaction() { + final DataTreeSnapshot snapshot = getSnapshot(); return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot); } 
@Override public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() { - final DataTreeSnapshot snapshot; - checkFailed(); - if (latestOutstandingTx != null) { - checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready."); - snapshot = latestOutstandingTx.getMutatedView(); - } else { - snapshot = dataTree.takeSnapshot(); - } - final SnapshotBackedReadWriteTransaction ret = new SnapshotBackedReadWriteTransaction(nextIdentifier(), - getDebugTransactions(), snapshot, this); - latestOutstandingTx = ret; - return ret; + final DataTreeSnapshot snapshot = getSnapshot(); + return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(), + getDebugTransactions(), snapshot, this)); } @Override public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() { - final DataTreeSnapshot snapshot; - checkFailed(); - if (latestOutstandingTx != null) { - checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready."); - snapshot = latestOutstandingTx.getMutatedView(); - } else { - snapshot = dataTree.takeSnapshot(); + final DataTreeSnapshot snapshot = getSnapshot(); + return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(), + getDebugTransactions(), snapshot, this)); + } + + @Override + protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) { + if (tx.equals(allocatedTransaction)) { + Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot); + allocatedTransaction = null; } - final SnapshotBackedWriteTransaction ret = new SnapshotBackedWriteTransaction(nextIdentifier(), - getDebugTransactions(), snapshot, this); - latestOutstandingTx = ret; - return ret; } @Override - public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction tx) { - DOMStoreThreePhaseCommitCohort storeCohort = InMemoryDOMDataStore.this.ready(tx); - return new ChainedTransactionCommitImpl(tx, storeCohort, this); + protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) { + Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction); + if (readySnapshot != null) { + // The snapshot should have been cleared + LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree); + } + + final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree); + readySnapshot = tree; + return new ChainedTransactionCommitImpl(tx, cohort, this); } @Override public void close() { - // FIXME: this call doesn't look right here - listeningExecutor is shared and owned // by the outer class. //listeningExecutor.shutdownNow(); @@ -297,31 +306,30 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction, final Throwable t) { chainFailed = true; - } public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) { - // If committed transaction is latestOutstandingTx we clear - // latestOutstandingTx - // field in order to base new transactions on Datastore Data Tree - // directly. 
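The transaction-chain hunks above replace latestOutstandingTx with two pieces of bookkeeping: the last allocated transaction and the snapshot it produced when it was readied, so each new transaction in the chain is based on its predecessor's outcome. A condensed sketch of that allocation rule, with the datastore types collapsed to plain strings and all names invented:

    final class ChainSketch {
        private Object allocatedTransaction;   // last transaction handed out, if any
        private String readySnapshot;          // snapshot that transaction produced on ready()

        private String baseSnapshot() {
            if (allocatedTransaction != null) {
                if (readySnapshot == null) {
                    throw new IllegalStateException("Previous transaction is not ready yet");
                }
                // The next transaction sees what the previous one is about to commit.
                return readySnapshot;
            }
            // Nothing outstanding: snapshot the data tree directly.
            return "data-tree-snapshot";
        }

        synchronized Object newWriteTransaction() {
            final String base = baseSnapshot();   // would seed the new transaction's modification
            final Object tx = "tx-over-" + base;
            allocatedTransaction = tx;
            readySnapshot = null;
            return tx;
        }

        synchronized void transactionReady(final Object tx, final String producedSnapshot) {
            if (!tx.equals(allocatedTransaction)) {
                throw new IllegalStateException("Mis-ordered ready transaction " + tx);
            }
            readySnapshot = producedSnapshot;
        }
    }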
- if (transaction.equals(latestOutstandingTx)) { - latestOutstandingTx = null; + // If the committed transaction was the one we allocated last, + // we clear it and the ready snapshot, so the next transaction + // allocated refers to the data tree directly. + if (transaction.equals(allocatedTransaction)) { + if (readySnapshot == null) { + LOG.warn("Transaction {} committed while no ready snapshot present", transaction); + } + + allocatedTransaction = null; + readySnapshot = null; } } - } private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort { - private final SnapshotBackedWriteTransaction transaction; private final DOMStoreThreePhaseCommitCohort delegate; - private final DOMStoreTransactionChainImpl txChain; protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) { - super(); this.transaction = transaction; this.delegate = delegate; this.txChain = txChain; @@ -355,29 +363,26 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch public void onSuccess(final Void result) { txChain.onTransactionCommited(transaction); } - }); return commitFuture; } - } private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort { - private final SnapshotBackedWriteTransaction transaction; private final DataTreeModification modification; private ResolveDataChangeEventsTask listenerResolver; private DataTreeCandidate candidate; - public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction) { + public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) { this.transaction = writeTransaction; - this.modification = transaction.getMutatedView(); + this.modification = modification; } @Override public ListenableFuture canCommit() { - return listeningExecutor.submit(new Callable() { + return commitExecutor.submit(new Callable() { @Override public Boolean call() throws TransactionCommitFailedException { try { @@ -401,7 +406,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch @Override public ListenableFuture preCommit() { - return listeningExecutor.submit(new Callable() { + return commitExecutor.submit(new Callable() { @Override public Void call() { candidate = dataTree.prepare(modification); @@ -425,7 +430,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable, Sch * The commit has to occur atomically with regard to listener * registrations. 
*/ - synchronized (this) { + synchronized (InMemoryDOMDataStore.this) { dataTree.commit(candidate); listenerResolver.resolve(dataChangeListenerNotificationManager); } diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java index dc1482c6ab..2ee8e182c2 100644 --- a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java +++ b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java @@ -7,6 +7,8 @@ */ package org.opendaylight.controller.md.sal.dom.store.impl; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; import java.util.concurrent.ExecutorService; import javax.annotation.Nullable; import org.opendaylight.controller.sal.core.api.model.SchemaService; @@ -57,7 +59,7 @@ public final class InMemoryDOMDataStoreFactory { @Nullable final InMemoryDOMDataStoreConfigProperties properties) { InMemoryDOMDataStoreConfigProperties actualProperties = properties; - if(actualProperties == null) { + if (actualProperties == null) { actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault(); } @@ -65,21 +67,18 @@ public final class InMemoryDOMDataStoreFactory { // task execution time to get higher throughput as DataChangeListeners typically provide // much of the business logic for a data model. If the executor queue size limit is reached, // subsequent submitted notifications will block the calling thread. 
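At the top of the hunk above, ThreePhaseCommitImpl.commit() switches its monitor from the cohort instance to the enclosing InMemoryDOMDataStore, so commits and data change listener registrations serialize on one shared lock. A stripped-down sketch of why the enclosing object is the right monitor; the names are illustrative, not the datastore API.

    final class StoreLockSketch {
        private final Object registrationLock = new Object(); // stands in for the store instance

        // Listener registration and commit must observe each other atomically,
        // so both synchronize on the same object owned by the store.
        void registerListener(final Runnable listener) {
            synchronized (registrationLock) {
                // record the listener against the current data tree state
            }
        }

        final class Commit {
            void commit() {
                // synchronized (this) would only lock this Commit instance and could
                // interleave freely with registerListener(); lock the store instead.
                synchronized (registrationLock) {
                    // apply the candidate and resolve change events
                }
            }
        }
    }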
- int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize(); int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize(); ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool( dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" ); - ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor( - actualProperties.getMaxDataStoreExecutorQueueSize(), "DOMStore-" + name ); - - InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name, - domStoreExecutor, dataChangeListenerExecutor, + final ListeningExecutorService commitExecutor = MoreExecutors.sameThreadExecutor(); + final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name, + commitExecutor, dataChangeListenerExecutor, actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions); - if(schemaService != null) { + if (schemaService != null) { schemaService.registerSchemaContextListener(dataStore); } diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadWriteTransaction.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadWriteTransaction.java index 2ae7425bbb..30fa6da58b 100644 --- a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadWriteTransaction.java +++ b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadWriteTransaction.java @@ -8,16 +8,13 @@ package org.opendaylight.controller.md.sal.dom.store.impl; import static com.google.common.base.Preconditions.checkNotNull; - import com.google.common.base.Optional; import com.google.common.util.concurrent.CheckedFuture; import com.google.common.util.concurrent.Futures; - import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException; import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode; -import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification; import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -27,9 +24,7 @@ import org.slf4j.LoggerFactory; * and executed according to {@link TransactionReadyPrototype}. 
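In the InMemoryDOMDataStoreFactory hunk above, the dedicated bounded single-thread DOM store executor is dropped and the store is handed MoreExecutors.sameThreadExecutor(), so the three-phase commit callables run on the submitting thread. A tiny sketch of that direct-execution behaviour; it uses newDirectExecutorService(), the newer Guava name for the same idea, and the class name is invented.

    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.concurrent.Callable;

    public class DirectExecutorSketch {
        public static void main(final String[] args) throws Exception {
            final ListeningExecutorService commitExecutor = MoreExecutors.newDirectExecutorService();

            // submit() runs the callable immediately on this thread, so the returned future
            // is already done; this keeps canCommit/preCommit/commit hops cheap for the
            // in-memory store, at the cost of doing the work on the caller's thread.
            final ListenableFuture<Boolean> canCommit = commitExecutor.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() {
                    return Boolean.TRUE;
                }
            });
            System.out.println("canCommit done immediately: " + canCommit.isDone());
        }
    }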
* */ -class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction - implements DOMStoreReadWriteTransaction { - +final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction { private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class); /** @@ -49,16 +44,18 @@ class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction LOG.debug("Tx: {} Read: {}", getIdentifier(), path); checkNotNull(path, "Path must not be null."); - DataTreeModification dataView = getMutatedView(); - if(dataView == null) { - return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed")); - } - + final Optional> result; try { - return Futures.immediateCheckedFuture(dataView.readNode(path)); + result = readSnapshotNode(path); } catch (Exception e) { LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e); - return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e)); + return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e)); + } + + if (result == null) { + return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed")); + } else { + return Futures.immediateCheckedFuture(result); } } diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedWriteTransaction.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedWriteTransaction.java index 6129df7478..60a23403b3 100644 --- a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedWriteTransaction.java +++ b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedWriteTransaction.java @@ -8,11 +8,11 @@ package org.opendaylight.controller.md.sal.dom.store.impl; import static com.google.common.base.Preconditions.checkState; - import com.google.common.base.Objects.ToStringHelper; +import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; - +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort; import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction; import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier; @@ -29,11 +29,16 @@ import org.slf4j.LoggerFactory; * */ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction { - private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class); - private DataTreeModification mutableTree; - private boolean ready = false; - private TransactionReadyPrototype readyImpl; + private static final AtomicReferenceFieldUpdater READY_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl"); + private static final AtomicReferenceFieldUpdater TREE_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree"); + + // non-null when not ready + private volatile TransactionReadyPrototype readyImpl; + // non-null when not committed/closed + private volatile DataTreeModification mutableTree; 
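The two volatile fields introduced just above encode the transaction lifecycle without a lock or a boolean flag: readyImpl is non-null only while the transaction is still open, and mutableTree only until it is readied or closed. A compact sketch of driving such a state machine; the field types here merely stand in for the real ones.

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    public class TwoFieldLifecycleSketch {
        private static final AtomicReferenceFieldUpdater<TwoFieldLifecycleSketch, Runnable> READY_UPDATER =
                AtomicReferenceFieldUpdater.newUpdater(TwoFieldLifecycleSketch.class, Runnable.class, "readyImpl");

        // non-null while still open
        private volatile Runnable readyImpl = new Runnable() {
            @Override
            public void run() {
                System.out.println("handing the finished modification to the commit machinery");
            }
        };
        // non-null until readied or closed
        private volatile StringBuilder mutableTree = new StringBuilder();

        void write(final String data) {
            if (readyImpl == null) {
                throw new IllegalStateException("Transaction is no longer open");
            }
            mutableTree.append(data);
        }

        void ready() {
            // getAndSet(null) both checks the state and closes the write path in one step.
            final Runnable wasReady = READY_UPDATER.getAndSet(this, null);
            if (wasReady == null) {
                throw new IllegalStateException("Transaction is no longer open");
            }
            mutableTree = null;   // the patch uses lazySet here; a plain write keeps the sketch short
            wasReady.run();
        }
    }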
/** * Creates new write-only transaction. @@ -48,27 +53,23 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug, final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) { super(identifier, debug); - mutableTree = snapshot.newModification(); this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null."); + mutableTree = snapshot.newModification(); LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot); } - @Override - public void close() { - LOG.debug("Store transaction: {} : Closed", getIdentifier()); - this.mutableTree = null; - this.readyImpl = null; - } - @Override public void write(final YangInstanceIdentifier path, final NormalizedNode data) { checkNotReady(); + + final DataTreeModification tree = mutableTree; + LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data); + try { - LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data); - mutableTree.write(path, data); + tree.write(path, data); // FIXME: Add checked exception } catch (Exception e) { - LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e); + LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e); // Rethrow original ones if they are subclasses of RuntimeException // or Error Throwables.propagateIfPossible(e); @@ -80,12 +81,15 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme @Override public void merge(final YangInstanceIdentifier path, final NormalizedNode data) { checkNotReady(); + + final DataTreeModification tree = mutableTree; + LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data); + try { - LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data); - mutableTree.merge(path, data); + tree.merge(path, data); // FIXME: Add checked exception } catch (Exception e) { - LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e); + LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e); // Rethrow original ones if they are subclasses of RuntimeException // or Error Throwables.propagateIfPossible(e); @@ -97,12 +101,15 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme @Override public void delete(final YangInstanceIdentifier path) { checkNotReady(); + + final DataTreeModification tree = mutableTree; + LOG.debug("Tx: {} Delete: {}", getIdentifier(), path); + try { - LOG.debug("Tx: {} Delete: {}", getIdentifier(), path); - mutableTree.delete(path); + tree.delete(path); // FIXME: Add checked exception } catch (Exception e) { - LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, mutableTree, e); + LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e); // Rethrow original ones if they are subclasses of RuntimeException // or Error Throwables.propagateIfPossible(e); @@ -111,30 +118,49 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme } } - protected final boolean isReady() { - return ready; + /** + * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does + * not allow data access after the transaction has been closed or readied. 
+ * + * @param path Path to read + * @return null if the the transaction has been closed; + */ + protected final Optional> readSnapshotNode(final YangInstanceIdentifier path) { + return readyImpl == null ? null : mutableTree.readNode(path); } - protected final void checkNotReady() { - checkState(!ready, "Transaction %s is ready. No further modifications allowed.", getIdentifier()); + private final void checkNotReady() { + checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.", getIdentifier()); } @Override - public synchronized DOMStoreThreePhaseCommitCohort ready() { - checkState(!ready, "Transaction %s is already ready.", getIdentifier()); - ready = true; + public DOMStoreThreePhaseCommitCohort ready() { + final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null); + checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier()); + LOG.debug("Store transaction: {} : Ready", getIdentifier()); - mutableTree.ready(); - return readyImpl.ready(this); + + final DataTreeModification tree = mutableTree; + TREE_UPDATER.lazySet(this, null); + tree.ready(); + return wasReady.transactionReady(this, tree); } - protected DataTreeModification getMutatedView() { - return mutableTree; + @Override + public void close() { + final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null); + if (wasReady != null) { + LOG.debug("Store transaction: {} : Closed", getIdentifier()); + TREE_UPDATER.lazySet(this, null); + wasReady.transactionAborted(this); + } else { + LOG.debug("Store transaction: {} : Closed after submit", getIdentifier()); + } } @Override protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) { - return toStringHelper.add("ready", isReady()); + return toStringHelper.add("ready", readyImpl == null); } /** @@ -146,7 +172,14 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme * providing underlying logic for applying implementation. * */ - public static interface TransactionReadyPrototype { + abstract static class TransactionReadyPrototype { + /** + * Called when a transaction is closed without being readied. This is not invoked for + * transactions which are ready. + * + * @param tx Transaction which got aborted. + */ + protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx); /** * Returns a commit coordinator associated with supplied transactions. @@ -155,8 +188,10 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme * * @param tx * Transaction on which ready was invoked. + * @param tree + * Modified data tree which has been constructed. 
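TransactionReadyPrototype, shown above, changes from a single-method interface into an abstract class with two callbacks, so the store learns both when a transaction becomes ready (together with the modification it built) and when it is abandoned without being readied. A skeletal sketch of that contract; the callback names mirror the patch, while everything else is invented.

    abstract class ReadyPrototypeSketch {
        // Invoked when a write transaction is closed without ever being readied.
        protected abstract void transactionAborted(Object tx);

        // Invoked exactly once when ready() is called; the prototype turns the finished
        // modification into something playing the role of a three-phase commit cohort.
        protected abstract Object transactionReady(Object tx, String modification);
    }

    final class CountingPrototype extends ReadyPrototypeSketch {
        int aborted;
        int readied;

        @Override
        protected void transactionAborted(final Object tx) {
            aborted++;
        }

        @Override
        protected Object transactionReady(final Object tx, final String modification) {
            readied++;
            return "cohort for " + modification;
        }
    }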
* @return DOMStoreThreePhaseCommitCohort associated with transaction */ - DOMStoreThreePhaseCommitCohort ready(SnapshotBackedWriteTransaction tx); + protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree); } } \ No newline at end of file diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java index b3608eceef..e00be2446a 100644 --- a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java +++ b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java @@ -9,7 +9,7 @@ package org.opendaylight.controller.md.sal.dom.store.impl.jmx; import java.util.concurrent.ExecutorService; - +import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean; import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl; import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl; import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager; @@ -21,24 +21,28 @@ import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager; */ public class InMemoryDataStoreStats implements AutoCloseable { - private final ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean; - private final ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean; + private final AbstractMXBean notificationExecutorStatsBean; + private final AbstractMXBean dataStoreExecutorStatsBean; private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean; - public InMemoryDataStoreStats(String mBeanType, QueuedNotificationManager manager, - ExecutorService dataStoreExecutor) { + public InMemoryDataStoreStats(final String mBeanType, final QueuedNotificationManager manager, + final ExecutorService dataStoreExecutor) { - this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager, + notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager, "notification-manager", mBeanType, null); notificationManagerStatsBean.registerMBean(); - this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(), + notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(), "notification-executor", mBeanType, null); - this.notificationExecutorStatsBean.registerMBean(); + if (notificationExecutorStatsBean != null) { + notificationExecutorStatsBean.registerMBean(); + } - this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dataStoreExecutor, + dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dataStoreExecutor, "data-store-executor", mBeanType, null); - this.dataStoreExecutorStatsBean.registerMBean(); + if (dataStoreExecutorStatsBean != null) { + dataStoreExecutorStatsBean.registerMBean(); + } } @Override diff --git a/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java b/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java index 0999efff0f..2642116927 100644 --- 
a/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java +++ b/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java @@ -119,6 +119,10 @@ public final class NetconfSessionCapabilities { return fromStrings(session.getServerCapabilities()); } + private static final QName cachedQName(String namespace, String revision, String moduleName) { + return QName.cachedReference(QName.create(namespace, revision, moduleName)); + } + public static NetconfSessionCapabilities fromStrings(final Collection capabilities) { final Set moduleBasedCaps = new HashSet<>(); final Set nonModuleCaps = Sets.newHashSet(capabilities); @@ -138,7 +142,7 @@ public final class NetconfSessionCapabilities { String revision = REVISION_PARAM.from(queryParams); if (revision != null) { - moduleBasedCaps.add(QName.create(namespace, revision, moduleName)); + moduleBasedCaps.add(cachedQName(namespace, revision, moduleName)); nonModuleCaps.remove(capability); continue; } @@ -158,7 +162,7 @@ public final class NetconfSessionCapabilities { } // FIXME: do we really want to continue here? - moduleBasedCaps.add(QName.cachedReference(QName.create(namespace, revision, moduleName))); + moduleBasedCaps.add(cachedQName(namespace, revision, moduleName)); nonModuleCaps.remove(capability); } diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java index 2e355d4f51..c82a72eaa5 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java @@ -38,8 +38,9 @@ public class RemoteRpcProviderFactory { Thread.currentThread().getContextClassLoader()); Config actorSystemConfig = config.get(); - LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render()); + } if (config.isMetricCaptureEnabled()) { LOG.info("Instrumentation is enabled in actor system {}. 
Metrics can be viewed in JMX console.", config.getActorSystemName()); diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RoutedRpcListener.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RoutedRpcListener.java index 98cf6a329f..2aaac5a78e 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RoutedRpcListener.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RoutedRpcListener.java @@ -53,7 +53,9 @@ public class RoutedRpcListener implements RouteChangeListener> announcements) { - LOG.debug("Announcing [{}]", announcements); + if(LOG.isDebugEnabled()) { + LOG.debug("Announcing [{}]", announcements); + } RpcRegistry.Messages.AddOrUpdateRoutes addRpcMsg = new RpcRegistry.Messages.AddOrUpdateRoutes(new ArrayList<>(announcements)); rpcRegistry.tell(addRpcMsg, ActorRef.noSender()); } @@ -63,7 +65,9 @@ public class RoutedRpcListener implements RouteChangeListener> removals){ - LOG.debug("Removing [{}]", removals); + if(LOG.isDebugEnabled()) { + LOG.debug("Removing [{}]", removals); + } RpcRegistry.Messages.RemoveRoutes removeRpcMsg = new RpcRegistry.Messages.RemoveRoutes(new ArrayList<>(removals)); rpcRegistry.tell(removeRpcMsg, ActorRef.noSender()); } diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcBroker.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcBroker.java index 6b02235dc7..2046e419d9 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcBroker.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcBroker.java @@ -79,8 +79,9 @@ public class RpcBroker extends AbstractUntypedActor { } private void invokeRemoteRpc(final InvokeRpc msg) { - LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc()); + } RpcRouter.RouteIdentifier routeId = new RouteIdentifierImpl( null, msg.getRpc(), msg.getIdentifier()); RpcRegistry.Messages.FindRouters findMsg = new RpcRegistry.Messages.FindRouters(routeId); @@ -147,8 +148,9 @@ public class RpcBroker extends AbstractUntypedActor { } private void executeRpc(final ExecuteRpc msg) { - LOG.debug("Executing rpc {}", msg.getRpc()); - + if(LOG.isDebugEnabled()) { + LOG.debug("Executing rpc {}", msg.getRpc()); + } Future> future = brokerSession.rpc(msg.getRpc(), XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(), schemaContext)); diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java index dee98521ae..22879dda2f 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java @@ -31,7 +31,9 @@ public class RpcListener implements RpcRegistrationListener{ @Override public void onRpcImplementationAdded(QName rpc) { - LOG.debug("Adding registration for [{}]", rpc); + if(LOG.isDebugEnabled()) { + LOG.debug("Adding 
registration for [{}]", rpc); + } RpcRouter.RouteIdentifier routeId = new RouteIdentifierImpl(null, rpc, null); List> routeIds = new ArrayList<>(); routeIds.add(routeId); @@ -41,7 +43,9 @@ public class RpcListener implements RpcRegistrationListener{ @Override public void onRpcImplementationRemoved(QName rpc) { - LOG.debug("Removing registration for [{}]", rpc); + if(LOG.isDebugEnabled()) { + LOG.debug("Removing registration for [{}]", rpc); + } RpcRouter.RouteIdentifier routeId = new RouteIdentifierImpl(null, rpc, null); List> routeIds = new ArrayList<>(); routeIds.add(routeId); diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java index abe2008c29..48ccd824d4 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java @@ -25,7 +25,9 @@ public class TerminationMonitor extends UntypedActor{ @Override public void onReceive(Object message) throws Exception { if(message instanceof Terminated){ Terminated terminated = (Terminated) message; - LOG.debug("Actor terminated : {}", terminated.actor()); + if(LOG.isDebugEnabled()) { + LOG.debug("Actor terminated : {}", terminated.actor()); + } }else if(message instanceof Monitor){ Monitor monitor = (Monitor) message; getContext().watch(monitor.getActorRef()); diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java index 3de3fc00d0..b50dfb1ba3 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java @@ -111,7 +111,9 @@ public class BucketStore extends AbstractUntypedActorWithMetering { receiveUpdateRemoteBuckets( ((UpdateRemoteBuckets) message).getBuckets()); } else { - log.debug("Unhandled message [{}]", message); + if(log.isDebugEnabled()) { + log.debug("Unhandled message [{}]", message); + } unhandled(message); } } @@ -236,8 +238,9 @@ public class BucketStore extends AbstractUntypedActorWithMetering { versions.put(entry.getKey(), remoteVersion); } } - - log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets); + if(log.isDebugEnabled()) { + log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets); + } } /// diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java index 85c6ebe26f..1bbcc69f5e 100644 --- a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java +++ b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java @@ -170,7 +170,9 @@ public class Gossiper extends 
AbstractUntypedActorWithMetering { } clusterMembers.remove(member.address()); - log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers); + if(log.isDebugEnabled()) { + log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers); + } } /** @@ -184,8 +186,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering { if (!clusterMembers.contains(member.address())) clusterMembers.add(member.address()); - - log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers); + if(log.isDebugEnabled()) { + log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers); + } } /** @@ -205,8 +208,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering { Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size()); remoteMemberToGossipTo = clusterMembers.get(randomIndex); } - - log.debug("Gossiping to [{}]", remoteMemberToGossipTo); + if(log.isDebugEnabled()) { + log.debug("Gossiping to [{}]", remoteMemberToGossipTo); + } getLocalStatusAndSendTo(remoteMemberToGossipTo); } @@ -244,7 +248,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering { void receiveGossip(GossipEnvelope envelope){ //TODO: Add more validations if (!selfAddress.equals(envelope.to())) { - log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to()); + if(log.isDebugEnabled()) { + log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to()); + } return; } @@ -291,7 +297,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering { ActorSelection remoteRef = getContext().system().actorSelection( remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress()); - log.debug("Sending bucket versions to [{}]", remoteRef); + if(log.isDebugEnabled()) { + log.debug("Sending bucket versions to [{}]", remoteRef); + } futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher()); @@ -416,7 +424,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering { public Void apply(Object msg) { if (msg instanceof GetBucketsByMembersReply) { Map buckets = ((GetBucketsByMembersReply) msg).getBuckets(); - log.debug("Buckets to send from {}: {}", selfAddress, buckets); + if(log.isDebugEnabled()) { + log.debug("Buckets to send from {}: {}", selfAddress, buckets); + } GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets); sender.tell(envelope, getSelf()); } diff --git a/opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java b/opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java index 5d8c910afc..a95a64b2c2 100644 --- a/opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java +++ b/opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java @@ -17,6 +17,7 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import java.math.BigInteger; import java.net.URI; +import java.net.URISyntaxException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; @@ -137,13 +138,24 @@ public class RestconfImpl implements RestconfService { private static final String SCOPE_PARAM_NAME = "scope"; + private static 
final String NETCONF_BASE = "urn:ietf:params:xml:ns:netconf:base:1.0"; + + private static final String NETCONF_BASE_PAYLOAD_NAME = "data"; + + private static final QName NETCONF_BASE_QNAME; + static { try { EVENT_SUBSCRIPTION_AUGMENT_REVISION = new SimpleDateFormat("yyyy-MM-dd").parse("2014-07-08"); + NETCONF_BASE_QNAME = QName.create(QNameModule.create(new URI(NETCONF_BASE), null), NETCONF_BASE_PAYLOAD_NAME ); } catch (ParseException e) { throw new RestconfDocumentedException( "It wasn't possible to convert revision date of sal-remote-augment to date", ErrorType.APPLICATION, ErrorTag.OPERATION_FAILED); + } catch (URISyntaxException e) { + throw new RestconfDocumentedException( + "It wasn't possible to create instance of URI class with "+NETCONF_BASE+" URI", ErrorType.APPLICATION, + ErrorTag.OPERATION_FAILED); } } @@ -705,11 +717,13 @@ public class RestconfImpl implements RestconfService { validateInput(iiWithData.getSchemaNode(), payload); DOMMountPoint mountPoint = iiWithData.getMountPoint(); + validateTopLevelNodeName(payload, iiWithData.getInstanceIdentifier()); final CompositeNode value = this.normalizeNode(payload, iiWithData.getSchemaNode(), mountPoint); validateListKeysEqualityInPayloadAndUri(iiWithData, value); final NormalizedNode datastoreNormalizedNode = compositeNodeToDatastoreNormalizedNode(value, iiWithData.getSchemaNode()); + YangInstanceIdentifier normalizedII; if (mountPoint != null) { normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized( @@ -760,6 +774,29 @@ public class RestconfImpl implements RestconfService { return Response.status(Status.OK).build(); } + private void validateTopLevelNodeName(final Node node, + final YangInstanceIdentifier identifier) { + final String payloadName = getName(node); + final Iterator pathArguments = identifier.getReversePathArguments().iterator(); + + //no arguments + if (!pathArguments.hasNext()) { + //no "data" payload + if (!node.getNodeType().equals(NETCONF_BASE_QNAME)) { + throw new RestconfDocumentedException("Instance identifier has to contain at least one path argument", + ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE); + } + //any arguments + } else { + final String identifierName = pathArguments.next().getNodeType().getLocalName(); + if (!payloadName.equals(identifierName)) { + throw new RestconfDocumentedException("Payload name (" + payloadName + + ") is different from identifier name (" + identifierName + ")", ErrorType.PROTOCOL, + ErrorTag.MALFORMED_MESSAGE); + } + } + } + /** * Validates whether keys in {@code payload} are equal to values of keys in {@code iiWithData} for list schema node * diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml new file mode 100644 index 0000000000..8d4bbbd64c --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml @@ -0,0 +1,55 @@ + + + + + 4.0.0 + + clustering-it + org.opendaylight.controller.samples + 1.1-SNAPSHOT + + clustering-it-config + jar + + + + org.codehaus.mojo + build-helper-maven-plugin + + + attach-artifacts + + attach-artifact + + package + + + + ${project.build.directory}/classes/initial/20-clustering-test-app.xml + xml + config + + + ${project.build.directory}/classes/initial/module-shards.conf + xml + testmoduleshardconf + + + ${project.build.directory}/classes/initial/modules.conf + xml + testmoduleconf + + + + + + + + + diff --git 
a/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/20-clustering-test-app.xml b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/20-clustering-test-app.xml new file mode 100644 index 0000000000..f01970908c --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/20-clustering-test-app.xml @@ -0,0 +1,47 @@ + + + + + + + + + + prefix:clustering-it-provider + + clustering-it-provider + + + binding:binding-rpc-registry + binding-rpc-broker + + + binding:binding-async-data-broker + binding-data-broker + + + + binding:binding-notification-service + + binding-notification-broker + + + + + + + + + urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&revision=2013-10-28 + urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider?module=clustering-it-provider&revision=2014-08-19 + + + + + diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/module-shards.conf b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/module-shards.conf new file mode 100644 index 0000000000..59b0be1bce --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/module-shards.conf @@ -0,0 +1,117 @@ +# This file describes which shards live on which members +# The format for a module-shards is as follows, +# { +# name = "" +# shards = [ +# { +# name="" +# replicas = [ +# "" +# ] +# ] +# } +# +# For Helium we support only one shard per module. Beyond Helium +# we will support more than 1 +# The replicas section is a collection of member names. This information +# will be used to decide on which members replicas of a particular shard will be +# located. Once replication is integrated with the distributed data store then +# this section can have multiple entries. 
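The comment block above describes the HOCON-style layout of the module-shards file added by this change. Purely as an illustrative aside (not part of this patch), such a file can be walked with the Typesafe Config parser, assuming that library is on the classpath; the class name, the file path and the printed output in the sketch below are assumptions made for illustration only and are not introduced by this commit.

    import java.io.File;
    import java.util.List;

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    // Illustrative only: read a module-shards file in the layout shown above
    // and print which members are expected to host a replica of each shard.
    public final class ModuleShardsDump {
        public static void main(String[] args) {
            Config root = ConfigFactory.parseFile(new File("module-shards.conf"));
            for (Config module : root.getConfigList("module-shards")) {
                String moduleName = module.getString("name");
                for (Config shard : module.getConfigList("shards")) {
                    String shardName = shard.getString("name");
                    List<String> replicas = shard.getStringList("replicas");
                    System.out.println(moduleName + "/" + shardName + " -> " + replicas);
                }
            }
        }
    }

With the module-shards.conf added in this change, such a walk would report the default, topology, inventory, toaster, car, people and car-people shards, each replicated on member-1, member-2 and member-3.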
+# +# + + +module-shards = [ + { + name = "default" + shards = [ + { + name="default" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + }, + { + name = "topology" + shards = [ + { + name="topology" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + }, + { + name = "inventory" + shards = [ + { + name="inventory" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + }, + { + name = "toaster" + shards = [ + { + name="toaster" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + } + { + name = "car" + shards = [ + { + name="car" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + } + { + name = "people" + shards = [ + { + name="people" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + } + { + name = "car-people" + shards = [ + { + name="car-people" + replicas = [ + "member-1", + "member-2", + "member-3" + ] + } + ] + } + +] diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/modules.conf b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/modules.conf new file mode 100644 index 0000000000..eda60d35a1 --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/modules.conf @@ -0,0 +1,47 @@ +# This file should describe all the modules that need to be placed in a separate shard +# The format of the configuration is as follows +# { +# name = "" +# namespace = "" +# shard-strategy = "module" +# } +# +# Note that at this time the only shard-strategy we support is module which basically +# will put all the data of a single module in two shards (one for config and one for +# operational data) + +modules = [ + { + name = "inventory" + namespace = "urn:opendaylight:inventory" + shard-strategy = "module" + }, + + { + name = "topology" + namespace = "urn:TBD:params:xml:ns:yang:network-topology" + shard-strategy = "module" + }, + + { + name = "toaster" + namespace = "http://netconfcentral.org/ns/toaster" + shard-strategy = "module" + }, + { + name = "car" + namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car" + shard-strategy = "module" + } + { + name = "people" + namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people" + shard-strategy = "module" + } + + { + name = "car-people" + namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people" + shard-strategy = "module" + } +] diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/model/pom.xml new file mode 100644 index 0000000000..a23e32df2b --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/model/pom.xml @@ -0,0 +1,115 @@ + + + 4.0.0 + + clustering-it + org.opendaylight.controller.samples + 1.1-SNAPSHOT + + clustering-it-model + bundle + + + + + org.apache.felix + maven-bundle-plugin + ${bundle.plugin.version} + true + + + org.opendaylight.controller.sal-clustering-it-model + * + + + + + org.opendaylight.yangtools + yang-maven-plugin + ${yangtools.version} + + + + generate-sources + + + src/main/yang + + + org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl + target/generated-sources/sal + + + true + + + + + + org.opendaylight.yangtools + maven-sal-api-gen-plugin + ${yangtools.version} + jar + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.opendaylight.yangtools + 
yang-maven-plugin + [0.5,) + + generate-sources + + + + + + + + + + + + + + + + org.opendaylight.yangtools + yang-binding + ${yangtools.version} + + + org.opendaylight.yangtools + yang-common + ${yangtools.version} + + + org.opendaylight.yangtools.model + ietf-inet-types + ${ietf-inet-types.version} + + + org.opendaylight.yangtools.model + ietf-yang-types + ${ietf-yang-types.version} + + + org.opendaylight.yangtools.model + yang-ext + ${yang-ext.version} + + + diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang new file mode 100644 index 0000000000..899724658f --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang @@ -0,0 +1,42 @@ +module car-people { + + yang-version 1; + + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"; + + prefix car; + + import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; } + import car { prefix "c"; revision-date 2014-08-18; } + import people { prefix "people"; revision-date 2014-08-18; } + + organization "Netconf Central"; + + contact + "Harman Singh "; + + description + "YANG model for car for test application"; + + revision "2014-08-18" { + description + "Clustering sample app"; + } + + container car-people { + description + "Top-level container for all people car map"; + + list car-person { + key "car-id person-id"; + description "A mapping of cars and people."; + leaf car-id { + type c:car-id; + } + + leaf person-id { + type people:person-id; + } + } + } +} \ No newline at end of file diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang new file mode 100644 index 0000000000..f6a8797098 --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang @@ -0,0 +1,60 @@ +module car-purchase { + + yang-version 1; + + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase"; + + prefix cp; + + import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; } + import car { prefix "car"; revision-date 2014-08-18; } + import people { prefix "person"; revision-date 2014-08-18; } + import yang-ext {prefix "ext"; revision-date "2013-07-09";} + + organization "Netconf Central"; + + contact + "Harman Singh "; + + description + "YANG model for car purchase for test application"; + + revision "2014-08-18" { + description + "Clustering sample app"; + } + + rpc buy-car { + description + "buy a new car"; + input { + leaf person { + ext:context-reference "person:person-context"; + type person:person-ref; + description "A reference to a particular person."; + } + + leaf car-id { + type car:car-id; + description "identifier of car."; + } + leaf person-id { + type person:person-id; + description "identifier of person."; + } + } + } + + notification carBought { + description + "Indicates that a person bought a car."; + leaf car-id { + type car:car-id; + description "identifier of car."; + } + leaf person-id { + type person:person-id; + description "identifier of person."; + } + } +} \ No newline at end of file diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car.yang new file mode 100644 index 0000000000..d9cfb6b1d5 --- 
/dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car.yang @@ -0,0 +1,64 @@ +module car { + + yang-version 1; + + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"; + + prefix car; + + import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; } + + organization "Netconf Central"; + + contact + "Harman Singh "; + + description + "YANG model for car for test application"; + + revision "2014-08-18" { + description + "Clustering sample app"; + } + + typedef car-id { + type inet:uri; + description "An identifier for car entry."; + } + + grouping car-entry { + description "Describes the contents of a car entry - + Details of the car manufacturer, model etc"; + leaf id { + type car-id; + description "identifier of single list of entries."; + } + + leaf model { + type string; + } + leaf manufacturer { + type string; + } + + leaf year { + type uint32; + } + + leaf category { + type string; + } + } + + container cars { + description + "Top-level container for all car objects."; + list car-entry { + key "id"; + description "A list of cars (as defined by the 'grouping car-entry')."; + uses car-entry; + } + } + + +} \ No newline at end of file diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/people.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/people.yang new file mode 100644 index 0000000000..6c8f24703f --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/people.yang @@ -0,0 +1,80 @@ +module people { + + yang-version 1; + + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"; + + prefix people; + + import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; } + + organization "Netconf Central"; + + contact + "Harman Singh "; + + description + "YANG model for person for test application"; + + revision "2014-08-18" { + description + "Clustering sample app"; + } + + typedef person-id { + type inet:uri; + description "An identifier for person."; + } + + typedef person-ref { + type instance-identifier; + description "A reference that points to an people:people/person in the data tree."; + } + identity person-context { + description "A person-context is a classifier for person elements which allows an RPC to provide a service on behalf of a particular element in the data tree."; + } + + grouping person { + description "Describes the details of the person"; + + leaf id { + type person-id; + description "identifier of single list of entries."; + } + + leaf gender { + type string; + } + + leaf age { + type uint32; + } + + leaf address { + type string; + } + + leaf contactNo { + type string; + } + } + + container people { + description + "Top-level container for all people"; + + list person { + key "id"; + description "A list of people (as defined by the 'grouping person')."; + uses person; + } + } + + rpc add-person { + description + "Add a person entry into database"; + input { + uses person; + } + } +} \ No newline at end of file diff --git a/opendaylight/md-sal/samples/clustering-test-app/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/pom.xml new file mode 100644 index 0000000000..863bbecdf9 --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/pom.xml @@ -0,0 +1,16 @@ + + + 4.0.0 + + org.opendaylight.controller.samples + sal-samples + 1.1-SNAPSHOT + + clustering-it + pom + + configuration + model + provider + + diff --git 
a/opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml new file mode 100644 index 0000000000..093b681125 --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml @@ -0,0 +1,102 @@ + + + 4.0.0 + + clustering-it + org.opendaylight.controller.samples + 1.1-SNAPSHOT + + clustering-it-provider + bundle + + + + + org.apache.felix + maven-bundle-plugin + ${bundle.plugin.version} + true + + + org.opendaylight.controller.config.yang.config.clustering_it_provider + * + + + + + org.opendaylight.yangtools + yang-maven-plugin + ${yangtools.version} + + + config + + generate-sources + + + + + org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator + ${jmxGeneratorPath} + + urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang + + + + org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl + ${salGeneratorPath} + + + true + + + + + + org.opendaylight.controller + yang-jmx-generator-plugin + ${config.version} + + + org.opendaylight.yangtools + maven-sal-api-gen-plugin + ${yangtools.version} + + + + + + + + + org.opendaylight.controller.samples + clustering-it-model + ${version} + + + org.opendaylight.controller + config-api + ${config.version} + + + org.opendaylight.controller + sal-binding-config + ${mdsal.version} + + + org.opendaylight.controller + sal-binding-api + ${mdsal.version} + + + org.opendaylight.controller + sal-common-util + ${mdsal.version} + + + equinoxSDK381 + org.eclipse.osgi + 3.8.1.v20120830-144521 + + + diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java new file mode 100644 index 0000000000..4737d6eb49 --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.clustering.it.listener; + +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import org.opendaylight.controller.md.sal.binding.api.DataBroker; +import org.opendaylight.controller.md.sal.binding.api.WriteTransaction; +import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener; +import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class PeopleCarListener implements CarPurchaseListener { + + private static final Logger log = LoggerFactory.getLogger(PeopleCarListener.class); + + private DataBroker dataProvider; + + + + public void setDataProvider(final DataBroker salDataProvider) { + this.dataProvider = salDataProvider; + } + + @Override + public void onCarBought(CarBought notification) { + log.info("onCarBought notification : Adding car person entry"); + + final CarPersonBuilder carPersonBuilder = new CarPersonBuilder(); + carPersonBuilder.setCarId(notification.getCarId()); + carPersonBuilder.setPersonId(notification.getPersonId()); + CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId()); + carPersonBuilder.setKey(key); + final CarPerson carPerson = carPersonBuilder.build(); + + InstanceIdentifier carPersonIId = + InstanceIdentifier.builder(CarPeople.class).child(CarPerson.class, carPerson.getKey()).build(); + + + WriteTransaction tx = dataProvider.newWriteOnlyTransaction(); + tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson); + + Futures.addCallback(tx.submit(), new FutureCallback() { + @Override + public void onSuccess(final Void result) { + log.info("Car bought, entry added to map of people and car [{}]", carPerson); + } + + @Override + public void onFailure(final Throwable t) { + log.info("Car bought, Failed entry addition to map of people and car [{}]", carPerson); + } + }); + + } +} diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java new file mode 100644 index 0000000000..e0d3f75349 --- /dev/null +++ 
b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.clustering.it.provider; + +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.SettableFuture; +import org.opendaylight.controller.md.sal.binding.api.DataBroker; +import org.opendaylight.controller.md.sal.binding.api.WriteTransaction; +import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; +import org.opendaylight.controller.sal.binding.api.BindingAwareBroker; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder; +import org.opendaylight.yangtools.yang.common.RpcError; +import org.opendaylight.yangtools.yang.common.RpcResult; +import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; +import org.opendaylight.yangtools.yang.common.RpcResultBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.Future; + +public class PeopleProvider implements PeopleService, AutoCloseable { + + private static final Logger log = LoggerFactory.getLogger(PeopleProvider.class); + + private DataBroker dataProvider; + + private BindingAwareBroker.RoutedRpcRegistration rpcRegistration; + + public void setDataProvider(final DataBroker salDataProvider) { + this.dataProvider = salDataProvider; + } + + + public void setRpcRegistration(BindingAwareBroker.RoutedRpcRegistration rpcRegistration) { + this.rpcRegistration = rpcRegistration; + } + + @Override + public Future> addPerson(AddPersonInput input) { + log.info("RPC addPerson : adding person [{}]", input); + + PersonBuilder builder = new PersonBuilder(input); + final Person person = builder.build(); + final SettableFuture> futureResult = SettableFuture.create(); + + // Each entry will be identifiable by a unique key, we have to create that identifier + final InstanceIdentifier.InstanceIdentifierBuilder personIdBuilder = + InstanceIdentifier.builder(People.class) + .child(Person.class, person.getKey()); + final InstanceIdentifier personId = personIdBuilder.build(); + // Place entry in data store tree + WriteTransaction tx = dataProvider.newWriteOnlyTransaction(); + 
tx.put(LogicalDatastoreType.CONFIGURATION, personId, person); + + Futures.addCallback(tx.submit(), new FutureCallback() { + @Override + public void onSuccess(final Void result) { + log.info("RPC addPerson : person added successfully [{}]", person); + rpcRegistration.registerPath(PersonContext.class, personId); + log.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId); + futureResult.set(RpcResultBuilder.success().build()); + } + + @Override + public void onFailure(final Throwable t) { + log.info("RPC addPerson : person addition failed [{}]", person); + futureResult.set(RpcResultBuilder.failed() + .withError(RpcError.ErrorType.APPLICATION, t.getMessage()).build()); + } + }); + return futureResult; + } + + @Override + public void close() throws Exception { + + } +} diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java new file mode 100644 index 0000000000..74a0aa68ed --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.clustering.it.provider; + +import com.google.common.util.concurrent.SettableFuture; +import org.opendaylight.controller.sal.binding.api.NotificationProviderService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService; +import org.opendaylight.yangtools.yang.common.RpcResult; +import org.opendaylight.yangtools.yang.common.RpcResultBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.Future; + + +public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable{ + + private static final Logger log = LoggerFactory.getLogger(PurchaseCarProvider.class); + + private NotificationProviderService notificationProvider; + + + public void setNotificationProvider(final NotificationProviderService salService) { + this.notificationProvider = salService; + } + + + @Override + public Future> buyCar(BuyCarInput input) { + log.info("Routed RPC buyCar : generating notification for buying car [{}]", input); + SettableFuture> futureResult = SettableFuture.create(); + CarBoughtBuilder carBoughtBuilder = new CarBoughtBuilder(); + carBoughtBuilder.setCarId(input.getCarId()); + carBoughtBuilder.setPersonId(input.getPersonId()); + notificationProvider.publish(carBoughtBuilder.build()); + futureResult.set(RpcResultBuilder.success().build()); + return futureResult; + } + + @Override + public void close() throws Exception { + + } +} diff --git 
a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModule.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModule.java new file mode 100644 index 0000000000..d91d40a34d --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModule.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.config.yang.config.clustering_it_provider; + + +import org.opendaylight.controller.clustering.it.listener.PeopleCarListener; +import org.opendaylight.controller.clustering.it.provider.PeopleProvider; +import org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider; +import org.opendaylight.controller.md.sal.binding.api.DataBroker; +import org.opendaylight.controller.sal.binding.api.BindingAwareBroker; +import org.opendaylight.controller.sal.binding.api.NotificationProviderService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService; +import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService; +import org.opendaylight.yangtools.concepts.ListenerRegistration; +import org.opendaylight.yangtools.yang.binding.NotificationListener; + +public class ClusteringItProviderModule extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModule { + public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) { + super(identifier, dependencyResolver); + } + + public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.clustering_it_provider.ClusteringItProviderModule oldModule, java.lang.AutoCloseable oldInstance) { + super(identifier, dependencyResolver, oldModule, oldInstance); + } + + @Override + public void customValidation() { + // add custom validation form module attributes here. 
+ } + + @Override + public java.lang.AutoCloseable createInstance() { + DataBroker dataBrokerService = getDataBrokerDependency(); + NotificationProviderService notificationProvider = getNotificationServiceDependency(); + + // Add routed RPC registration for car purchase + final PurchaseCarProvider purchaseCar = new PurchaseCarProvider(); + purchaseCar.setNotificationProvider(notificationProvider); + + final BindingAwareBroker.RoutedRpcRegistration purchaseCarRpc = getRpcRegistryDependency() + .addRoutedRpcImplementation(CarPurchaseService.class, purchaseCar); + + // Add people provider registration + final PeopleProvider people = new PeopleProvider(); + people.setDataProvider(dataBrokerService); + + people.setRpcRegistration(purchaseCarRpc); + + final BindingAwareBroker.RpcRegistration peopleRpcReg = getRpcRegistryDependency() + .addRpcImplementation(PeopleService.class, people); + + + + final PeopleCarListener peopleCarListener = new PeopleCarListener(); + peopleCarListener.setDataProvider(dataBrokerService); + + final ListenerRegistration listenerReg = + getNotificationServiceDependency().registerNotificationListener( peopleCarListener ); + + // Wrap toaster as AutoCloseable and close registrations to md-sal at + // close() + final class AutoCloseableToaster implements AutoCloseable { + + @Override + public void close() throws Exception { + peopleRpcReg.close(); + purchaseCarRpc.close(); + people.close(); + purchaseCar.close(); + listenerReg.close(); + } + } + + AutoCloseable ret = new AutoCloseableToaster(); + return ret; + } + +} diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModuleFactory.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModuleFactory.java new file mode 100644 index 0000000000..642263c773 --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModuleFactory.java @@ -0,0 +1,13 @@ +/* +* Generated file +* +* Generated from: yang module name: clustering-it-provider yang module local name: clustering-it-provider +* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator +* Generated at: Tue Aug 19 14:44:46 PDT 2014 +* +* Do not modify this file unless it is present under src/main directory +*/ +package org.opendaylight.controller.config.yang.config.clustering_it_provider; +public class ClusteringItProviderModuleFactory extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModuleFactory { + +} diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/yang/clustering-it-provider.yang b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/yang/clustering-it-provider.yang new file mode 100644 index 0000000000..ff3f9a8e5c --- /dev/null +++ b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/yang/clustering-it-provider.yang @@ -0,0 +1,60 @@ +module clustering-it-provider { + + yang-version 1; + namespace "urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider"; + prefix "clustering-it-provider"; + + import config { prefix config; revision-date 2013-04-05; } + import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; } + + description + "This 
module contains the base YANG definitions for + clustering-it-provider implementation."; + + revision "2014-08-19" { + description + "Initial revision."; + } + + // This is the definition of the service implementation as a module identity. + identity clustering-it-provider { + base config:module-type; + + // Specifies the prefix for generated java classes. + config:java-name-prefix ClusteringItProvider; + } + + // Augments the 'configuration' choice node under modules/module. + augment "/config:modules/config:module/config:configuration" { + case clustering-it-provider { + when "/config:modules/config:module/config:type = 'clustering-it-provider'"; + + container rpc-registry { + uses config:service-ref { + refine type { + mandatory true; + config:required-identity mdsal:binding-rpc-registry; + } + } + } + + container notification-service { + uses config:service-ref { + refine type { + mandatory true; + config:required-identity mdsal:binding-notification-service; + } + } + } + + container data-broker { + uses config:service-ref { + refine type { + mandatory false; + config:required-identity mdsal:binding-async-data-broker; + } + } + } + } + } +} diff --git a/opendaylight/md-sal/samples/pom.xml b/opendaylight/md-sal/samples/pom.xml index ae7d323480..d13200e4e8 100644 --- a/opendaylight/md-sal/samples/pom.xml +++ b/opendaylight/md-sal/samples/pom.xml @@ -17,6 +17,7 @@ toaster-provider toaster-config l2switch + clustering-test-app scm:git:ssh://git.opendaylight.org:29418/controller.git diff --git a/opendaylight/md-sal/topology-manager/pom.xml b/opendaylight/md-sal/topology-manager/pom.xml index fe1813a199..57313d2948 100644 --- a/opendaylight/md-sal/topology-manager/pom.xml +++ b/opendaylight/md-sal/topology-manager/pom.xml @@ -40,6 +40,16 @@ org.osgi.core provided + + junit + junit + test + + + org.mockito + mockito-all + test + diff --git a/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporter.java b/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporter.java index c1996f4691..361373d78d 100644 --- a/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporter.java +++ b/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporter.java @@ -15,9 +15,9 @@ import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMap import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode; import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId; +import java.util.Collection; import java.util.Collections; import java.util.List; - import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction; import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException; @@ -50,17 +50,19 @@ import org.slf4j.LoggerFactory; import com.google.common.base.Optional; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import com.google.common.util.concurrent.CheckedFuture; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener { - private final Logger LOG = 
LoggerFactory.getLogger(FlowCapableTopologyExporter.class); + private static final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class); private final InstanceIdentifier topology; private final OperationProcessor processor; - FlowCapableTopologyExporter(final OperationProcessor processor, final InstanceIdentifier topology) { + FlowCapableTopologyExporter(final OperationProcessor processor, + final InstanceIdentifier topology) { this.processor = Preconditions.checkNotNull(processor); this.topology = Preconditions.checkNotNull(topology); } @@ -73,15 +75,14 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open processor.enqueueOperation(new TopologyOperation() { @Override - public void applyOperation(final ReadWriteTransaction transaction) { - removeAffectedLinks(nodeId); + public void applyOperation(ReadWriteTransaction transaction) { + removeAffectedLinks(nodeId, transaction); + transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance); } - }); - processor.enqueueOperation(new TopologyOperation() { @Override - public void applyOperation(ReadWriteTransaction transaction) { - transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance); + public String toString() { + return "onNodeRemoved"; } }); } @@ -97,6 +98,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open final InstanceIdentifier path = getNodePath(toTopologyNodeId(notification.getId())); transaction.merge(LogicalDatastoreType.OPERATIONAL, path, node, true); } + + @Override + public String toString() { + return "onNodeUpdated"; + } }); } } @@ -104,28 +110,30 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open @Override public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) { - final InstanceIdentifier tpInstance = toTerminationPointIdentifier(notification - .getNodeConnectorRef()); + final InstanceIdentifier tpInstance = toTerminationPointIdentifier( + notification.getNodeConnectorRef()); - processor.enqueueOperation(new TopologyOperation() { - @Override - public void applyOperation(final ReadWriteTransaction transaction) { - final TpId tpId = toTerminationPointId(getNodeConnectorKey(notification.getNodeConnectorRef()).getId()); - removeAffectedLinks(tpId); - } - }); + final TpId tpId = toTerminationPointId(getNodeConnectorKey( + notification.getNodeConnectorRef()).getId()); processor.enqueueOperation(new TopologyOperation() { @Override public void applyOperation(ReadWriteTransaction transaction) { + removeAffectedLinks(tpId, transaction); transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance); } + + @Override + public String toString() { + return "onNodeConnectorRemoved"; + } }); } @Override public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) { - final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(FlowCapableNodeConnectorUpdated.class); + final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation( + FlowCapableNodeConnectorUpdated.class); if (fcncu != null) { processor.enqueueOperation(new TopologyOperation() { @Override @@ -137,9 +145,14 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open transaction.merge(LogicalDatastoreType.OPERATIONAL, path, point, true); if ((fcncu.getState() != null && fcncu.getState().isLinkDown()) || (fcncu.getConfiguration() != null && fcncu.getConfiguration().isPORTDOWN())) { - removeAffectedLinks(point.getTpId()); + removeAffectedLinks(point.getTpId(), 
transaction); } } + + @Override + public String toString() { + return "onNodeConnectorUpdated"; + } }); } } @@ -153,6 +166,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open final InstanceIdentifier path = linkPath(link); transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true); } + + @Override + public String toString() { + return "onLinkDiscovered"; + } }); } @@ -168,6 +186,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open public void applyOperation(final ReadWriteTransaction transaction) { transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification))); } + + @Override + public String toString() { + return "onLinkRemoved"; + } }); } @@ -188,62 +211,92 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open return tpPath(toTopologyNodeId(invNodeKey.getId()), toTerminationPointId(invNodeConnectorKey.getId())); } - private void removeAffectedLinks(final NodeId id) { - processor.enqueueOperation(new TopologyOperation() { + private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) { + CheckedFuture, ReadFailedException> topologyDataFuture = + transaction.read(LogicalDatastoreType.OPERATIONAL, topology); + Futures.addCallback(topologyDataFuture, new FutureCallback>() { @Override - public void applyOperation(final ReadWriteTransaction transaction) { - CheckedFuture, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology); - Futures.addCallback(topologyDataFuture, new FutureCallback>() { - @Override - public void onSuccess(Optional topologyOptional) { - if (topologyOptional.isPresent()) { - List linkList = topologyOptional.get().getLink() != null - ? topologyOptional.get().getLink() : Collections. emptyList(); - for (Link link : linkList) { - if (id.equals(link.getSource().getSourceNode()) || id.equals(link.getDestination().getDestNode())) { - transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link)); - } - } - } - } + public void onSuccess(Optional topologyOptional) { + removeAffectedLinks(id, topologyOptional); + } - @Override - public void onFailure(Throwable throwable) { - LOG.error("Error reading topology data for topology {}", topology, throwable); - } - }); + @Override + public void onFailure(Throwable throwable) { + LOG.error("Error reading topology data for topology {}", topology, throwable); } }); } - private void removeAffectedLinks(final TpId id) { - processor.enqueueOperation(new TopologyOperation() { - @Override - public void applyOperation(final ReadWriteTransaction transaction) { - CheckedFuture, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology); - Futures.addCallback(topologyDataFuture, new FutureCallback>() { - @Override - public void onSuccess(Optional topologyOptional) { - if (topologyOptional.isPresent()) { - List linkList = topologyOptional.get().getLink() != null - ? topologyOptional.get().getLink() : Collections. emptyList(); - for (Link link : linkList) { - if (id.equals(link.getSource().getSourceTp()) || id.equals(link.getDestination().getDestTp())) { - transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link)); - } - } - } - } + private void removeAffectedLinks(final NodeId id, Optional topologyOptional) { + if (!topologyOptional.isPresent()) { + return; + } + + List linkList = topologyOptional.get().getLink() != null ? + topologyOptional.get().getLink() : Collections. 
emptyList(); + final List> linkIDsToDelete = Lists.newArrayList(); + for (Link link : linkList) { + if (id.equals(link.getSource().getSourceNode()) || + id.equals(link.getDestination().getDestNode())) { + linkIDsToDelete.add(linkPath(link)); + } + } + + enqueueLinkDeletes(linkIDsToDelete); + } - @Override - public void onFailure(Throwable throwable) { - LOG.error("Error reading topology data for topology {}", topology, throwable); + private void enqueueLinkDeletes(final Collection> linkIDsToDelete) { + if(!linkIDsToDelete.isEmpty()) { + processor.enqueueOperation(new TopologyOperation() { + @Override + public void applyOperation(ReadWriteTransaction transaction) { + for(InstanceIdentifier linkID: linkIDsToDelete) { + transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID); } - }); + } + + @Override + public String toString() { + return "Delete Links " + linkIDsToDelete.size(); + } + }); + } + } + + private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) { + CheckedFuture, ReadFailedException> topologyDataFuture = + transaction.read(LogicalDatastoreType.OPERATIONAL, topology); + Futures.addCallback(topologyDataFuture, new FutureCallback>() { + @Override + public void onSuccess(Optional topologyOptional) { + removeAffectedLinks(id, topologyOptional); + } + + @Override + public void onFailure(Throwable throwable) { + LOG.error("Error reading topology data for topology {}", topology, throwable); } }); } + private void removeAffectedLinks(final TpId id, Optional topologyOptional) { + if (!topologyOptional.isPresent()) { + return; + } + + List linkList = topologyOptional.get().getLink() != null + ? topologyOptional.get().getLink() : Collections. emptyList(); + final List> linkIDsToDelete = Lists.newArrayList(); + for (Link link : linkList) { + if (id.equals(link.getSource().getSourceTp()) || + id.equals(link.getDestination().getDestTp())) { + linkIDsToDelete.add(linkPath(link)); + } + } + + enqueueLinkDeletes(linkIDsToDelete); + } + private InstanceIdentifier getNodePath(final NodeId nodeId) { return topology.child(Node.class, new NodeKey(nodeId)); } diff --git a/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/OperationProcessor.java b/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/OperationProcessor.java index 1cf648eb97..f09da00459 100644 --- a/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/OperationProcessor.java +++ b/opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/OperationProcessor.java @@ -11,14 +11,17 @@ import com.google.common.base.Preconditions; import com.google.common.util.concurrent.CheckedFuture; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; + import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; + import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain; import org.opendaylight.controller.md.sal.binding.api.DataBroker; import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction; import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction; import org.opendaylight.controller.md.sal.common.api.data.TransactionChain; import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener; +import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,9 +53,9 @@ final class OperationProcessor implements AutoCloseable, Runnable, TransactionCh for (; ; ) { TopologyOperation op = queue.take(); - LOG.debug("New operations available, starting transaction"); - final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction(); + LOG.debug("New {} operation available, starting transaction", op); + final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction(); int ops = 0; do { @@ -64,14 +67,16 @@ final class OperationProcessor implements AutoCloseable, Runnable, TransactionCh } else { op = null; } + + LOG.debug("Next operation {}", op); } while (op != null); LOG.debug("Processed {} operations, submitting transaction", ops); - final CheckedFuture txResultFuture = tx.submit(); - Futures.addCallback(txResultFuture, new FutureCallback() { + CheckedFuture txResultFuture = tx.submit(); + Futures.addCallback(txResultFuture, new FutureCallback() { @Override - public void onSuccess(Object o) { + public void onSuccess(Void notUsed) { LOG.debug("Topology export successful for tx :{}", tx.getIdentifier()); } diff --git a/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java b/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java new file mode 100644 index 0000000000..b7a56a4890 --- /dev/null +++ b/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java @@ -0,0 +1,666 @@ +/* + * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.md.controller.topology.manager; + +import static org.junit.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain; +import org.opendaylight.controller.md.sal.binding.api.DataBroker; +import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction; +import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType; +import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException; 
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener; +import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig; +import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.StateBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector; +import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey; +import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode; +import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey; +import 
org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint; +import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey; +import org.opendaylight.yangtools.yang.binding.InstanceIdentifier; +import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier; + +import com.google.common.base.Optional; +import com.google.common.util.concurrent.CheckedFuture; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.SettableFuture; +import com.google.common.util.concurrent.Uninterruptibles; + +public class FlowCapableTopologyExporterTest { + + @Mock + private DataBroker mockDataBroker; + + @Mock + private BindingTransactionChain mockTxChain; + + private OperationProcessor processor; + + private FlowCapableTopologyExporter exporter; + + private InstanceIdentifier topologyIID; + + private final ExecutorService executor = Executors.newFixedThreadPool(1); + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + + doReturn(mockTxChain).when(mockDataBroker) + .createTransactionChain(any(TransactionChainListener.class)); + + processor = new OperationProcessor(mockDataBroker); + + topologyIID = InstanceIdentifier.create(NetworkTopology.class) + .child(Topology.class, new TopologyKey(new TopologyId("test"))); + exporter = new FlowCapableTopologyExporter(processor, topologyIID); + + executor.execute(processor); + } + + @After + public void tearDown() { + executor.shutdownNow(); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void testOnNodeRemoved() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + InstanceIdentifier invNodeID = InstanceIdentifier.create(Nodes.class).child( + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, + nodeKey); + + List linkList = Arrays.asList( + newLink("link1", newSourceNode("node1"), newDestNode("dest")), + newLink("link2", newSourceNode("source"), newDestNode("node1")), + newLink("link2", newSourceNode("source2"), newDestNode("dest2"))); + final Topology topology = new TopologyBuilder().setLink(linkList).build(); + + InstanceIdentifier[] expDeletedIIDs = { + topologyIID.child(Link.class, linkList.get(0).getKey()), + topologyIID.child(Link.class, linkList.get(1).getKey()), + topologyIID.child(Node.class, new NodeKey(new NodeId("node1"))) + }; + + SettableFuture> readFuture = SettableFuture.create(); + ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class); + doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1) + .read(LogicalDatastoreType.OPERATIONAL, topologyIID); + + CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1); + + int expDeleteCalls = expDeletedIIDs.length; + CountDownLatch 
deleteLatch = new CountDownLatch(expDeleteCalls); + ArgumentCaptor deletedLinkIDs = + ArgumentCaptor.forClass(InstanceIdentifier.class); + setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch); + + ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class); + setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch); + CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2); + + doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build()); + + waitForSubmit(submitLatch1); + + setReadFutureAsync(topology, readFuture); + + waitForDeletes(expDeleteCalls, deleteLatch); + + waitForSubmit(submitLatch2); + + assertDeletedIDs(expDeletedIIDs, deletedLinkIDs); + + verifyMockTx(mockTx1); + verifyMockTx(mockTx2); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void testOnNodeRemovedWithNoTopology() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + InstanceIdentifier invNodeID = InstanceIdentifier.create(Nodes.class).child( + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, + nodeKey); + + InstanceIdentifier[] expDeletedIIDs = { + topologyIID.child(Node.class, new NodeKey(new NodeId("node1"))) + }; + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx) + .read(LogicalDatastoreType.OPERATIONAL, topologyIID); + CountDownLatch submitLatch = setupStubbedSubmit(mockTx); + + CountDownLatch deleteLatch = new CountDownLatch(1); + ArgumentCaptor deletedLinkIDs = + ArgumentCaptor.forClass(InstanceIdentifier.class); + setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch); + + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build()); + + waitForSubmit(submitLatch); + + waitForDeletes(1, deleteLatch); + + assertDeletedIDs(expDeletedIIDs, deletedLinkIDs); + } + + @SuppressWarnings("rawtypes") + @Test + public void testOnNodeConnectorRemoved() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey = + newInvNodeConnKey("tp1"); + + InstanceIdentifier invNodeConnID = newNodeConnID(nodeKey, ncKey); + + List linkList = Arrays.asList( + newLink("link1", newSourceTp("tp1"), newDestTp("dest")), + newLink("link2", newSourceTp("source"), newDestTp("tp1")), + newLink("link3", newSourceTp("source2"), newDestTp("dest2"))); + final Topology topology = new TopologyBuilder().setLink(linkList).build(); + + InstanceIdentifier[] expDeletedIIDs = { + topologyIID.child(Link.class, linkList.get(0).getKey()), + topologyIID.child(Link.class, linkList.get(1).getKey()), + topologyIID.child(Node.class, new NodeKey(new NodeId("node1"))) + .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1"))) + }; + + final SettableFuture> readFuture = SettableFuture.create(); + ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class); + doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1) + .read(LogicalDatastoreType.OPERATIONAL, topologyIID); + + CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1); + + int expDeleteCalls = expDeletedIIDs.length; + CountDownLatch deleteLatch = new 
CountDownLatch(expDeleteCalls); + ArgumentCaptor deletedLinkIDs = + ArgumentCaptor.forClass(InstanceIdentifier.class); + setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch); + + ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class); + setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch); + CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2); + + doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef( + new NodeConnectorRef(invNodeConnID)).build()); + + waitForSubmit(submitLatch1); + + setReadFutureAsync(topology, readFuture); + + waitForDeletes(expDeleteCalls, deleteLatch); + + waitForSubmit(submitLatch2); + + assertDeletedIDs(expDeletedIIDs, deletedLinkIDs); + + verifyMockTx(mockTx1); + verifyMockTx(mockTx2); + } + + @SuppressWarnings("rawtypes") + @Test + public void testOnNodeConnectorRemovedWithNoTopology() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey = + newInvNodeConnKey("tp1"); + + InstanceIdentifier invNodeConnID = newNodeConnID(nodeKey, ncKey); + + InstanceIdentifier[] expDeletedIIDs = { + topologyIID.child(Node.class, new NodeKey(new NodeId("node1"))) + .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1"))) + }; + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx) + .read(LogicalDatastoreType.OPERATIONAL, topologyIID); + CountDownLatch submitLatch = setupStubbedSubmit(mockTx); + + CountDownLatch deleteLatch = new CountDownLatch(1); + ArgumentCaptor deletedLinkIDs = + ArgumentCaptor.forClass(InstanceIdentifier.class); + setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch); + + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef( + new NodeConnectorRef(invNodeConnID)).build()); + + waitForSubmit(submitLatch); + + waitForDeletes(1, deleteLatch); + + assertDeletedIDs(expDeletedIIDs, deletedLinkIDs); + } + + @Test + public void testOnNodeUpdated() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + InstanceIdentifier invNodeID = InstanceIdentifier.create(Nodes.class).child( + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, + nodeKey); + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + CountDownLatch submitLatch = setupStubbedSubmit(mockTx); + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeUpdated(new NodeUpdatedBuilder().setNodeRef(new NodeRef(invNodeID)) + .setId(nodeKey.getId()).addAugmentation(FlowCapableNodeUpdated.class, + new FlowCapableNodeUpdatedBuilder().build()).build()); + + waitForSubmit(submitLatch); + + ArgumentCaptor mergedNode = ArgumentCaptor.forClass(Node.class); + NodeId expNodeId = new NodeId("node1"); + verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(Node.class, + new NodeKey(expNodeId))), mergedNode.capture(), eq(true)); + assertEquals("getNodeId", expNodeId, mergedNode.getValue().getNodeId()); + InventoryNode augmentation = mergedNode.getValue().getAugmentation(InventoryNode.class); + assertNotNull("Missing augmentation", augmentation); + 
assertEquals("getInventoryNodeRef", new NodeRef(invNodeID), augmentation.getInventoryNodeRef()); + } + + @SuppressWarnings("rawtypes") + @Test + public void testOnNodeConnectorUpdated() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey = + newInvNodeConnKey("tp1"); + + InstanceIdentifier invNodeConnID = newNodeConnID(nodeKey, ncKey); + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + CountDownLatch submitLatch = setupStubbedSubmit(mockTx); + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef( + new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation( + FlowCapableNodeConnectorUpdated.class, + new FlowCapableNodeConnectorUpdatedBuilder().build()).build()); + + waitForSubmit(submitLatch); + + ArgumentCaptor mergedNode = ArgumentCaptor.forClass(TerminationPoint.class); + NodeId expNodeId = new NodeId("node1"); + TpId expTpId = new TpId("tp1"); + InstanceIdentifier expTpPath = topologyIID.child( + Node.class, new NodeKey(expNodeId)).child(TerminationPoint.class, + new TerminationPointKey(expTpId)); + verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath), + mergedNode.capture(), eq(true)); + assertEquals("getTpId", expTpId, mergedNode.getValue().getTpId()); + InventoryNodeConnector augmentation = mergedNode.getValue().getAugmentation( + InventoryNodeConnector.class); + assertNotNull("Missing augmentation", augmentation); + assertEquals("getInventoryNodeConnectorRef", new NodeConnectorRef(invNodeConnID), + augmentation.getInventoryNodeConnectorRef()); + } + + @SuppressWarnings("rawtypes") + @Test + public void testOnNodeConnectorUpdatedWithLinkStateDown() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey = + newInvNodeConnKey("tp1"); + + InstanceIdentifier invNodeConnID = newNodeConnID(nodeKey, ncKey); + + List linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest"))); + Topology topology = new TopologyBuilder().setLink(linkList).build(); + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx) + .read(LogicalDatastoreType.OPERATIONAL, topologyIID); + setupStubbedSubmit(mockTx); + + CountDownLatch deleteLatch = new CountDownLatch(1); + ArgumentCaptor deletedLinkIDs = + ArgumentCaptor.forClass(InstanceIdentifier.class); + setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch); + + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef( + new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation( + FlowCapableNodeConnectorUpdated.class, + new FlowCapableNodeConnectorUpdatedBuilder().setState( + new StateBuilder().setLinkDown(true).build()).build()).build()); + + waitForDeletes(1, deleteLatch); + + InstanceIdentifier expTpPath = topologyIID.child( + Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class, + new TerminationPointKey(new TpId("tp1"))); + + verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath), + any(TerminationPoint.class), eq(true)); 
+ + assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class, + linkList.get(0).getKey())}, deletedLinkIDs); + } + + + @SuppressWarnings("rawtypes") + @Test + public void testOnNodeConnectorUpdatedWithPortDown() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + nodeKey = newInvNodeKey("node1"); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey = + newInvNodeConnKey("tp1"); + + InstanceIdentifier invNodeConnID = newNodeConnID(nodeKey, ncKey); + + List linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest"))); + Topology topology = new TopologyBuilder().setLink(linkList).build(); + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx) + .read(LogicalDatastoreType.OPERATIONAL, topologyIID); + setupStubbedSubmit(mockTx); + + CountDownLatch deleteLatch = new CountDownLatch(1); + ArgumentCaptor deletedLinkIDs = + ArgumentCaptor.forClass(InstanceIdentifier.class); + setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch); + + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef( + new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation( + FlowCapableNodeConnectorUpdated.class, + new FlowCapableNodeConnectorUpdatedBuilder().setConfiguration( + new PortConfig(true, true, true, true)).build()).build()); + + waitForDeletes(1, deleteLatch); + + InstanceIdentifier expTpPath = topologyIID.child( + Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class, + new TerminationPointKey(new TpId("tp1"))); + + verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath), + any(TerminationPoint.class), eq(true)); + + assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class, + linkList.get(0).getKey())}, deletedLinkIDs); + } + + @Test + public void testOnLinkDiscovered() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + sourceNodeKey = newInvNodeKey("sourceNode"); + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey + sourceNodeConnKey = newInvNodeConnKey("sourceTP"); + InstanceIdentifier sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + destNodeKey = newInvNodeKey("destNode"); + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey + destNodeConnKey = newInvNodeConnKey("destTP"); + InstanceIdentifier destConnID = newNodeConnID(destNodeKey, destNodeConnKey); + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + CountDownLatch submitLatch = setupStubbedSubmit(mockTx); + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onLinkDiscovered(new LinkDiscoveredBuilder().setSource( + new NodeConnectorRef(sourceConnID)).setDestination( + new NodeConnectorRef(destConnID)).build()); + + waitForSubmit(submitLatch); + + ArgumentCaptor mergedNode = ArgumentCaptor.forClass(Link.class); + verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child( + Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))), + mergedNode.capture(), eq(true)); + assertEquals("Source node ID", "sourceNode", + mergedNode.getValue().getSource().getSourceNode().getValue()); + assertEquals("Dest TP ID", 
"sourceTP", + mergedNode.getValue().getSource().getSourceTp().getValue()); + assertEquals("Dest node ID", "destNode", + mergedNode.getValue().getDestination().getDestNode().getValue()); + assertEquals("Dest TP ID", "destTP", + mergedNode.getValue().getDestination().getDestTp().getValue()); + } + + @Test + public void testOnLinkRemoved() { + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + sourceNodeKey = newInvNodeKey("sourceNode"); + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey + sourceNodeConnKey = newInvNodeConnKey("sourceTP"); + InstanceIdentifier sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey); + + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + destNodeKey = newInvNodeKey("destNode"); + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey + destNodeConnKey = newInvNodeConnKey("destTP"); + InstanceIdentifier destConnID = newNodeConnID(destNodeKey, destNodeConnKey); + + ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class); + CountDownLatch submitLatch = setupStubbedSubmit(mockTx); + doReturn(mockTx).when(mockTxChain).newReadWriteTransaction(); + + exporter.onLinkRemoved(new LinkRemovedBuilder().setSource( + new NodeConnectorRef(sourceConnID)).setDestination( + new NodeConnectorRef(destConnID)).build()); + + waitForSubmit(submitLatch); + + verify(mockTx).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child( + Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))); + } + + private void verifyMockTx(ReadWriteTransaction mockTx) { + InOrder inOrder = inOrder(mockTx); + inOrder.verify(mockTx, atLeast(0)).submit(); + inOrder.verify(mockTx, never()).delete(eq(LogicalDatastoreType.OPERATIONAL), + any(InstanceIdentifier.class)); + } + + @SuppressWarnings("rawtypes") + private void assertDeletedIDs(InstanceIdentifier[] expDeletedIIDs, + ArgumentCaptor deletedLinkIDs) { + Set actualIIDs = new HashSet<>(deletedLinkIDs.getAllValues()); + for(InstanceIdentifier id: expDeletedIIDs) { + assertTrue("Missing expected deleted IID " + id, actualIIDs.contains(id)); + } + } + + private void setReadFutureAsync(final Topology topology, + final SettableFuture> readFuture) { + new Thread() { + @Override + public void run() { + Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS); + readFuture.set(Optional.of(topology)); + } + + }.start(); + } + + private void waitForSubmit(CountDownLatch latch) { + assertEquals("Transaction submitted", true, + Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS)); + } + + private void waitForDeletes(int expDeleteCalls, final CountDownLatch latch) { + boolean done = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS); + if(!done) { + fail("Expected " + expDeleteCalls + " delete calls. 
Actual: " + + (expDeleteCalls - latch.getCount())); + } + } + + private CountDownLatch setupStubbedSubmit(ReadWriteTransaction mockTx) { + final CountDownLatch latch = new CountDownLatch(1); + doAnswer(new Answer>() { + @Override + public CheckedFuture answer( + InvocationOnMock invocation) { + latch.countDown(); + return Futures.immediateCheckedFuture(null); + } + }).when(mockTx).submit(); + + return latch; + } + + @SuppressWarnings("rawtypes") + private void setupStubbedDeletes(ReadWriteTransaction mockTx, + ArgumentCaptor deletedLinkIDs, final CountDownLatch latch) { + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) { + latch.countDown(); + return null; + } + }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL), deletedLinkIDs.capture()); + } + + private org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey + newInvNodeKey(String id) { + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey = + new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey( + new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory. + rev130819.NodeId(id)); + return nodeKey; + } + + private NodeConnectorKey newInvNodeConnKey(String id) { + return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey( + new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819. + NodeConnectorId(id)); + } + + private KeyedInstanceIdentifier newNodeConnID( + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey, + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey) { + return InstanceIdentifier.create(Nodes.class).child( + org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, + nodeKey).child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory. 
+ rev130819.node.NodeConnector.class, ncKey); + } + + private Link newLink(String id, Source source, Destination dest) { + return new LinkBuilder().setLinkId(new LinkId(id)) + .setSource(source).setDestination(dest).build(); + } + + private Destination newDestTp(String id) { + return new DestinationBuilder().setDestTp(new TpId(id)).build(); + } + + private Source newSourceTp(String id) { + return new SourceBuilder().setSourceTp(new TpId(id)).build(); + } + + private Destination newDestNode(String id) { + return new DestinationBuilder().setDestNode(new NodeId(id)).build(); + } + + private Source newSourceNode(String id) { + return new SourceBuilder().setSourceNode(new NodeId(id)).build(); + } +} diff --git a/opendaylight/netconf/netconf-client/pom.xml b/opendaylight/netconf/netconf-client/pom.xml index bf27ed6f4d..6bb67d0681 100644 --- a/opendaylight/netconf/netconf-client/pom.xml +++ b/opendaylight/netconf/netconf-client/pom.xml @@ -24,6 +24,12 @@ ${project.groupId} netconf-util + + ${project.groupId} + netconf-util + test-jar + test + com.google.guava guava @@ -36,6 +42,10 @@ org.slf4j slf4j-api + + org.opendaylight.yangtools + mockito-configuration + diff --git a/opendaylight/netconf/netconf-client/src/main/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiator.java b/opendaylight/netconf/netconf-client/src/main/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiator.java index e2ac49c3ef..cbbee1f655 100644 --- a/opendaylight/netconf/netconf-client/src/main/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiator.java +++ b/opendaylight/netconf/netconf-client/src/main/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiator.java @@ -8,6 +8,7 @@ package org.opendaylight.controller.netconf.client; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import io.netty.channel.Channel; @@ -70,8 +71,8 @@ public class NetconfClientSessionNegotiator extends logger.debug("Netconf session {} should use exi.", session); NetconfStartExiMessage startExiMessage = (NetconfStartExiMessage) sessionPreferences.getStartExiMessage(); tryToInitiateExi(session, startExiMessage); - // Exi is not supported, release session immediately } else { + // Exi is not supported, release session immediately logger.debug("Netconf session {} isn't capable of using exi.", session); negotiationSuccessful(session); } @@ -117,6 +118,7 @@ public class NetconfClientSessionNegotiator extends private long extractSessionId(final Document doc) { final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE); + Preconditions.checkState(sessionIdNode != null, ""); String textContent = sessionIdNode.getTextContent(); if (textContent == null || textContent.equals("")) { throw new IllegalStateException("Session id not received from server"); diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientConfigurationTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientConfigurationTest.java new file mode 100644 index 0000000000..592cdad4c1 --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientConfigurationTest.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+
+import java.net.InetSocketAddress;
+
+public class NetconfClientConfigurationTest {
+    @Test
+    public void testNetconfClientConfiguration() throws Exception {
+        Long timeout = 200L;
+        NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+        NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+        InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+        ReconnectStrategy strategy = Mockito.mock(ReconnectStrategy.class);
+        AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+        NetconfClientConfiguration cfg = NetconfClientConfigurationBuilder.create().
+                withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+                withAddress(address).
+                withConnectionTimeoutMillis(timeout).
+                withReconnectStrategy(strategy).
+                withAdditionalHeader(header).
+                withSessionListener(listener).
+                withAuthHandler(handler).build();
+
+        Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+        Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+        Assert.assertEquals(listener, cfg.getSessionListener());
+        Assert.assertEquals(handler, cfg.getAuthHandler());
+        Assert.assertEquals(strategy, cfg.getReconnectStrategy());
+        Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+        Assert.assertEquals(address, cfg.getAddress());
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientDispatcherImplTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientDispatcherImplTest.java
new file mode 100644
index 0000000000..5a2ec5656f
--- /dev/null
+++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientDispatcherImplTest.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelPromise; +import io.netty.channel.EventLoopGroup; +import io.netty.util.HashedWheelTimer; +import io.netty.util.Timer; +import io.netty.util.concurrent.GenericFutureListener; +import org.junit.Test; +import org.mockito.Mockito; +import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl; +import org.opendaylight.controller.netconf.client.NetconfClientSessionListener; +import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener; +import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration; +import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder; +import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration; +import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder; +import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader; +import org.opendaylight.protocol.framework.ReconnectStrategy; +import org.opendaylight.protocol.framework.ReconnectStrategyFactory; + +import java.net.InetSocketAddress; +import java.util.concurrent.Future; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doReturn; + +public class NetconfClientDispatcherImplTest { + @Test + public void testNetconfClientDispatcherImpl() throws Exception { + EventLoopGroup bossGroup = Mockito.mock(EventLoopGroup.class); + EventLoopGroup workerGroup = Mockito.mock(EventLoopGroup.class); + Timer timer = new HashedWheelTimer(); + + ChannelFuture chf = Mockito.mock(ChannelFuture.class); + Channel ch = Mockito.mock(Channel.class); + doReturn(ch).when(chf).channel(); + Throwable thr = Mockito.mock(Throwable.class); + doReturn(chf).when(workerGroup).register(any(Channel.class)); + + ChannelPromise promise = Mockito.mock(ChannelPromise.class); + doReturn(promise).when(chf).addListener(any(GenericFutureListener.class)); + doReturn(thr).when(chf).cause(); + + Long timeout = 200L; + NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id"); + NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener(); + InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830); + ReconnectStrategyFactory reconnectStrategyFactory = Mockito.mock(ReconnectStrategyFactory.class); + AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class); + ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class); + + doReturn(5).when(reconnect).getConnectTimeout(); + doReturn("").when(reconnect).toString(); + doReturn("").when(handler).toString(); + doReturn("").when(reconnectStrategyFactory).toString(); + doReturn(reconnect).when(reconnectStrategyFactory).createReconnectStrategy(); + + NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create(). 
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH). + withAddress(address). + withConnectionTimeoutMillis(timeout). + withReconnectStrategy(reconnect). + withAdditionalHeader(header). + withSessionListener(listener). + withConnectStrategyFactory(reconnectStrategyFactory). + withAuthHandler(handler).build(); + + NetconfReconnectingClientConfiguration cfg2 = NetconfReconnectingClientConfigurationBuilder.create(). + withProtocol(NetconfClientConfiguration.NetconfClientProtocol.TCP). + withAddress(address). + withConnectionTimeoutMillis(timeout). + withReconnectStrategy(reconnect). + withAdditionalHeader(header). + withSessionListener(listener). + withConnectStrategyFactory(reconnectStrategyFactory). + withAuthHandler(handler).build(); + + NetconfClientDispatcherImpl dispatcher = new NetconfClientDispatcherImpl(bossGroup, workerGroup, timer); + Future sshSession = dispatcher.createClient(cfg); + Future tcpSession = dispatcher.createClient(cfg2); + + Future sshReconn = dispatcher.createReconnectingClient(cfg); + Future tcpReconn = dispatcher.createReconnectingClient(cfg2); + + assertNotNull(sshSession); + assertNotNull(tcpSession); + assertNotNull(sshReconn); + assertNotNull(tcpReconn); + + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorFactoryTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorFactoryTest.java new file mode 100644 index 0000000000..0557a0c268 --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorFactoryTest.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import com.google.common.base.Optional; +import io.netty.channel.Channel; +import io.netty.util.HashedWheelTimer; +import io.netty.util.Timer; +import io.netty.util.concurrent.Promise; +import org.apache.sshd.common.SessionListener; +import org.junit.Test; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader; +import org.opendaylight.protocol.framework.SessionListenerFactory; +import org.opendaylight.protocol.framework.SessionNegotiator; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + +public class NetconfClientSessionNegotiatorFactoryTest { + @Test + public void testGetSessionNegotiator() throws Exception { + NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class); + Timer timer = new HashedWheelTimer(); + SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class); + doReturn(sessionListener).when(listenerFactory).getSessionListener(); + + Channel channel = mock(Channel.class); + Promise promise = mock(Promise.class); + NetconfClientSessionNegotiatorFactory negotiatorFactory = new NetconfClientSessionNegotiatorFactory(timer, + Optional.absent(), 200L); + + SessionNegotiator sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise); + assertNotNull(sessionNegotiator); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorTest.java new file mode 100644 index 0000000000..333e9deae4 --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorTest.java @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import com.google.common.base.Optional; +import io.netty.channel.*; +import io.netty.handler.ssl.SslHandler; +import io.netty.util.HashedWheelTimer; +import io.netty.util.concurrent.GenericFutureListener; +import io.netty.util.concurrent.Promise; +import org.apache.mina.handler.demux.ExceptionHandler; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.internal.util.collections.Sets; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences; +import org.opendaylight.controller.netconf.api.NetconfMessage; +import io.netty.util.Timer; +import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder; +import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder; +import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder; +import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader; +import org.opendaylight.controller.netconf.util.test.XmlFileLoader; +import org.opendaylight.controller.netconf.util.xml.XmlUtil; +import org.openexi.proc.common.EXIOptions; +import org.w3c.dom.Document; +import java.util.Set; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.*; + +public class NetconfClientSessionNegotiatorTest { + + private NetconfHelloMessage helloMessage; + private ChannelPipeline pipeline; + private ChannelFuture future; + private Channel channel; + private ChannelInboundHandlerAdapter channelInboundHandlerAdapter; + + @Before + public void setUp() throws Exception { + helloMessage = NetconfHelloMessage.createClientHello(Sets.newSet("exi:1.0"), Optional.absent()); + pipeline = mockChannelPipeline(); + future = mockChannelFuture(); + channel = mockChannel(); + System.out.println("setup done"); + } + + private ChannelHandler mockChannelHandler() { + ChannelHandler handler = mock(ChannelHandler.class); + return handler; + } + + private Channel mockChannel() { + Channel channel = mock(Channel.class); + ChannelHandler channelHandler = mockChannelHandler(); + doReturn("").when(channel).toString(); + doReturn(future).when(channel).close(); + doReturn(future).when(channel).writeAndFlush(anyObject()); + doReturn(true).when(channel).isOpen(); + doReturn(pipeline).when(channel).pipeline(); + doReturn("").when(pipeline).toString(); + doReturn(pipeline).when(pipeline).remove(any(ChannelHandler.class)); + doReturn(channelHandler).when(pipeline).remove(anyString()); + return channel; + } + + private ChannelFuture mockChannelFuture() { + ChannelFuture future = mock(ChannelFuture.class); + doReturn(future).when(future).addListener(any(GenericFutureListener.class)); + return future; + } + + private ChannelPipeline mockChannelPipeline() { + ChannelPipeline pipeline = mock(ChannelPipeline.class); + ChannelHandler handler = mock(ChannelHandler.class); + 
doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class)); + doReturn(null).when(pipeline).get(SslHandler.class); + doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class)); + doReturn(handler).when(pipeline).replace(anyString(), anyString(), any(ChunkedFramingMechanismEncoder.class)); + + NetconfXMLToHelloMessageDecoder messageDecoder = new NetconfXMLToHelloMessageDecoder(); + doReturn(messageDecoder).when(pipeline).replace(anyString(), anyString(), any(NetconfXMLToMessageDecoder.class)); + doReturn(pipeline).when(pipeline).replace(any(ChannelHandler.class), anyString(), any(NetconfClientSession.class)); + return pipeline; + } + + private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise, + NetconfMessage startExi) { + ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class); + NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi); + doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class)); + + long timeout = 10L; + NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class); + Timer timer = new HashedWheelTimer(); + return new NetconfClientSessionNegotiator(preferences, promise, channel, timer, sessionListener, timeout); + } + + @Test + public void testNetconfClientSessionNegotiator() throws Exception { + Promise promise = mock(Promise.class); + doReturn(promise).when(promise).setSuccess(anyObject()); + NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null); + + negotiator.channelActive(null); + Set caps = Sets.newSet("a", "b"); + NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10); + negotiator.handleMessage(helloServerMessage); + verify(promise).setSuccess(anyObject()); + } + + @Test + public void testNetconfClientSessionNegotiatorWithEXI() throws Exception { + Promise promise = mock(Promise.class); + EXIOptions exiOptions = new EXIOptions(); + NetconfStartExiMessage exiMessage = NetconfStartExiMessage.create(exiOptions, "msg-id"); + doReturn(promise).when(promise).setSuccess(anyObject()); + NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage); + + negotiator.channelActive(null); + Set caps = Sets.newSet("exi:1.0"); + NetconfHelloMessage helloMessage = NetconfHelloMessage.createServerHello(caps, 10); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocationOnMock) throws Throwable { + channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]); + return null; + } + }).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class)); + + ChannelHandlerContext handlerContext = mock(ChannelHandlerContext.class); + doReturn(pipeline).when(handlerContext).pipeline(); + negotiator.handleMessage(helloMessage); + Document expectedResult = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml"); + channelInboundHandlerAdapter.channelRead(handlerContext, new NetconfMessage(expectedResult)); + + verify(promise).setSuccess(anyObject()); + + // two calls for exiMessage, 2 for hello message + verify(pipeline, times(4)).replace(anyString(), anyString(), any(ChannelHandler.class)); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionTest.java 
b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionTest.java new file mode 100644 index 0000000000..4175190e14 --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionTest.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import com.google.common.collect.Lists; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelPipeline; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.opendaylight.controller.netconf.client.NetconfClientSession; +import org.opendaylight.controller.netconf.client.NetconfClientSessionListener; +import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec; +import org.openexi.proc.common.EXIOptions; + +import java.util.ArrayList; +import java.util.Collection; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; + +public class NetconfClientSessionTest { + + @Mock + ChannelHandler channelHandler; + + @Mock + Channel channel; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + } + + @Test + public void testNetconfClientSession() throws Exception { + NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class); + long sessId = 20L; + Collection caps = Lists.newArrayList("cap1", "cap2"); + + NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions()); + ChannelPipeline pipeline = mock(ChannelPipeline.class); + + Mockito.doReturn(pipeline).when(channel).pipeline(); + Mockito.doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class)); + Mockito.doReturn("").when(channelHandler).toString(); + + NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps); + session.addExiHandlers(codec); + session.stopExiCommunication(); + + assertEquals(caps, session.getServerCapabilities()); + assertEquals(session, session.thisInstance()); + + Mockito.verify(pipeline, Mockito.times(4)).replace(anyString(), anyString(), Mockito.any(ChannelHandler.class)); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfReconnectingClientConfigurationTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfReconnectingClientConfigurationTest.java new file mode 100644 index 0000000000..e79a370ec7 --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfReconnectingClientConfigurationTest.java @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import com.google.common.base.Optional; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; +import org.opendaylight.controller.config.yang.protocol.framework.NeverReconnectStrategyFactoryModule; +import org.opendaylight.controller.netconf.client.NetconfClientSessionListener; +import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener; +import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration; +import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration; +import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder; +import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader; +import org.opendaylight.protocol.framework.ReconnectStrategy; +import org.opendaylight.protocol.framework.ReconnectStrategyFactory; + +import java.net.InetSocketAddress; + +public class NetconfReconnectingClientConfigurationTest { + @Test + public void testNetconfReconnectingClientConfiguration() throws Exception { + Long timeout = 200L; + NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id"); + NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener(); + InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830); + ReconnectStrategyFactory strategy = Mockito.mock(ReconnectStrategyFactory.class); + AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class); + ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class); + + NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create(). + withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH). + withAddress(address). + withConnectionTimeoutMillis(timeout). + withReconnectStrategy(reconnect). + withAdditionalHeader(header). + withSessionListener(listener). + withConnectStrategyFactory(strategy). + withAuthHandler(handler).build(); + + Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis()); + Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader()); + Assert.assertEquals(listener, cfg.getSessionListener()); + Assert.assertEquals(handler, cfg.getAuthHandler()); + Assert.assertEquals(strategy, cfg.getConnectStrategyFactory()); + Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol()); + Assert.assertEquals(address, cfg.getAddress()); + Assert.assertEquals(reconnect, cfg.getReconnectStrategy()); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SimpleNetconfClientSessionListenerTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SimpleNetconfClientSessionListenerTest.java new file mode 100644 index 0000000000..e067cc225f --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SimpleNetconfClientSessionListenerTest.java @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. 
and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; +import io.netty.channel.*; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import org.junit.Before; +import org.junit.Test; +import org.mockito.internal.util.collections.Sets; +import org.opendaylight.controller.netconf.api.NetconfMessage; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage; + +import java.util.Set; + +import static org.junit.Assert.*; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.*; + +public class SimpleNetconfClientSessionListenerTest { + + private Channel channel; + private ChannelFuture channelFuture; + Set caps; + private NetconfHelloMessage helloMessage; + private NetconfMessage message; + private NetconfClientSessionListener sessionListener; + private NetconfClientSession clientSession; + + @Before + public void setUp() throws Exception { + channel = mock(Channel.class); + channelFuture = mock(ChannelFuture.class); + doReturn(channelFuture).when(channel).writeAndFlush(anyObject()); + caps = Sets.newSet("a", "b"); + helloMessage = NetconfHelloMessage.createServerHello(caps, 10); + message = new NetconfMessage(helloMessage.getDocument()); + sessionListener = mock(NetconfClientSessionListener.class); + clientSession = new NetconfClientSession(sessionListener, channel, 20L, caps); + } + + @Test + public void testSessionDown() throws Exception { + SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener(); + Future promise = simpleListener.sendRequest(message); + simpleListener.onSessionUp(clientSession); + verify(channel, times(1)).writeAndFlush(anyObject()); + + simpleListener.onSessionDown(clientSession, new Exception()); + assertFalse(promise.isSuccess()); + } + + @Test + public void testSendRequest() throws Exception { + SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener(); + Future promise = simpleListener.sendRequest(message); + simpleListener.onSessionUp(clientSession); + verify(channel, times(1)).writeAndFlush(anyObject()); + + simpleListener.sendRequest(message); + assertFalse(promise.isSuccess()); + } + + @Test + public void testOnMessage() throws Exception { + SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener(); + Future promise = simpleListener.sendRequest(message); + simpleListener.onSessionUp(clientSession); + verify(channel, times(1)).writeAndFlush(anyObject()); + + simpleListener.onMessage(clientSession, message); + assertTrue(promise.isSuccess()); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SshClientChannelInitializerTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SshClientChannelInitializerTest.java new file mode 100644 index 0000000000..0830c2967b --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SshClientChannelInitializerTest.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelPipeline; +import io.netty.util.concurrent.Promise; +import org.junit.Test; +import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler; +import org.opendaylight.protocol.framework.SessionListenerFactory; +import org.opendaylight.protocol.framework.SessionNegotiator; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.*; + +public class SshClientChannelInitializerTest { + @Test + public void test() throws Exception { + + AuthenticationHandler authenticationHandler = mock(AuthenticationHandler.class); + NetconfClientSessionNegotiatorFactory negotiatorFactory = mock(NetconfClientSessionNegotiatorFactory.class); + NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class); + + SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class); + doReturn("").when(sessionNegotiator).toString(); + doReturn(sessionNegotiator).when(negotiatorFactory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class)); + ChannelPipeline pipeline = mock(ChannelPipeline.class); + doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class)); + Channel channel = mock(Channel.class); + doReturn(pipeline).when(channel).pipeline(); + doReturn("").when(channel).toString(); + doReturn(pipeline).when(pipeline).addFirst(any(ChannelHandler.class)); + doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class)); + + Promise promise = mock(Promise.class); + doReturn("").when(promise).toString(); + + SshClientChannelInitializer initializer = new SshClientChannelInitializer(authenticationHandler, negotiatorFactory, + sessionListener); + initializer.initialize(channel, promise); + verify(pipeline, times(1)).addFirst(any(ChannelHandler.class)); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TcpClientChannelInitializerTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TcpClientChannelInitializerTest.java new file mode 100644 index 0000000000..e355cf45e7 --- /dev/null +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TcpClientChannelInitializerTest.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.client; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelPipeline; +import io.netty.util.concurrent.Promise; +import org.junit.Test; +import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer; +import org.opendaylight.protocol.framework.SessionListenerFactory; +import org.opendaylight.protocol.framework.SessionNegotiator; + +import static org.mockito.Mockito.*; + +public class TcpClientChannelInitializerTest { + @Test + public void testInitializeSessionNegotiator() throws Exception { + NetconfClientSessionNegotiatorFactory factory = mock(NetconfClientSessionNegotiatorFactory.class); + SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class); + doReturn("").when(sessionNegotiator).toString(); + doReturn(sessionNegotiator).when(factory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class)); + NetconfClientSessionListener listener = mock(NetconfClientSessionListener.class); + TcpClientChannelInitializer initializer = new TcpClientChannelInitializer(factory, listener); + ChannelPipeline pipeline = mock(ChannelPipeline.class); + doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class)); + Channel channel = mock(Channel.class); + doReturn(pipeline).when(channel).pipeline(); + doReturn("").when(channel).toString(); + + Promise promise = mock(Promise.class); + doReturn("").when(promise).toString(); + + initializer.initializeSessionNegotiator(channel, promise); + verify(pipeline, times(1)).addAfter(anyString(), anyString(), any(ChannelHandler.class)); + } +} diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/test/TestingNetconfClient.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TestingNetconfClient.java similarity index 92% rename from opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/test/TestingNetconfClient.java rename to opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TestingNetconfClient.java index 18ed18e4ae..d7209d9295 100644 --- a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/test/TestingNetconfClient.java +++ b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TestingNetconfClient.java @@ -6,7 +6,7 @@ * and is available at http://www.eclipse.org/legal/epl-v10.html */ -package org.opendaylight.controller.netconf.client.test; +package org.opendaylight.controller.netconf.client; import com.google.common.base.Optional; import com.google.common.base.Preconditions; @@ -26,11 +26,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.opendaylight.controller.netconf.api.NetconfMessage; -import org.opendaylight.controller.netconf.client.NetconfClientDispatcher; -import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl; -import org.opendaylight.controller.netconf.client.NetconfClientSession; -import 
org.opendaylight.controller.netconf.client.NetconfClientSessionListener; -import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration.NetconfClientProtocol; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder; diff --git a/opendaylight/netconf/netconf-impl/src/test/java/org/opendaylight/controller/netconf/impl/ConcurrentClientsTest.java b/opendaylight/netconf/netconf-impl/src/test/java/org/opendaylight/controller/netconf/impl/ConcurrentClientsTest.java index c5281d01f8..5f8bc06e10 100644 --- a/opendaylight/netconf/netconf-impl/src/test/java/org/opendaylight/controller/netconf/impl/ConcurrentClientsTest.java +++ b/opendaylight/netconf/netconf-impl/src/test/java/org/opendaylight/controller/netconf/impl/ConcurrentClientsTest.java @@ -59,7 +59,7 @@ import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl; import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder; -import org.opendaylight.controller.netconf.client.test.TestingNetconfClient; +import org.opendaylight.controller.netconf.client.TestingNetconfClient; import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl; import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService; import org.opendaylight.controller.netconf.mapping.api.Capability; diff --git a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfConfigPersisterITTest.java b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfConfigPersisterITTest.java index 4b49c0928b..d8eb841a79 100644 --- a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfConfigPersisterITTest.java +++ b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfConfigPersisterITTest.java @@ -36,7 +36,7 @@ import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder; import org.opendaylight.controller.config.persist.api.Persister; import org.opendaylight.controller.netconf.api.NetconfMessage; import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification; -import org.opendaylight.controller.netconf.client.test.TestingNetconfClient; +import org.opendaylight.controller.netconf.client.TestingNetconfClient; import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer; import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl; import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl; diff --git a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITMonitoringTest.java b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITMonitoringTest.java index 72a2f8f7ac..a9558c06cd 100644 --- a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITMonitoringTest.java +++ b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITMonitoringTest.java @@ -28,7 +28,7 @@ import java.util.Set; import org.junit.Test; import 
org.opendaylight.controller.netconf.api.NetconfMessage; import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession; -import org.opendaylight.controller.netconf.client.test.TestingNetconfClient; +import org.opendaylight.controller.netconf.client.TestingNetconfClient; import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl; import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl; import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService; diff --git a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITSecureTest.java b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITSecureTest.java index 67ccf0c02c..4fe5f2a950 100644 --- a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITSecureTest.java +++ b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITSecureTest.java @@ -34,7 +34,7 @@ import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl; import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration; import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder; -import org.opendaylight.controller.netconf.client.test.TestingNetconfClient; +import org.opendaylight.controller.netconf.client.TestingNetconfClient; import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler; import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword; import org.opendaylight.controller.netconf.ssh.NetconfSSHServer; diff --git a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITTest.java b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITTest.java index a7a9d7494a..4c0730863f 100644 --- a/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITTest.java +++ b/opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITTest.java @@ -41,7 +41,7 @@ import org.opendaylight.controller.config.yang.test.impl.NetconfTestImplModuleMX import org.opendaylight.controller.netconf.api.NetconfDocumentedException; import org.opendaylight.controller.netconf.api.NetconfMessage; import org.opendaylight.controller.netconf.client.NetconfClientDispatcher; -import org.opendaylight.controller.netconf.client.test.TestingNetconfClient; +import org.opendaylight.controller.netconf.client.TestingNetconfClient; import org.opendaylight.controller.netconf.util.test.XmlFileLoader; import org.opendaylight.controller.netconf.util.xml.XmlElement; import org.opendaylight.controller.netconf.util.xml.XmlUtil; diff --git a/opendaylight/netconf/netconf-monitoring/src/test/java/org/opendaylight/controller/netconf/monitoring/xml/JaxBSerializerTest.java b/opendaylight/netconf/netconf-monitoring/src/test/java/org/opendaylight/controller/netconf/monitoring/xml/JaxBSerializerTest.java index d0d587fb84..08441b4ce5 100644 --- a/opendaylight/netconf/netconf-monitoring/src/test/java/org/opendaylight/controller/netconf/monitoring/xml/JaxBSerializerTest.java +++ b/opendaylight/netconf/netconf-monitoring/src/test/java/org/opendaylight/controller/netconf/monitoring/xml/JaxBSerializerTest.java @@ -38,7 +38,7 @@ import 
org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types. public class JaxBSerializerTest { @Test - public void testName() throws Exception { + public void testSerialization() throws Exception { final NetconfMonitoringService service = new NetconfMonitoringService() { @@ -53,29 +53,29 @@ public class JaxBSerializerTest { } }; final NetconfState model = new NetconfState(service); - final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)); + final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)).replaceAll("\\s", ""); assertThat(xml, CoreMatchers.containsString( - "\n" + - "yang\n" + - "id\n" + - "NETCONF\n" + - "localhost\n" + - "v1\n" + - "\n")); + "" + + "yang" + + "id" + + "NETCONF" + + "localhost" + + "v1" + + "")); assertThat(xml, CoreMatchers.containsString( - "\n" + - "1\n" + - "0\n" + - "0\n" + - "loginTime\n" + - "0\n" + - "0\n" + - "client\n" + - "address/port\n" + - "ncme:netconf-tcp\n" + - "username\n" + + "" + + "1" + + "0" + + "0" + + "loginTime" + + "0" + + "0" + + "client" + + "address/port" + + "ncme:netconf-tcp" + + "username" + "")); } diff --git a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParameters.java b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParameters.java index 993709258a..531ba3ccb7 100644 --- a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParameters.java +++ b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParameters.java @@ -7,18 +7,21 @@ */ package org.opendaylight.controller.netconf.nettyutil.handler.exi; -import com.google.common.base.Preconditions; import org.opendaylight.controller.netconf.util.xml.XmlElement; import org.openexi.proc.common.AlignmentType; import org.openexi.proc.common.EXIOptions; import org.openexi.proc.common.EXIOptionsException; +import org.w3c.dom.Element; +import org.w3c.dom.NodeList; + +import com.google.common.base.Preconditions; public final class EXIParameters { private static final String EXI_PARAMETER_ALIGNMENT = "alignment"; - private static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned"; - private static final String EXI_PARAMETER_BIT_PACKED = "bit-packed"; - private static final String EXI_PARAMETER_COMPRESSED = "compressed"; - private static final String EXI_PARAMETER_PRE_COMPRESSION = "pre-compression"; + static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned"; + static final String EXI_PARAMETER_BIT_PACKED = "bit-packed"; + static final String EXI_PARAMETER_COMPRESSED = "compressed"; + static final String EXI_PARAMETER_PRE_COMPRESSION = "pre-compression"; private static final String EXI_PARAMETER_FIDELITY = "fidelity"; private static final String EXI_FIDELITY_DTD = "dtd"; @@ -38,15 +41,25 @@ public final class EXIParameters { final EXIOptions options = new EXIOptions(); options.setAlignmentType(AlignmentType.bitPacked); - if (root.getElementsByTagName(EXI_PARAMETER_ALIGNMENT).getLength() > 0) { - if (root.getElementsByTagName(EXI_PARAMETER_BIT_PACKED).getLength() > 0) { - options.setAlignmentType(AlignmentType.bitPacked); - } else if (root.getElementsByTagName(EXI_PARAMETER_BYTE_ALIGNED).getLength() > 0) { - options.setAlignmentType(AlignmentType.byteAligned); - } else if (root.getElementsByTagName(EXI_PARAMETER_COMPRESSED).getLength() > 0) { - 
options.setAlignmentType(AlignmentType.compress); - } else if (root.getElementsByTagName(EXI_PARAMETER_PRE_COMPRESSION).getLength() > 0) { - options.setAlignmentType(AlignmentType.preCompress); + + final NodeList alignmentElements = root.getElementsByTagName(EXI_PARAMETER_ALIGNMENT); + if (alignmentElements.getLength() > 0) { + final Element alignmentElement = (Element) alignmentElements.item(0); + final String alignmentTextContent = alignmentElement.getTextContent().trim(); + + switch (alignmentTextContent) { + case EXI_PARAMETER_BIT_PACKED: + options.setAlignmentType(AlignmentType.bitPacked); + break; + case EXI_PARAMETER_BYTE_ALIGNED: + options.setAlignmentType(AlignmentType.byteAligned); + break; + case EXI_PARAMETER_COMPRESSED: + options.setAlignmentType(AlignmentType.compress); + break; + case EXI_PARAMETER_PRE_COMPRESSION: + options.setAlignmentType(AlignmentType.preCompress); + break; } } diff --git a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessage.java b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessage.java index 72eb774b53..1d301d3d35 100644 --- a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessage.java +++ b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessage.java @@ -8,8 +8,8 @@ package org.opendaylight.controller.netconf.nettyutil.handler.exi; +import com.google.common.collect.Lists; import java.util.List; - import org.opendaylight.controller.netconf.api.NetconfMessage; import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants; import org.opendaylight.controller.netconf.util.xml.XmlUtil; @@ -17,8 +17,6 @@ import org.openexi.proc.common.EXIOptions; import org.w3c.dom.Document; import org.w3c.dom.Element; -import com.google.common.collect.Lists; - /** * Start-exi netconf message. 
*/ @@ -33,19 +31,19 @@ public final class NetconfStartExiMessage extends NetconfMessage { public static final String PIS_KEY = "pis"; public static final String PREFIXES_KEY = "prefixes"; - private NetconfStartExiMessage(Document doc) { + private NetconfStartExiMessage(final Document doc) { super(doc); } - public static NetconfStartExiMessage create(EXIOptions exiOptions, String messageId) { - Document doc = XmlUtil.newDocument(); - Element rpcElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, + public static NetconfStartExiMessage create(final EXIOptions exiOptions, final String messageId) { + final Document doc = XmlUtil.newDocument(); + final Element rpcElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, XmlNetconfConstants.RPC_KEY); rpcElement.setAttributeNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, XmlNetconfConstants.MESSAGE_ID, messageId); // TODO draft http://tools.ietf.org/html/draft-varga-netconf-exi-capability-02#section-3.5.1 has no namespace for start-exi element in xml - Element startExiElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, + final Element startExiElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, START_EXI); addAlignment(exiOptions, doc, startExiElement); @@ -57,8 +55,8 @@ public final class NetconfStartExiMessage extends NetconfMessage { return new NetconfStartExiMessage(doc); } - private static void addFidelity(EXIOptions exiOptions, Document doc, Element startExiElement) { - List fidelityElements = Lists.newArrayList(); + private static void addFidelity(final EXIOptions exiOptions, final Document doc, final Element startExiElement) { + final List fidelityElements = Lists.newArrayList(); createFidelityElement(doc, fidelityElements, exiOptions.getPreserveComments(), COMMENTS_KEY); createFidelityElement(doc, fidelityElements, exiOptions.getPreserveDTD(), DTD_KEY); createFidelityElement(doc, fidelityElements, exiOptions.getPreserveLexicalValues(), LEXICAL_VALUES_KEY); @@ -66,23 +64,44 @@ public final class NetconfStartExiMessage extends NetconfMessage { createFidelityElement(doc, fidelityElements, exiOptions.getPreserveNS(), PREFIXES_KEY); if (fidelityElements.isEmpty() == false) { - Element fidelityElement = doc.createElementNS( + final Element fidelityElement = doc.createElementNS( XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, FIDELITY_KEY); - for (Element element : fidelityElements) { + for (final Element element : fidelityElements) { fidelityElement.appendChild(element); } startExiElement.appendChild(fidelityElement); } } - private static void addAlignment(EXIOptions exiOptions, Document doc, Element startExiElement) { - Element alignmentElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, + private static void addAlignment(final EXIOptions exiOptions, final Document doc, final Element startExiElement) { + final Element alignmentElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, ALIGNMENT_KEY); - alignmentElement.setTextContent(exiOptions.getAlignmentType().toString()); + + String alignmentString = EXIParameters.EXI_PARAMETER_BIT_PACKED; + switch (exiOptions.getAlignmentType()) { + case byteAligned: { + alignmentString = EXIParameters.EXI_PARAMETER_BYTE_ALIGNED; + break; + } + case bitPacked: { + alignmentString = EXIParameters.EXI_PARAMETER_BIT_PACKED; + break; + } + case compress: { + 
alignmentString = EXIParameters.EXI_PARAMETER_COMPRESSED; + break; + } + case preCompress: { + alignmentString = EXIParameters.EXI_PARAMETER_PRE_COMPRESSION; + break; + } + } + + alignmentElement.setTextContent(alignmentString); startExiElement.appendChild(alignmentElement); } - private static void createFidelityElement(Document doc, List fidelityElements, boolean fidelity, String fidelityName) { + private static void createFidelityElement(final Document doc, final List fidelityElements, final boolean fidelity, final String fidelityName) { if (fidelity) { fidelityElements.add(doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, diff --git a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java index 0d877c9ec7..369c013832 100644 --- a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java +++ b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java @@ -148,9 +148,11 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter { connectPromise = null; sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut()); - sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn()); - - ctx.fireChannelActive(); + // if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null + if(channel != null) { + sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn()); + ctx.fireChannelActive(); + } } private synchronized void handleSshSetupFailure(final ChannelHandlerContext ctx, final Throwable e) { @@ -230,17 +232,14 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter { @Override public synchronized void operationComplete(final IoReadFuture future) { if(future.getException() != null) { - if(asyncOut.isClosed() || asyncOut.isClosing()) { - // Ssh dropped logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException()); - invokeDisconnect(); - return; } else { logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException()); - invokeDisconnect(); } + invokeDisconnect(); + return; } if (future.getRead() > 0) { @@ -324,6 +323,7 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter { // Check limit for pending writes pendingWriteCounter++; if(pendingWriteCounter > MAX_PENDING_WRITES) { + promise.setFailure(e); handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() + ", remote window is not getting read or is too small")); } @@ -331,6 +331,7 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter { logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter); // In case of pending, re-invoke write after pending is finished + Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e); lastWriteFuture.addListener(new SshFutureListener() { @Override public void operationComplete(final IoWriteFuture future) { diff --git 
a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractChannelInitializerTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractChannelInitializerTest.java new file mode 100644 index 0000000000..83eafb5299 --- /dev/null +++ b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractChannelInitializerTest.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.nettyutil; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelPipeline; +import io.netty.util.concurrent.Promise; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.opendaylight.controller.netconf.api.NetconfSession; + +public class AbstractChannelInitializerTest { + + @Mock + private Channel channel; + @Mock + private ChannelPipeline pipeline; + @Mock + private Promise sessionPromise; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + doReturn(pipeline).when(channel).pipeline(); + doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class)); + } + + @Test + public void testInit() throws Exception { + final TestingInitializer testingInitializer = new TestingInitializer(); + testingInitializer.initialize(channel, sessionPromise); + verify(pipeline, times(4)).addLast(anyString(), any(ChannelHandler.class)); + } + + private static final class TestingInitializer extends AbstractChannelInitializer { + + @Override + protected void initializeSessionNegotiator(final Channel ch, final Promise promise) { + } + } + +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractNetconfSessionTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractNetconfSessionTest.java new file mode 100644 index 0000000000..8199963c81 --- /dev/null +++ b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractNetconfSessionTest.java @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
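One behavioural change in AsyncSshHandler above is worth restating: when the SSH output stream throws WritePendingException, the write is queued, a pending counter is bumped, and once the counter passes MAX_PENDING_WRITES the write promise is now failed instead of being silently dropped. A minimal stand-alone sketch of that back-pressure rule, using CompletableFuture in place of Netty promises (the constant value and names here are illustrative):

import java.util.concurrent.CompletableFuture;

// Illustrative back-pressure guard: allow a bounded number of queued writes,
// fail fast once the remote window stops draining.
class PendingWriteGuard {
    static final int MAX_PENDING_WRITES = 1000;

    private int pendingWriteCounter;

    synchronized void onWritePending(final CompletableFuture<Void> writePromise) {
        pendingWriteCounter++;
        if (pendingWriteCounter > MAX_PENDING_WRITES) {
            writePromise.completeExceptionally(new IllegalStateException(
                "Too many pending writes (" + MAX_PENDING_WRITES
                    + "), remote window is not being read"));
        }
        // otherwise the write stays queued until the previous write completes
    }

    synchronized void onPreviousWriteComplete() {
        if (pendingWriteCounter > 0) {
            pendingWriteCounter--;   // a queued write may now be retried
        }
    }
}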
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.nettyutil; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; + +import com.google.common.base.Optional; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelPipeline; +import java.util.Collections; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.opendaylight.controller.netconf.api.NetconfMessage; +import org.opendaylight.controller.netconf.api.NetconfSession; +import org.opendaylight.controller.netconf.api.NetconfSessionListener; +import org.opendaylight.controller.netconf.api.NetconfTerminationReason; +import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec; +import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage; +import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader; +import org.openexi.proc.common.EXIOptions; + +public class AbstractNetconfSessionTest { + + @Mock + private NetconfSessionListener listener; + @Mock + private Channel channel; + @Mock + private ChannelPipeline pipeline; + private NetconfHelloMessage clientHello; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + doNothing().when(listener).onMessage(any(NetconfSession.class), any(NetconfMessage.class)); + doNothing().when(listener).onSessionUp(any(NetconfSession.class)); + doNothing().when(listener).onSessionDown(any(NetconfSession.class), any(Exception.class)); + doNothing().when(listener).onSessionTerminated(any(NetconfSession.class), any(NetconfTerminationReason.class)); + + doReturn(mock(ChannelFuture.class)).when(channel).writeAndFlush(any(NetconfMessage.class)); + doReturn(pipeline).when(channel).pipeline(); + doReturn(mock(ChannelFuture.class)).when(channel).close(); + + doReturn(null).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class)); + + clientHello = NetconfHelloMessage.createClientHello(Collections.emptySet(), Optional.absent()); + } + + @Test + public void testHandleMessage() throws Exception { + final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + testingNetconfSession.handleMessage(clientHello); + verify(listener).onMessage(testingNetconfSession, clientHello); + } + + @Test + public void testSessionUp() throws Exception { + final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + testingNetconfSession.sessionUp(); + verify(listener).onSessionUp(testingNetconfSession); + assertEquals(1L, testingNetconfSession.getSessionId()); + } + + @Test + public void testClose() throws 
Exception { + final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + testingNetconfSession.sessionUp(); + testingNetconfSession.close(); + verify(channel).close(); + verify(listener).onSessionTerminated(any(NetconfSession.class), any(NetconfTerminationReason.class)); + } + + @Test + public void testReplaceHandlers() throws Exception { + final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + final ChannelHandler mock = mock(ChannelHandler.class); + doReturn("handler").when(mock).toString(); + + testingNetconfSession.replaceMessageDecoder(mock); + verify(pipeline).replace(AbstractChannelInitializer.NETCONF_MESSAGE_DECODER, AbstractChannelInitializer.NETCONF_MESSAGE_DECODER, mock); + testingNetconfSession.replaceMessageEncoder(mock); + verify(pipeline).replace(AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, mock); + testingNetconfSession.replaceMessageEncoderAfterNextMessage(mock); + verifyNoMoreInteractions(pipeline); + + testingNetconfSession.sendMessage(clientHello); + verify(pipeline, times(2)).replace(AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, mock); + } + + @Test + public void testStartExi() throws Exception { + TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + testingNetconfSession = spy(testingNetconfSession); + + testingNetconfSession.startExiCommunication(NetconfStartExiMessage.create(new EXIOptions(), "4")); + verify(testingNetconfSession).addExiHandlers(any(NetconfEXICodec.class)); + } + + @Test + public void testEndOfInput() throws Exception { + final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + testingNetconfSession.endOfInput(); + verifyZeroInteractions(listener); + testingNetconfSession.sessionUp(); + testingNetconfSession.endOfInput(); + verify(listener).onSessionDown(any(NetconfSession.class), any(Exception.class)); + } + + @Test + public void testSendMessage() throws Exception { + final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L); + final NetconfHelloMessage clientHello = NetconfHelloMessage.createClientHello(Collections.emptySet(), Optional.absent()); + testingNetconfSession.sendMessage(clientHello); + verify(channel).writeAndFlush(clientHello); + } + + private static class TestingNetconfSession extends AbstractNetconfSession> { + + protected TestingNetconfSession(final NetconfSessionListener sessionListener, final Channel channel, final long sessionId) { + super(sessionListener, channel, sessionId); + } + + @Override + protected NetconfSession thisInstance() { + return this; + } + + @Override + protected void addExiHandlers(final NetconfEXICodec exiCodec) {} + + @Override + public void stopExiCommunication() {} + } +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfEXIHandlersTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfEXIHandlersTest.java new file mode 100644 index 0000000000..4a8db176fe --- /dev/null +++ b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfEXIHandlersTest.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. 
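One technique from the session test above deserves a note: wrapping the object under test in a Mockito spy so that a protected template-method hook (addExiHandlers) can be verified without changing the class. In isolation the pattern looks like this (Greeter is an illustrative stand-in, not from the patch):

import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import org.junit.Test;

public class SpyHookExampleTest {

    // Illustrative class with a template method delegating to a hook.
    static class Greeter {
        String greet(final String name) {
            return decorate("hello " + name);
        }
        String decorate(final String message) {   // the hook we want to observe
            return message + "!";
        }
    }

    @Test
    public void hookIsInvoked() {
        final Greeter greeter = spy(new Greeter());
        greeter.greet("netconf");
        // The spy intercepts the self-call, so the internal hook can be verified.
        verify(greeter).decorate("hello netconf");
    }
}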
All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.nettyutil.handler; + +import static org.junit.Assert.*; + +import com.google.common.collect.Lists; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.custommonkey.xmlunit.XMLUnit; +import org.junit.Before; +import org.junit.Test; +import org.opendaylight.controller.netconf.api.NetconfMessage; +import org.opendaylight.controller.netconf.util.xml.XmlUtil; +import org.openexi.proc.common.EXIOptions; +import org.openexi.proc.common.EXIOptionsException; +import org.openexi.sax.Transmogrifier; +import org.openexi.sax.TransmogrifierException; +import org.xml.sax.InputSource; + +public class NetconfEXIHandlersTest { + + private final String msgAsString = ""; + private NetconfMessageToEXIEncoder netconfMessageToEXIEncoder; + private NetconfEXIToMessageDecoder netconfEXIToMessageDecoder; + private NetconfMessage msg; + private byte[] msgAsExi; + + @Before + public void setUp() throws Exception { + final NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions()); + netconfMessageToEXIEncoder = new NetconfMessageToEXIEncoder(codec); + netconfEXIToMessageDecoder = new NetconfEXIToMessageDecoder(codec); + + msg = new NetconfMessage(XmlUtil.readXmlToDocument(msgAsString)); + this.msgAsExi = msgToExi(msgAsString, codec); + } + + private byte[] msgToExi(final String msgAsString, final NetconfEXICodec codec) throws EXIOptionsException, TransmogrifierException, IOException { + final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + final Transmogrifier transmogrifier = codec.getTransmogrifier(); + transmogrifier.setOutputStream(byteArrayOutputStream); + transmogrifier.encode(new InputSource(new ByteArrayInputStream(msgAsString.getBytes()))); + return byteArrayOutputStream.toByteArray(); + } + + @Test + public void testEncodeDecode() throws Exception { + final ByteBuf buffer = Unpooled.buffer(); + netconfMessageToEXIEncoder.encode(null, msg, buffer); + final int exiLength = msgAsExi.length; + // array from buffer is cca 256 n length, compare only subarray + assertArrayEquals(msgAsExi, Arrays.copyOfRange(buffer.array(), 0, exiLength)); + + // assert all other bytes in buffer be 0 + for (int i = exiLength; i < buffer.array().length; i++) { + assertEquals((byte)0, buffer.array()[i]); + } + + final List out = Lists.newArrayList(); + netconfEXIToMessageDecoder.decode(null, buffer, out); + + XMLUnit.compareXML(msg.getDocument(), ((NetconfMessage) out.get(0)).getDocument()); + } +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfXMLToHelloMessageDecoderTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfXMLToHelloMessageDecoderTest.java index f0c0d6341b..ac6370685a 100644 --- a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfXMLToHelloMessageDecoderTest.java +++ 
b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfXMLToHelloMessageDecoderTest.java @@ -36,7 +36,7 @@ public class NetconfXMLToHelloMessageDecoderTest { assertThat(out.get(0), CoreMatchers.instanceOf(NetconfHelloMessage.class)); final NetconfHelloMessage hello = (NetconfHelloMessage) out.get(0); assertTrue(hello.getAdditionalHeader().isPresent()); - assertEquals("[tomas;10.0.0.0:10000;tcp;client;]\n", hello.getAdditionalHeader().get().toFormattedString()); + assertEquals("[tomas;10.0.0.0:10000;tcp;client;]" + System.lineSeparator(), hello.getAdditionalHeader().get().toFormattedString()); assertThat(XmlUtil.toString(hello.getDocument()), CoreMatchers.containsString(" data() throws Exception { + final String noChangeXml = + "\n" + + "bit-packed\n" + + "\n"; + + + final String fullOptionsXml = + "\n" + + "byte-aligned\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n"; + + final EXIOptions fullOptions = new EXIOptions(); + fullOptions.setAlignmentType(AlignmentType.byteAligned); + fullOptions.setPreserveLexicalValues(true); + fullOptions.setPreserveDTD(true); + fullOptions.setPreserveComments(true); + fullOptions.setPreserveNS(true); + fullOptions.setPreservePIs(true); + + return Arrays.asList(new Object[][]{ + {noChangeXml, new EXIOptions()}, + {fullOptionsXml, fullOptions}, + }); + } + + private final String sourceXml; + private final EXIOptions exiOptions; + + public EXIParametersTest(final String sourceXml, final EXIOptions exiOptions) { + this.sourceXml = sourceXml; + this.exiOptions = exiOptions; + } + + @Test + public void testFromXmlElement() throws Exception { + final EXIParameters opts = + EXIParameters.fromXmlElement( + XmlElement.fromDomElement( + XmlUtil.readXmlToElement(sourceXml))); + + + assertEquals(opts.getOptions().getAlignmentType(), exiOptions.getAlignmentType()); + assertEquals(opts.getOptions().getPreserveComments(), exiOptions.getPreserveComments()); + assertEquals(opts.getOptions().getPreserveLexicalValues(), exiOptions.getPreserveLexicalValues()); + assertEquals(opts.getOptions().getPreserveNS(), exiOptions.getPreserveNS()); + assertEquals(opts.getOptions().getPreserveDTD(), exiOptions.getPreserveDTD()); + assertEquals(opts.getOptions().getPreserveNS(), exiOptions.getPreserveNS()); + } +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessageTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessageTest.java new file mode 100644 index 0000000000..47abe96687 --- /dev/null +++ b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessageTest.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
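The EXI parameters test above (like the start-exi message test that follows) uses the JUnit 4 Parameterized runner to run one test body against several XML/EXIOptions pairs. The skeleton of that pattern, reduced to its moving parts (the data rows here are illustrative):

import static org.junit.Assert.assertEquals;

import java.util.Arrays;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ParameterizedExampleTest {

    @Parameters
    public static Iterable<Object[]> data() {
        // Each Object[] becomes one constructor invocation and one test run.
        return Arrays.asList(new Object[][] {
            { "bit-packed", true },
            { "byte-aligned", false },
        });
    }

    private final String input;
    private final boolean expectedDefault;

    public ParameterizedExampleTest(final String input, final boolean expectedDefault) {
        this.input = input;
        this.expectedDefault = expectedDefault;
    }

    @Test
    public void runsOncePerRow() {
        assertEquals(expectedDefault, "bit-packed".equals(input));
    }
}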
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.nettyutil.handler.exi; + +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import org.custommonkey.xmlunit.Diff; +import org.custommonkey.xmlunit.XMLUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.openexi.proc.common.AlignmentType; +import org.openexi.proc.common.EXIOptions; + +@RunWith(Parameterized.class) +public class NetconfStartExiMessageTest { + + @Parameterized.Parameters + public static Iterable data() throws Exception { + final String noChangeXml = "\n" + + "\n" + + "bit-packed\n" + + "\n" + + ""; + + + final String fullOptionsXml = "\n" + + "\n" + + "byte-aligned\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + ""; + + final EXIOptions fullOptions = new EXIOptions(); + fullOptions.setAlignmentType(AlignmentType.byteAligned); + fullOptions.setPreserveLexicalValues(true); + fullOptions.setPreserveDTD(true); + fullOptions.setPreserveComments(true); + fullOptions.setPreserveNS(true); + fullOptions.setPreservePIs(true); + + return Arrays.asList(new Object[][]{ + {noChangeXml, new EXIOptions()}, + {fullOptionsXml, fullOptions}, + }); + } + + private final String controlXml; + private final EXIOptions exiOptions; + + public NetconfStartExiMessageTest(final String controlXml, final EXIOptions exiOptions) { + this.controlXml = controlXml; + this.exiOptions = exiOptions; + } + + @Test + public void testCreate() throws Exception { + final NetconfStartExiMessage startExiMessage = NetconfStartExiMessage.create(exiOptions, "id"); + + XMLUnit.setIgnoreWhitespace(true); + XMLUnit.setIgnoreAttributeOrder(true); + final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(controlXml), startExiMessage.getDocument()); + assertTrue(diff.toString(), diff.similar()); + } +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java new file mode 100644 index 0000000000..223f2c7f94 --- /dev/null +++ b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java @@ -0,0 +1,625 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
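The start-exi test above relies on XMLUnit's notion of "similar" documents rather than byte-for-byte equality, which is what the ignore-whitespace and ignore-attribute-order switches enable. A self-contained illustration with two cosmetically different but equivalent documents (the sample XML is illustrative):

import static org.junit.Assert.assertTrue;

import org.custommonkey.xmlunit.Diff;
import org.custommonkey.xmlunit.XMLUnit;
import org.junit.Test;

public class XmlSimilarityExampleTest {

    @Test
    public void whitespaceAndAttributeOrderAreIgnored() throws Exception {
        final String control = "<rpc message-id=\"id\" a=\"1\"><start-exi/></rpc>";
        final String test = "<rpc a=\"1\" message-id=\"id\">\n  <start-exi/>\n</rpc>";

        XMLUnit.setIgnoreWhitespace(true);
        XMLUnit.setIgnoreAttributeOrder(true);

        // similar() tolerates the cosmetic differences between the documents.
        final Diff diff = XMLUnit.compareXML(control, test);
        assertTrue(diff.toString(), diff.similar());
    }
}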
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client; + +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import java.io.IOException; +import java.net.SocketAddress; + +import java.nio.channels.WritePendingException; +import org.apache.sshd.ClientChannel; +import org.apache.sshd.ClientSession; +import org.apache.sshd.SshClient; +import org.apache.sshd.client.channel.ChannelSubsystem; +import org.apache.sshd.client.future.AuthFuture; +import org.apache.sshd.client.future.ConnectFuture; +import org.apache.sshd.client.future.OpenFuture; +import org.apache.sshd.common.future.CloseFuture; +import org.apache.sshd.common.future.SshFuture; +import org.apache.sshd.common.future.SshFutureListener; +import org.apache.sshd.common.io.IoInputStream; +import org.apache.sshd.common.io.IoOutputStream; +import org.apache.sshd.common.io.IoReadFuture; +import org.apache.sshd.common.io.IoWriteFuture; +import org.apache.sshd.common.util.Buffer; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Matchers; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler; + +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; + +public class AsyncSshHandlerTest { + + @Mock + private SshClient sshClient; + @Mock + private AuthenticationHandler authHandler; + @Mock + private ChannelHandlerContext ctx; + @Mock + private Channel channel; + @Mock + private SocketAddress remoteAddress; + @Mock + private SocketAddress localAddress; + + private AsyncSshHandler asyncSshHandler; + + private SshFutureListener sshConnectListener; + private SshFutureListener sshAuthListener; + private SshFutureListener sshChannelOpenListener; + + private ChannelPromise promise; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + stubAuth(); + stubSshClient(); + stubChannel(); + stubCtx(); + stubRemoteAddress(); + + promise = getMockedPromise(); + + asyncSshHandler = new AsyncSshHandler(authHandler, sshClient); + } + + @After + public void tearDown() throws Exception { + sshConnectListener = null; + sshAuthListener = null; + sshChannelOpenListener = null; + promise = null; + 
asyncSshHandler.close(ctx, getMockedPromise()); + } + + private void stubAuth() throws IOException { + doReturn("usr").when(authHandler).getUsername(); + + final AuthFuture authFuture = mock(AuthFuture.class); + Futures.addCallback(stubAddListener(authFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + sshAuthListener = result; + } + }); + doReturn(authFuture).when(authHandler).authenticate(any(ClientSession.class)); + } + + @SuppressWarnings("unchecked") + private > ListenableFuture> stubAddListener(final T future) { + final SettableFuture> listenerSettableFuture = SettableFuture.create(); + + doAnswer(new Answer() { + @Override + public Object answer(final InvocationOnMock invocation) throws Throwable { + listenerSettableFuture.set((SshFutureListener) invocation.getArguments()[0]); + return null; + } + }).when(future).addListener(any(SshFutureListener.class)); + + return listenerSettableFuture; + } + + private void stubRemoteAddress() { + doReturn("remote").when(remoteAddress).toString(); + } + + private void stubCtx() { + doReturn(channel).when(ctx).channel(); + doReturn(ctx).when(ctx).fireChannelActive(); + doReturn(ctx).when(ctx).fireChannelInactive(); + doReturn(ctx).when(ctx).fireChannelRead(anyObject()); + doReturn(getMockedPromise()).when(ctx).newPromise(); + } + + private void stubChannel() { + doReturn("channel").when(channel).toString(); + } + + private void stubSshClient() { + doNothing().when(sshClient).start(); + final ConnectFuture connectFuture = mock(ConnectFuture.class); + Futures.addCallback(stubAddListener(connectFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + sshConnectListener = result; + } + }); + doReturn(connectFuture).when(sshClient).connect("usr", remoteAddress); + } + + @Test + public void testConnectSuccess() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async); + + verify(promise).setSuccess(); + verifyNoMoreInteractions(promise); + verify(ctx).fireChannelActive(); + } + + @Test + public void testRead() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + verify(ctx).fireChannelRead(any(ByteBuf.class)); + } + + @Test + public void testReadClosed() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, 
localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoReadFuture mockedReadFuture = asyncOut.read(null); + + Futures.addCallback(stubAddListener(mockedReadFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + doReturn(new IllegalStateException()).when(mockedReadFuture).getException(); + doReturn(mockedReadFuture).when(mockedReadFuture).removeListener(Matchers.>any()); + doReturn(true).when(asyncOut).isClosing(); + doReturn(true).when(asyncOut).isClosed(); + result.operationComplete(mockedReadFuture); + } + }); + + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + verify(ctx).fireChannelInactive(); + } + + @Test + public void testReadFail() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoReadFuture mockedReadFuture = asyncOut.read(null); + + Futures.addCallback(stubAddListener(mockedReadFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + doReturn(new IllegalStateException()).when(mockedReadFuture).getException(); + doReturn(mockedReadFuture).when(mockedReadFuture).removeListener(Matchers.>any()); + result.operationComplete(mockedReadFuture); + } + }); + + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + verify(ctx).fireChannelInactive(); + } + + @Test + public void testWrite() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + final ChannelPromise writePromise = getMockedPromise(); + asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), writePromise); + + verify(writePromise).setSuccess(); + } + + @Test + public void testWriteClosed() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + + final IoWriteFuture ioWriteFuture = asyncIn.write(null); + + Futures.addCallback(stubAddListener(ioWriteFuture), new 
SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + doReturn(false).when(ioWriteFuture).isWritten(); + doReturn(new IllegalStateException()).when(ioWriteFuture).getException(); + doReturn(true).when(asyncIn).isClosing(); + doReturn(true).when(asyncIn).isClosed(); + result.operationComplete(ioWriteFuture); + } + }); + + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + final ChannelPromise writePromise = getMockedPromise(); + asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), writePromise); + + verify(writePromise).setFailure(any(Throwable.class)); + } + + @Test + public void testWritePendingOne() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final IoWriteFuture ioWriteFuture = asyncIn.write(null); + + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + final ChannelPromise firstWritePromise = getMockedPromise(); + + // intercept listener for first write, so we can invoke successful write later thus simulate pending of the first write + final ListenableFuture> firstWriteListenerFuture = stubAddListener(ioWriteFuture); + asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), firstWritePromise); + final SshFutureListener firstWriteListener = firstWriteListenerFuture.get(); + // intercept second listener, this is the listener for pending write for the pending write to know when pending state ended + final ListenableFuture> pendingListener = stubAddListener(ioWriteFuture); + + final ChannelPromise secondWritePromise = getMockedPromise(); + // now make write throw pending exception + doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class)); + asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise); + + doReturn(ioWriteFuture).when(asyncIn).write(any(Buffer.class)); + + verifyZeroInteractions(firstWritePromise, secondWritePromise); + + // make first write stop pending + firstWriteListener.operationComplete(ioWriteFuture); + // intercept third listener, this is regular listener for second write to determine success or failure + final ListenableFuture> afterPendingListener = stubAddListener(ioWriteFuture); + + // notify listener for second write that pending has ended + pendingListener.get().operationComplete(ioWriteFuture); + // Notify third listener (regular listener for second write) that second write succeeded + afterPendingListener.get().operationComplete(ioWriteFuture); + + // verify both write promises successful + verify(firstWritePromise).setSuccess(); + verify(secondWritePromise).setSuccess(); + } + + @Test + 
public void testWritePendingMax() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final IoWriteFuture ioWriteFuture = asyncIn.write(null); + + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + final ChannelPromise firstWritePromise = getMockedPromise(); + + // intercept listener for first write, so we can invoke successful write later thus simulate pending of the first write + final ListenableFuture> firstWriteListenerFuture = stubAddListener(ioWriteFuture); + asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), firstWritePromise); + + final ChannelPromise secondWritePromise = getMockedPromise(); + // now make write throw pending exception + doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class)); + for (int i = 0; i < 1000; i++) { + asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise); + } + + verify(ctx).fireChannelInactive(); + } + + @Test + public void testDisconnect() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + sshAuthListener.operationComplete(getSuccessAuthFuture()); + sshChannelOpenListener.operationComplete(getSuccessOpenFuture()); + + final ChannelPromise disconnectPromise = getMockedPromise(); + asyncSshHandler.disconnect(ctx, disconnectPromise); + + verify(sshSession).close(anyBoolean()); + verify(disconnectPromise).setSuccess(); + verify(ctx).fireChannelInactive(); + } + + private OpenFuture getSuccessOpenFuture() { + final OpenFuture failedOpenFuture = mock(OpenFuture.class); + doReturn(true).when(failedOpenFuture).isOpened(); + return failedOpenFuture; + } + + private AuthFuture getSuccessAuthFuture() { + final AuthFuture authFuture = mock(AuthFuture.class); + doReturn(true).when(authFuture).isSuccess(); + return authFuture; + } + + private ConnectFuture getSuccessConnectFuture(final ClientSession sshSession) { + final ConnectFuture connectFuture = mock(ConnectFuture.class); + doReturn(true).when(connectFuture).isConnected(); + + doReturn(sshSession).when(connectFuture).getSession(); + return connectFuture; + } + + private ClientSession getMockedSshSession(final ChannelSubsystem subsystemChannel) throws IOException { + final ClientSession sshSession = mock(ClientSession.class); + + doReturn("sshSession").when(sshSession).toString(); + doReturn("serverVersion").when(sshSession).getServerVersion(); + doReturn(false).when(sshSession).isClosed(); + doReturn(false).when(sshSession).isClosing(); + final CloseFuture closeFuture = mock(CloseFuture.class); + 
Futures.addCallback(stubAddListener(closeFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + doReturn(true).when(closeFuture).isClosed(); + result.operationComplete(closeFuture); + } + }); + doReturn(closeFuture).when(sshSession).close(false); + + doReturn(subsystemChannel).when(sshSession).createSubsystemChannel(anyString()); + + return sshSession; + } + + private ChannelSubsystem getMockedSubsystemChannel(final IoInputStream asyncOut, final IoOutputStream asyncIn) throws IOException { + final ChannelSubsystem subsystemChannel = mock(ChannelSubsystem.class); + doNothing().when(subsystemChannel).setStreaming(any(ClientChannel.Streaming.class)); + final OpenFuture openFuture = mock(OpenFuture.class); + + Futures.addCallback(stubAddListener(openFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + sshChannelOpenListener = result; + } + }); + + doReturn(asyncOut).when(subsystemChannel).getAsyncOut(); + + doReturn(openFuture).when(subsystemChannel).open(); + doReturn(asyncIn).when(subsystemChannel).getAsyncIn(); + return subsystemChannel; + } + + private IoOutputStream getMockedIoOutputStream() { + final IoOutputStream mock = mock(IoOutputStream.class); + final IoWriteFuture ioWriteFuture = mock(IoWriteFuture.class); + doReturn(ioWriteFuture).when(ioWriteFuture).addListener(Matchers.>any()); + doReturn(true).when(ioWriteFuture).isWritten(); + + Futures.addCallback(stubAddListener(ioWriteFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + result.operationComplete(ioWriteFuture); + } + }); + + doReturn(ioWriteFuture).when(mock).write(any(Buffer.class)); + doReturn(false).when(mock).isClosed(); + doReturn(false).when(mock).isClosing(); + return mock; + } + + private IoInputStream getMockedIoInputStream() { + final IoInputStream mock = mock(IoInputStream.class); + final IoReadFuture ioReadFuture = mock(IoReadFuture.class); + doReturn(null).when(ioReadFuture).getException(); + doReturn(ioReadFuture).when(ioReadFuture).removeListener(Matchers.>any()); + doReturn(5).when(ioReadFuture).getRead(); + doReturn(new Buffer(new byte[]{0, 1, 2, 3, 4})).when(ioReadFuture).getBuffer(); + doReturn(ioReadFuture).when(ioReadFuture).addListener(Matchers.>any()); + + // Always success for read + Futures.addCallback(stubAddListener(ioReadFuture), new SuccessFutureListener() { + @Override + public void onSuccess(final SshFutureListener result) { + result.operationComplete(ioReadFuture); + } + }); + + doReturn(ioReadFuture).when(mock).read(any(Buffer.class)); + doReturn(false).when(mock).isClosed(); + doReturn(false).when(mock).isClosing(); + return mock; + } + + @Test + public void testConnectFailOpenChannel() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final IoInputStream asyncOut = getMockedIoInputStream(); + final IoOutputStream asyncIn = getMockedIoOutputStream(); + final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn); + final ClientSession sshSession = getMockedSshSession(subsystemChannel); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + + sshAuthListener.operationComplete(getSuccessAuthFuture()); + + verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async); + + try { + sshChannelOpenListener.operationComplete(getFailedOpenFuture()); + fail("Exception expected"); 
+ } catch (final Exception e) { + verify(promise).setFailure(any(Throwable.class)); + verifyNoMoreInteractions(promise); + // TODO should ctx.channelInactive be called if we throw exception ? + } + } + + @Test + public void testConnectFailAuth() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final ClientSession sshSession = mock(ClientSession.class); + doReturn(true).when(sshSession).isClosed(); + final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession); + + sshConnectListener.operationComplete(connectFuture); + + final AuthFuture authFuture = getFailedAuthFuture(); + + try { + sshAuthListener.operationComplete(authFuture); + fail("Exception expected"); + } catch (final Exception e) { + verify(promise).setFailure(any(Throwable.class)); + verifyNoMoreInteractions(promise); + // TODO should ctx.channelInactive be called ? + } + } + + private AuthFuture getFailedAuthFuture() { + final AuthFuture authFuture = mock(AuthFuture.class); + doReturn(false).when(authFuture).isSuccess(); + doReturn(new IllegalStateException()).when(authFuture).getException(); + return authFuture; + } + + private OpenFuture getFailedOpenFuture() { + final OpenFuture authFuture = mock(OpenFuture.class); + doReturn(false).when(authFuture).isOpened(); + doReturn(new IllegalStateException()).when(authFuture).getException(); + return authFuture; + } + + @Test + public void testConnectFail() throws Exception { + asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise); + + final ConnectFuture connectFuture = getFailedConnectFuture(); + try { + sshConnectListener.operationComplete(connectFuture); + fail("Exception expected"); + } catch (final Exception e) { + verify(promise).setFailure(any(Throwable.class)); + verifyNoMoreInteractions(promise); + // TODO should ctx.channelInactive be called ? 
+ } + } + + private ConnectFuture getFailedConnectFuture() { + final ConnectFuture connectFuture = mock(ConnectFuture.class); + doReturn(false).when(connectFuture).isConnected(); + doReturn(new IllegalStateException()).when(connectFuture).getException(); + return connectFuture; + } + + private ChannelPromise getMockedPromise() { + final ChannelPromise promise = mock(ChannelPromise.class); + doReturn(promise).when(promise).setSuccess(); + doReturn(promise).when(promise).setFailure(any(Throwable.class)); + return promise; + } + + private static abstract class SuccessFutureListener> implements FutureCallback> { + + @Override + public abstract void onSuccess(final SshFutureListener result); + + @Override + public void onFailure(final Throwable t) { + throw new RuntimeException(t); + } + } +} diff --git a/opendaylight/netconf/netconf-ssh/src/test/java/org/opendaylight/controller/netconf/netty/SSHTest.java b/opendaylight/netconf/netconf-ssh/src/test/java/org/opendaylight/controller/netconf/netty/SSHTest.java index ce1400bbcb..eb2b644cbc 100644 --- a/opendaylight/netconf/netconf-ssh/src/test/java/org/opendaylight/controller/netconf/netty/SSHTest.java +++ b/opendaylight/netconf/netconf-ssh/src/test/java/org/opendaylight/controller/netconf/netty/SSHTest.java @@ -67,7 +67,9 @@ public class SSHTest { netconfSSHServer.setAuthProvider(authProvider); InetSocketAddress address = netconfSSHServer.getLocalSocketAddress(); - final EchoClientHandler echoClientHandler = connectClient(address); + + final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort())); + Stopwatch stopwatch = new Stopwatch().start(); while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) { Thread.sleep(100); diff --git a/opendaylight/netconf/netconf-testtool/src/main/java/org/opendaylight/controller/netconf/test/tool/NetconfDeviceSimulator.java b/opendaylight/netconf/netconf-testtool/src/main/java/org/opendaylight/controller/netconf/test/tool/NetconfDeviceSimulator.java index 2cd5b19bd1..600baa7431 100644 --- a/opendaylight/netconf/netconf-testtool/src/main/java/org/opendaylight/controller/netconf/test/tool/NetconfDeviceSimulator.java +++ b/opendaylight/netconf/netconf-testtool/src/main/java/org/opendaylight/controller/netconf/test/tool/NetconfDeviceSimulator.java @@ -93,7 +93,7 @@ public class NetconfDeviceSimulator implements Closeable { this.hashedWheelTimer = hashedWheelTimer; } - private NetconfServerDispatcher createDispatcher(final Map moduleBuilders, final boolean exi) { + private NetconfServerDispatcher createDispatcher(final Map moduleBuilders, final boolean exi, final int generateConfigsTimeout) { final Set capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function() { @Override @@ -115,7 +115,7 @@ public class NetconfDeviceSimulator implements Closeable { : Sets.newHashSet(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0, XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1); final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory( - hashedWheelTimer, simulatedOperationProvider, idProvider, CONNECTION_TIMEOUT_MILLIS, commitNotifier, new LoggingMonitoringService(), serverCapabilities); + hashedWheelTimer, simulatedOperationProvider, idProvider, generateConfigsTimeout, commitNotifier, new LoggingMonitoringService(), serverCapabilities); final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new 
NetconfServerDispatcher.ServerChannelInitializer( serverNegotiatorFactory); @@ -153,7 +153,7 @@ public class NetconfDeviceSimulator implements Closeable { public List start(final Main.Params params) { final Map moduleBuilders = parseSchemasToModuleBuilders(params); - final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi); + final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout); int currentPort = params.startingPort; diff --git a/opendaylight/netconf/netconf-util/pom.xml b/opendaylight/netconf/netconf-util/pom.xml index df4d389705..bed58beb0f 100644 --- a/opendaylight/netconf/netconf-util/pom.xml +++ b/opendaylight/netconf/netconf-util/pom.xml @@ -46,6 +46,10 @@ xmlunit test + + org.opendaylight.yangtools + mockito-configuration + diff --git a/opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessage.java b/opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessage.java index 33934d10ba..15223cb60b 100644 --- a/opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessage.java +++ b/opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessage.java @@ -64,10 +64,12 @@ public final class NetconfHelloMessage extends NetconfMessage { Document doc = XmlUtil.newDocument(); Element helloElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, HELLO_TAG); - Element capabilitiesElement = doc.createElement(XmlNetconfConstants.CAPABILITIES); + Element capabilitiesElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, + XmlNetconfConstants.CAPABILITIES); for (String capability : Sets.newHashSet(capabilities)) { - Element capElement = doc.createElement(XmlNetconfConstants.CAPABILITY); + Element capElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, + XmlNetconfConstants.CAPABILITY); capElement.setTextContent(capability); capabilitiesElement.appendChild(capElement); } @@ -80,7 +82,8 @@ public final class NetconfHelloMessage extends NetconfMessage { public static NetconfHelloMessage createServerHello(Set capabilities, long sessionId) throws NetconfDocumentedException { Document doc = createHelloMessageDoc(capabilities); - Element sessionIdElement = doc.createElement(XmlNetconfConstants.SESSION_ID); + Element sessionIdElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, + XmlNetconfConstants.SESSION_ID); sessionIdElement.setTextContent(Long.toString(sessionId)); doc.getDocumentElement().appendChild(sessionIdElement); return new NetconfHelloMessage(doc); diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/CloseableUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/CloseableUtilTest.java new file mode 100644 index 0000000000..8d41ad7607 --- /dev/null +++ b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/CloseableUtilTest.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; + +import com.google.common.collect.Lists; +import org.junit.Test; + +public class CloseableUtilTest { + + @Test + public void testCloseAllFail() throws Exception { + final AutoCloseable failingCloseable = new AutoCloseable() { + @Override + public void close() throws Exception { + throw new RuntimeException("testing failing close"); + } + }; + + try { + CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable)); + fail("Exception with suppressed should be thrown"); + } catch (final RuntimeException e) { + assertEquals(1, e.getSuppressed().length); + } + } + + @Test + public void testCloseAll() throws Exception { + final AutoCloseable failingCloseable = mock(AutoCloseable.class); + doNothing().when(failingCloseable).close(); + CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable)); + } +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/HardcodedNamespaceResolverTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/HardcodedNamespaceResolverTest.java new file mode 100644 index 0000000000..f083cc1dbd --- /dev/null +++ b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/HardcodedNamespaceResolverTest.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.util.xml; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +import org.junit.Test; + +public class HardcodedNamespaceResolverTest { + + @Test + public void testResolver() throws Exception { + final HardcodedNamespaceResolver hardcodedNamespaceResolver = new HardcodedNamespaceResolver("prefix", "namespace"); + + assertEquals("namespace", hardcodedNamespaceResolver.getNamespaceURI("prefix")); + try{ + hardcodedNamespaceResolver.getNamespaceURI("unknown"); + fail("Unknown namespace lookup should fail"); + } catch(IllegalStateException e) {} + + assertNull(hardcodedNamespaceResolver.getPrefix("any")); + assertNull(hardcodedNamespaceResolver.getPrefixes("any")); + } +} \ No newline at end of file diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlElementTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlElementTest.java new file mode 100644 index 0000000000..a88de956e2 --- /dev/null +++ b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlElementTest.java @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.util.xml; + +import static org.hamcrest.CoreMatchers.both; +import static org.hamcrest.CoreMatchers.containsString; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.opendaylight.controller.netconf.api.NetconfDocumentedException; +import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; + +import com.google.common.base.Optional; + +public class XmlElementTest { + + private final String elementAsString = "" + + "" + + "deepValue" + + "" + + "innerNamespaceValue" + + "b:valueWithPrefix" + + ""; + private Document document; + private Element element; + private XmlElement xmlElement; + + @Before + public void setUp() throws Exception { + document = XmlUtil.readXmlToDocument(elementAsString); + element = document.getDocumentElement(); + xmlElement = XmlElement.fromDomElement(element); + } + + @Test + public void testConstruct() throws Exception { + final XmlElement fromString = XmlElement.fromString(elementAsString); + assertEquals(fromString, xmlElement); + XmlElement.fromDomDocument(document); + XmlElement.fromDomElement(element); + XmlElement.fromDomElementWithExpected(element, "top"); + XmlElement.fromDomElementWithExpected(element, "top", "namespace"); + + try { + XmlElement.fromString("notXml"); + fail(); + } catch (final NetconfDocumentedException e) {} + + try { + XmlElement.fromDomElementWithExpected(element, "notTop"); + fail(); + } catch (final NetconfDocumentedException e) {} + + try { + XmlElement.fromDomElementWithExpected(element, "top", "notNamespace"); + fail(); + } catch (final NetconfDocumentedException e) {} + } + + @Test + public void testGetters() throws Exception { + assertEquals(element, xmlElement.getDomElement()); + assertEquals(element.getElementsByTagName("inner").getLength(), xmlElement.getElementsByTagName("inner").getLength()); + + assertEquals("top", xmlElement.getName()); + assertTrue(xmlElement.hasNamespace()); + assertEquals("namespace", xmlElement.getNamespace()); + assertEquals("namespace", xmlElement.getNamespaceAttribute()); + assertEquals(Optional.of("namespace"), xmlElement.getNamespaceOptionally()); + + assertEquals("value1", xmlElement.getAttribute("attr1", "attrNamespace")); + assertEquals("value2", xmlElement.getAttribute("attr2")); + assertEquals(2 + 2/*Namespace definition*/, xmlElement.getAttributes().size()); + + assertEquals(3, xmlElement.getChildElements().size()); + assertEquals(1, xmlElement.getChildElements("inner").size()); + assertTrue(xmlElement.getOnlyChildElementOptionally("inner").isPresent()); + assertTrue(xmlElement.getOnlyChildElementWithSameNamespaceOptionally("inner").isPresent()); + assertEquals(0, xmlElement.getChildElements("unknown").size()); + assertFalse(xmlElement.getOnlyChildElementOptionally("unknown").isPresent()); + assertEquals(1, xmlElement.getChildElementsWithSameNamespace("inner").size()); + assertEquals(0, 
xmlElement.getChildElementsWithSameNamespace("innerNamespace").size()); + assertEquals(1, xmlElement.getChildElementsWithinNamespace("innerNamespace", "innerNamespace").size()); + assertTrue(xmlElement.getOnlyChildElementOptionally("innerNamespace", "innerNamespace").isPresent()); + assertFalse(xmlElement.getOnlyChildElementOptionally("innerNamespace", "unknownNamespace").isPresent()); + + final XmlElement noNamespaceElement = XmlElement.fromString(""); + assertFalse(noNamespaceElement.hasNamespace()); + try { + noNamespaceElement.getNamespace(); + fail(); + } catch (final MissingNameSpaceException e) {} + + final XmlElement inner = xmlElement.getOnlyChildElement("inner"); + final XmlElement deepInner = inner.getOnlyChildElementWithSameNamespaceOptionally().get(); + assertEquals(deepInner, inner.getOnlyChildElementWithSameNamespace()); + assertEquals(Optional.absent(), xmlElement.getOnlyChildElementOptionally("unknown")); + assertEquals("deepValue", deepInner.getTextContent()); + assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get()); + assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get()); + } + + @Test + public void testExtractNamespaces() throws Exception { + final XmlElement innerPrefixed = xmlElement.getOnlyChildElement("innerPrefixed"); + Map.Entry namespaceOfTextContent = innerPrefixed.findNamespaceOfTextContent(); + + assertNotNull(namespaceOfTextContent); + assertEquals("b", namespaceOfTextContent.getKey()); + assertEquals("prefixedValueNamespace", namespaceOfTextContent.getValue()); + final XmlElement innerNamespace = xmlElement.getOnlyChildElement("innerNamespace"); + namespaceOfTextContent = innerNamespace.findNamespaceOfTextContent(); + + assertEquals("", namespaceOfTextContent.getKey()); + assertEquals("innerNamespace", namespaceOfTextContent.getValue()); + } + + @Test + public void testUnrecognisedElements() throws Exception { + xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"), xmlElement.getOnlyChildElement("innerPrefixed"), xmlElement.getOnlyChildElement("innerNamespace")); + + try { + xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner")); + fail(); + } catch (final NetconfDocumentedException e) { + assertThat(e.getMessage(), both(containsString("innerNamespace")).and(containsString("innerNamespace"))); + } + } +} diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlUtilTest.java new file mode 100644 index 0000000000..3796dd996a --- /dev/null +++ b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlUtilTest.java @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License v1.0 which accompanies this distribution, + * and is available at http://www.eclipse.org/legal/epl-v10.html + */ + +package org.opendaylight.controller.netconf.util.xml; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.common.base.Optional; +import javax.xml.xpath.XPathConstants; +import javax.xml.xpath.XPathExpression; +import org.custommonkey.xmlunit.Diff; +import org.custommonkey.xmlunit.XMLUnit; +import org.junit.Test; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.xml.sax.SAXParseException; + +public class XmlUtilTest { + + private final String xml = "\n" + + "value\n" + + "prefix:value\n" + + "prefix:value\n" + + ""; + + @Test + public void testCreateElement() throws Exception { + final Document document = XmlUtil.newDocument(); + final Element top = XmlUtil.createElement(document, "top", Optional.of("namespace")); + + top.appendChild(XmlUtil.createTextElement(document, "innerText", "value", Optional.of("namespace"))); + top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("namespace"))); + top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("randomNamespace"))); + + document.appendChild(top); + assertEquals("top", XmlUtil.createDocumentCopy(document).getDocumentElement().getTagName()); + + XMLUnit.setIgnoreAttributeOrder(true); + XMLUnit.setIgnoreWhitespace(true); + + final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(xml), document); + assertTrue(diff.toString(), diff.similar()); + } + + @Test + public void testLoadSchema() throws Exception { + XmlUtil.loadSchema(); + try { + XmlUtil.loadSchema(getClass().getResourceAsStream("/netconfMessages/commit.xml")); + fail("Input stream does not contain xsd"); + } catch (final IllegalStateException e) { + assertTrue(e.getCause() instanceof SAXParseException); + } + + } + + @Test + public void testXPath() throws Exception { + final XPathExpression correctXPath = XMLNetconfUtil.compileXPath("/top/innerText"); + try { + XMLNetconfUtil.compileXPath("!@(*&$!"); + fail("Incorrect xpath should fail"); + } catch (IllegalStateException e) {} + final Object value = XmlUtil.evaluateXPath(correctXPath, XmlUtil.readXmlToDocument("value"), XPathConstants.NODE); + assertEquals("value", ((Element) value).getTextContent()); + } +} \ No newline at end of file diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java index 12c80fe70c..a2df680b07 100644 --- a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java +++ b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java @@ -14,8 +14,8 @@ import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +//import javax.xml.bind.annotation.XmlElementWrapper; import 
java.io.Serializable; -import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -34,7 +34,7 @@ import java.util.List; * healthmonitor_id String * admin_state_up Bool * status String - * members List + * members List * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf */ @@ -71,13 +71,10 @@ public class NeutronLoadBalancerPool extends ConfigurationObject implements Seri @XmlElement (name="status") String loadBalancerPoolStatus; - @XmlElement (name="members") - List loadBalancerPoolMembers; - - HashMap member; + @XmlElement(name="members") + List loadBalancerPoolMembers; public NeutronLoadBalancerPool() { - member = new HashMap(); } public String getLoadBalancerPoolID() { @@ -152,14 +149,27 @@ public class NeutronLoadBalancerPool extends ConfigurationObject implements Seri this.loadBalancerPoolStatus = loadBalancerPoolStatus; } - public List getLoadBalancerPoolMembers() { + public List getLoadBalancerPoolMembers() { + /* + * Update the pool_id of the member to that this.loadBalancerPoolID + */ + for (NeutronLoadBalancerPoolMember member: loadBalancerPoolMembers) + member.setPoolID(loadBalancerPoolID); return loadBalancerPoolMembers; } - public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) { + public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) { this.loadBalancerPoolMembers = loadBalancerPoolMembers; } + public void addLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) { + this.loadBalancerPoolMembers.add(loadBalancerPoolMember); + } + + public void removeLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) { + this.loadBalancerPoolMembers.remove(loadBalancerPoolMember); + } + public NeutronLoadBalancerPool extractFields(List fields) { NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool(); Iterator i = fields.iterator(); @@ -198,4 +208,4 @@ public class NeutronLoadBalancerPool extends ConfigurationObject implements Seri } return ans; } -} \ No newline at end of file +} diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java index 577c3bb528..683d45fcf2 100644 --- a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java +++ b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java @@ -10,11 +10,18 @@ package org.opendaylight.controller.networkconfig.neutron; import org.opendaylight.controller.configuration.ConfigurationObject; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; import java.io.Serializable; import java.util.Iterator; import java.util.List; +@XmlRootElement +@XmlAccessorType(XmlAccessType.NONE) + public class NeutronLoadBalancerPoolMember extends ConfigurationObject implements Serializable { private static final long serialVersionUID = 1L; @@ -46,9 +53,20 @@ public class NeutronLoadBalancerPoolMember extends ConfigurationObject implemen @XmlElement (name="status") String poolMemberStatus; + String poolID; + public NeutronLoadBalancerPoolMember() { } + 
@XmlTransient + public String getPoolID() { + return poolID; + } + + public void setPoolID(String poolID) { + this.poolID = poolID; + } + public String getPoolMemberID() { return poolMemberID; } @@ -121,6 +139,9 @@ public class NeutronLoadBalancerPoolMember extends ConfigurationObject implemen if (s.equals("id")) { ans.setPoolMemberID(this.getPoolMemberID()); } + if (s.equals("pool_id")) { + ans.setPoolID(this.getPoolID()); + } if (s.equals("tenant_id")) { ans.setPoolMemberTenantID(this.getPoolMemberTenantID()); } @@ -148,6 +169,7 @@ public class NeutronLoadBalancerPoolMember extends ConfigurationObject implemen @Override public String toString() { return "NeutronLoadBalancerPoolMember{" + "poolMemberID='" + poolMemberID + '\'' + + ", poolID='" + poolID + '\'' + ", poolMemberTenantID='" + poolMemberTenantID + '\'' + ", poolMemberAddress='" + poolMemberAddress + '\'' + ", poolMemberProtoPort=" + poolMemberProtoPort + diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java index 748dffc8cf..863b3cbdc7 100644 --- a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java +++ b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java @@ -38,8 +38,8 @@ import java.util.Iterator; import java.util.List; /** - * Neutron Northbound REST APIs for LoadBalancer Policies.
- * This class provides REST APIs for managing neutron LoadBalancer Policies
+ * Neutron Northbound REST APIs for LoadBalancers.
+ * This class provides REST APIs for managing neutron LoadBalancers
 *
 *
 *
    @@ -87,15 +87,13 @@ public class NeutronLoadBalancerNorthbound { @QueryParam("page_reverse") String pageReverse // sorting not supported ) { - INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( - this); - // INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this); + INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(this); - if (loadBalancerPoolInterface == null) { + if (loadBalancerInterface == null) { throw new ServiceUnavailableException("LoadBalancer CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } - List allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers(); + List allLoadBalancers = loadBalancerInterface.getAllNeutronLoadBalancers(); // List allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules(); List ans = new ArrayList(); // List rules = new ArrayList(); @@ -128,7 +126,7 @@ public class NeutronLoadBalancerNorthbound { /** * Returns a specific LoadBalancer */ - @Path("{loadBalancerPoolID}") + @Path("{loadBalancerID}") @GET @Produces({ MediaType.APPLICATION_JSON }) @@ -137,25 +135,25 @@ public class NeutronLoadBalancerNorthbound { @ResponseCode(code = 401, condition = "Unauthorized"), @ResponseCode(code = 404, condition = "Not Found"), @ResponseCode(code = 501, condition = "Not Implemented") }) - public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID, + public Response showLoadBalancer(@PathParam("loadBalancerID") String loadBalancerID, // return fields @QueryParam("fields") List fields) { - INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( + INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( this); - if (loadBalancerPoolInterface == null) { + if (loadBalancerInterface == null) { throw new ServiceUnavailableException("LoadBalancer CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } - if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) { + if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) { throw new ResourceNotFoundException("LoadBalancer UUID does not exist."); } if (fields.size() > 0) { - NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID); + NeutronLoadBalancer ans = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID); return Response.status(200).entity( new NeutronLoadBalancerRequest(extractFields(ans, fields))).build(); } else { - return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer( - loadBalancerPoolID))).build(); + return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer( + loadBalancerID))).build(); } } @@ -175,9 +173,9 @@ public class NeutronLoadBalancerNorthbound { @ResponseCode(code = 409, condition = "Conflict"), @ResponseCode(code = 501, condition = "Not Implemented") }) public Response createLoadBalancers(final NeutronLoadBalancerRequest input) { - INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( + INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( this); - if (loadBalancerPoolInterface == null) { + if (loadBalancerInterface == null) { throw new ServiceUnavailableException("LoadBalancer 
CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } @@ -187,11 +185,9 @@ public class NeutronLoadBalancerNorthbound { /* * Verify that the LoadBalancer doesn't already exist. */ - if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) { + if (loadBalancerInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) { throw new BadRequestException("LoadBalancer UUID already exists"); } - loadBalancerPoolInterface.addNeutronLoadBalancer(singleton); - Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null); if (instances != null) { for (Object instance : instances) { @@ -202,7 +198,7 @@ public class NeutronLoadBalancerNorthbound { } } } - loadBalancerPoolInterface.addNeutronLoadBalancer(singleton); + loadBalancerInterface.addNeutronLoadBalancer(singleton); if (instances != null) { for (Object instance : instances) { INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance; @@ -218,10 +214,10 @@ public class NeutronLoadBalancerNorthbound { NeutronLoadBalancer test = i.next(); /* - * Verify that the firewall policy doesn't already exist + * Verify that the loadbalancer doesn't already exist */ - if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) { + if (loadBalancerInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) { throw new BadRequestException("Load Balancer Pool UUID already is already created"); } if (testMap.containsKey(test.getLoadBalancerID())) { @@ -243,7 +239,7 @@ public class NeutronLoadBalancerNorthbound { i = bulk.iterator(); while (i.hasNext()) { NeutronLoadBalancer test = i.next(); - loadBalancerPoolInterface.addNeutronLoadBalancer(test); + loadBalancerInterface.addNeutronLoadBalancer(test); if (instances != null) { for (Object instance : instances) { INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance; @@ -258,7 +254,7 @@ public class NeutronLoadBalancerNorthbound { /** * Updates a LoadBalancer Policy */ - @Path("{loadBalancerPoolID}") + @Path("{loadBalancerID}") @PUT @Produces({ MediaType.APPLICATION_JSON }) @Consumes({ MediaType.APPLICATION_JSON }) @@ -271,10 +267,10 @@ public class NeutronLoadBalancerNorthbound { @ResponseCode(code = 404, condition = "Not Found"), @ResponseCode(code = 501, condition = "Not Implemented") }) public Response updateLoadBalancer( - @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) { - INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( + @PathParam("loadBalancerID") String loadBalancerID, final NeutronLoadBalancerRequest input) { + INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( this); - if (loadBalancerPoolInterface == null) { + if (loadBalancerInterface == null) { throw new ServiceUnavailableException("LoadBalancer CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } @@ -282,14 +278,14 @@ public class NeutronLoadBalancerNorthbound { /* * verify the LoadBalancer exists and there is only one delta provided */ - if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) { + if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) { throw new ResourceNotFoundException("LoadBalancer UUID does not exist."); } if (!input.isSingleton()) { throw new BadRequestException("Only singleton edit supported"); } NeutronLoadBalancer delta = input.getSingleton(); - NeutronLoadBalancer original = 
loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID); + NeutronLoadBalancer original = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID); /* * updates restricted by Neutron @@ -318,23 +314,23 @@ public class NeutronLoadBalancerNorthbound { /* * update the object and return it */ - loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta); - NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer( - loadBalancerPoolID); + loadBalancerInterface.updateNeutronLoadBalancer(loadBalancerID, delta); + NeutronLoadBalancer updatedLoadBalancer = loadBalancerInterface.getNeutronLoadBalancer( + loadBalancerID); if (instances != null) { for (Object instance : instances) { INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance; service.neutronLoadBalancerUpdated(updatedLoadBalancer); } } - return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer( - loadBalancerPoolID))).build(); + return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer( + loadBalancerID))).build(); } /** * Deletes a LoadBalancer */ - @Path("{loadBalancerPoolID}") + @Path("{loadBalancerID}") @DELETE @StatusCodes({ @ResponseCode(code = 204, condition = "No Content"), @@ -343,10 +339,10 @@ public class NeutronLoadBalancerNorthbound { @ResponseCode(code = 409, condition = "Conflict"), @ResponseCode(code = 501, condition = "Not Implemented") }) public Response deleteLoadBalancer( - @PathParam("loadBalancerPoolID") String loadBalancerPoolID) { - INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( + @PathParam("loadBalancerID") String loadBalancerID) { + INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD( this); - if (loadBalancerPoolInterface == null) { + if (loadBalancerInterface == null) { throw new ServiceUnavailableException("LoadBalancer CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } @@ -354,13 +350,13 @@ public class NeutronLoadBalancerNorthbound { /* * verify the LoadBalancer exists and it isn't currently in use */ - if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) { + if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) { throw new ResourceNotFoundException("LoadBalancer UUID does not exist."); } - if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) { + if (loadBalancerInterface.neutronLoadBalancerInUse(loadBalancerID)) { return Response.status(409).build(); } - NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID); + NeutronLoadBalancer singleton = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID); Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null); if (instances != null) { for (Object instance : instances) { @@ -372,7 +368,7 @@ public class NeutronLoadBalancerNorthbound { } } - loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID); + loadBalancerInterface.removeNeutronLoadBalancer(loadBalancerID); if (instances != null) { for (Object instance : instances) { INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance; diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java 
b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMemberRequest.java similarity index 82% rename from opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java rename to opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMemberRequest.java index 9d6616373c..9b949da72e 100644 --- a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java +++ b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMemberRequest.java @@ -12,7 +12,7 @@ import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool import javax.xml.bind.annotation.XmlElement; import java.util.List; -public class INeutronLoadBalancerPoolMemberRequest { +public class NeutronLoadBalancerPoolMemberRequest { /** * See OpenStack Network API v2.0 Reference for description of @@ -25,15 +25,15 @@ public class INeutronLoadBalancerPoolMemberRequest { @XmlElement(name="members") List bulkRequest; - INeutronLoadBalancerPoolMemberRequest() { + NeutronLoadBalancerPoolMemberRequest() { } - INeutronLoadBalancerPoolMemberRequest(List bulk) { + NeutronLoadBalancerPoolMemberRequest(List bulk) { bulkRequest = bulk; singletonLoadBalancerPoolMember = null; } - INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) { + NeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) { singletonLoadBalancerPoolMember = group; } diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java index ff56fa0a9d..f8f3cd8c53 100644 --- a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java +++ b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java @@ -1,46 +1,51 @@ /* - * Copyright (C) 2014 Red Hat, Inc. + * Copyright (C) 2014 SDN Hub, LLC. 
* * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html + * + * Authors : Srini Seetharaman */ package org.opendaylight.controller.networkconfig.neutron.northbound; import org.codehaus.enunciate.jaxrs.ResponseCode; import org.codehaus.enunciate.jaxrs.StatusCodes; +import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD; import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware; -import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD; import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces; +import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool; import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember; import org.opendaylight.controller.northbound.commons.RestMessages; import org.opendaylight.controller.northbound.commons.exception.BadRequestException; +import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException; import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException; import org.opendaylight.controller.sal.utils.ServiceHelper; import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; import javax.ws.rs.GET; import javax.ws.rs.PUT; import javax.ws.rs.Path; +import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; - -@Path("/pools/{loadBalancerPoolID}/members") +@Path("/pools/{loadBalancerPoolUUID}/members") public class NeutronLoadBalancerPoolMembersNorthbound { - private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List fields) { return o.extractFields(fields); } /** - * Returns a list of all LoadBalancerPool + * Returns a list of all LoadBalancerPoolMembers in specified pool */ @GET @Produces({MediaType.APPLICATION_JSON}) @@ -50,8 +55,12 @@ public class NeutronLoadBalancerPoolMembersNorthbound { @ResponseCode(code = 501, condition = "Not Implemented")}) public Response listMembers( + //Path param + @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID, + // return fields @QueryParam("fields") List fields, + // OpenStack LoadBalancerPool attributes @QueryParam("id") String queryLoadBalancerPoolMemberID, @QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID, @@ -68,20 +77,24 @@ public Response listMembers( @QueryParam("page_reverse") String pageReverse // sorting not supported ) { - INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces - .getINeutronLoadBalancerPoolMemberCRUD(this); - if (loadBalancerPoolMemberInterface == null) { + INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces + .getINeutronLoadBalancerPoolCRUD(this); + if (loadBalancerPoolInterface == null) { throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } - List allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface - .getAllNeutronLoadBalancerPoolMembers(); + if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) { + throw new ResourceNotFoundException("loadBalancerPool UUID does not exist."); + } + 
List members = + loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers(); List ans = new ArrayList(); - Iterator i = allLoadBalancerPoolMembers.iterator(); + Iterator i = members.iterator(); while (i.hasNext()) { NeutronLoadBalancerPoolMember nsg = i.next(); if ((queryLoadBalancerPoolMemberID == null || queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) && + loadBalancerPoolUUID.equals(nsg.getPoolID()) && (queryLoadBalancerPoolMemberTenantID == null || queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) && (queryLoadBalancerPoolMemberAddress == null || @@ -102,13 +115,57 @@ public Response listMembers( } } return Response.status(200).entity( - new INeutronLoadBalancerPoolMemberRequest(ans)).build(); + new NeutronLoadBalancerPoolMemberRequest(ans)).build(); +} + +/** + * Returns a specific LoadBalancerPoolMember + */ + +@Path("{loadBalancerPoolMemberUUID}") +@GET +@Produces({ MediaType.APPLICATION_JSON }) +//@TypeHint(OpenStackLoadBalancerPoolMembers.class) +@StatusCodes({ + @ResponseCode(code = 200, condition = "Operation successful"), + @ResponseCode(code = 401, condition = "Unauthorized"), + @ResponseCode(code = 404, condition = "Not Found"), + @ResponseCode(code = 501, condition = "Not Implemented") }) +public Response showLoadBalancerPoolMember( + @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID, + @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID, + // return fields + @QueryParam("fields") List fields ) { + + INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces + .getINeutronLoadBalancerPoolCRUD(this); + if (loadBalancerPoolInterface == null) { + throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface " + + RestMessages.SERVICEUNAVAILABLE.toString()); + } + if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) { + throw new ResourceNotFoundException("loadBalancerPool UUID does not exist."); + } + List members = + loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers(); + for (NeutronLoadBalancerPoolMember ans: members) { + if (!ans.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) + continue; + + if (fields.size() > 0) { + return Response.status(200).entity( + new NeutronLoadBalancerPoolMemberRequest(extractFields(ans, fields))).build(); + } else { + return Response.status(200).entity( + new NeutronLoadBalancerPoolMemberRequest(ans)).build(); + } + } + return Response.status(204).build(); } /** * Adds a Member to an LBaaS Pool member */ -@Path("/pools/{loadBalancerPoolID}/members") @PUT @Produces({MediaType.APPLICATION_JSON}) @Consumes({MediaType.APPLICATION_JSON}) @@ -117,25 +174,34 @@ public Response listMembers( @ResponseCode(code = 401, condition = "Unauthorized"), @ResponseCode(code = 404, condition = "Not Found"), @ResponseCode(code = 501, condition = "Not Implemented")}) -public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberRequest input) { +public Response createLoadBalancerPoolMember( + @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID, + final NeutronLoadBalancerPoolMemberRequest input) { - INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD( - this); - if (loadBalancerPoolMemberInterface == null) { - throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface " + INeutronLoadBalancerPoolCRUD 
loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this); + if (loadBalancerPoolInterface == null) { + throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface " + RestMessages.SERVICEUNAVAILABLE.toString()); } + // Verify that the loadBalancerPool exists, for the member to be added to its cache + if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) { + throw new ResourceNotFoundException("loadBalancerPool UUID does not exist."); + } + NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID); + if (input.isSingleton()) { NeutronLoadBalancerPoolMember singleton = input.getSingleton(); + singleton.setPoolID(loadBalancerPoolUUID); + String loadBalancerPoolMemberUUID = singleton.getPoolMemberID(); /* * Verify that the LoadBalancerPoolMember doesn't already exist. */ - if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists( - singleton.getPoolMemberID())) { - throw new BadRequestException("LoadBalancerPoolMember UUID already exists"); + List members = singletonPool.getLoadBalancerPoolMembers(); + for (NeutronLoadBalancerPoolMember member: members) { + if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) + throw new BadRequestException("LoadBalancerPoolMember UUID already exists"); } - loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton); Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null); if (instances != null) { @@ -147,13 +213,18 @@ public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberReq } } } - loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton); if (instances != null) { for (Object instance : instances) { INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance; service.neutronLoadBalancerPoolMemberCreated(singleton); } } + + /** + * Add the member from the neutron load balancer pool as well + */ + singletonPool.addLoadBalancerPoolMember(singleton); + } else { List bulk = input.getBulk(); Iterator i = bulk.iterator(); @@ -161,15 +232,17 @@ public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberReq Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null); while (i.hasNext()) { NeutronLoadBalancerPoolMember test = i.next(); + String loadBalancerPoolMemberUUID = test.getPoolMemberID(); /* - * Verify that the firewall doesn't already exist + * Verify that the LoadBalancerPoolMember doesn't already exist. 
*/ - - if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists( - test.getPoolMemberID())) { - throw new BadRequestException("Load Balancer PoolMember UUID already is already created"); + List members = singletonPool.getLoadBalancerPoolMembers(); + for (NeutronLoadBalancerPoolMember member: members) { + if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) + throw new BadRequestException("LoadBalancerPoolMember UUID already exists"); } + if (testMap.containsKey(test.getPoolMemberID())) { throw new BadRequestException("Load Balancer PoolMember UUID already exists"); } @@ -189,15 +262,105 @@ public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberReq i = bulk.iterator(); while (i.hasNext()) { NeutronLoadBalancerPoolMember test = i.next(); - loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test); if (instances != null) { for (Object instance : instances) { INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance; service.neutronLoadBalancerPoolMemberCreated(test); } } + singletonPool.addLoadBalancerPoolMember(test); } } return Response.status(201).entity(input).build(); } + +/** + * Updates a LB member pool + */ + +@Path("{loadBalancerPoolMemberUUID}") +@PUT +@Produces({ MediaType.APPLICATION_JSON }) +@Consumes({ MediaType.APPLICATION_JSON }) +@StatusCodes({ + @ResponseCode(code = 200, condition = "Operation successful"), + @ResponseCode(code = 400, condition = "Bad Request"), + @ResponseCode(code = 401, condition = "Unauthorized"), + @ResponseCode(code = 403, condition = "Forbidden"), + @ResponseCode(code = 404, condition = "Not Found"), + @ResponseCode(code = 501, condition = "Not Implemented") }) +public Response updateLoadBalancerPoolMember( + @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID, + @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID, + final NeutronLoadBalancerPoolMemberRequest input) { + + //TODO: Implement update LB member pool + return Response.status(501).entity(input).build(); +} + +/** + * Deletes a LoadBalancerPoolMember + */ + +@Path("{loadBalancerPoolMemberUUID}") +@DELETE +@StatusCodes({ + @ResponseCode(code = 204, condition = "No Content"), + @ResponseCode(code = 401, condition = "Unauthorized"), + @ResponseCode(code = 403, condition = "Forbidden"), + @ResponseCode(code = 404, condition = "Not Found"), + @ResponseCode(code = 501, condition = "Not Implemented") }) +public Response deleteLoadBalancerPoolMember( + @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID, + @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID) { + INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this); + if (loadBalancerPoolInterface == null) { + throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface " + + RestMessages.SERVICEUNAVAILABLE.toString()); + } + + // Verify that the loadBalancerPool exists, for the member to be removed from its cache + if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) { + throw new ResourceNotFoundException("loadBalancerPool UUID does not exist."); + } + + //Verify that the LB pool member exists + NeutronLoadBalancerPoolMember singleton = null; + List members = + loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers(); + for (NeutronLoadBalancerPoolMember member: members) { + if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) 
{ + singleton = member; + break; + } + } + if (singleton == null) + throw new BadRequestException("LoadBalancerPoolMember UUID does not exist."); + + Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null); + if (instances != null) { + for (Object instance : instances) { + INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance; + int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton); + if (status < 200 || status > 299) { + return Response.status(status).build(); + } + } + } + + if (instances != null) { + for (Object instance : instances) { + INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance; + service.neutronLoadBalancerPoolMemberDeleted(singleton); + } + } + + /** + * Remove the member from the neutron load balancer pool + */ + NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID); + singletonPool.removeLoadBalancerPoolMember(singleton); + + return Response.status(204).build(); +} } diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java index fc5357ccb5..7802dbb906 100644 --- a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java +++ b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java @@ -1,9 +1,11 @@ /* - * Copyright (C) 2014 Red Hat, Inc. + * Copyright (C) 2014 SDN Hub, LLC. 
* * This program and the accompanying materials are made available under the * terms of the Eclipse Public License v1.0 which accompanies this distribution, * and is available at http://www.eclipse.org/legal/epl-v10.html + * + * Authors : Srini Seetharaman */ package org.opendaylight.controller.networkconfig.neutron.northbound; @@ -13,8 +15,10 @@ import org.codehaus.enunciate.jaxrs.ResponseCode; import org.codehaus.enunciate.jaxrs.StatusCodes; import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware; import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD; +import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD; import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces; import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool; +import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember; import org.opendaylight.controller.northbound.commons.RestMessages; import org.opendaylight.controller.northbound.commons.exception.BadRequestException; import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException; @@ -22,6 +26,7 @@ import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailab import org.opendaylight.controller.sal.utils.ServiceHelper; import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.PUT; @@ -31,6 +36,7 @@ import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -53,6 +59,13 @@ import java.util.List; * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration * */ + +/** + * For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD, + * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache. 
+ * TODO: Consolidate and maintain a single copy + */ + @Path("/pools") public class NeutronLoadBalancerPoolNorthbound { @@ -83,7 +96,7 @@ public class NeutronLoadBalancerPoolNorthbound { @QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID, @QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp, @QueryParam("status") String queryLoadBalancerPoolStatus, - @QueryParam("members") List queryLoadBalancerPoolMembers, + @QueryParam("members") List queryLoadBalancerPoolMembers, // pagination @QueryParam("limit") String limit, @QueryParam("marker") String marker, @@ -217,7 +230,7 @@ public class NeutronLoadBalancerPoolNorthbound { NeutronLoadBalancerPool test = i.next(); /* - * Verify that the firewall doesn't already exist + * Verify that the loadBalancerPool doesn't already exist */ if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) { @@ -328,4 +341,73 @@ public class NeutronLoadBalancerPoolNorthbound { } return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build(); } + + /** + * Deletes a LoadBalancerPool + */ + + @Path("{loadBalancerPoolUUID}") + @DELETE + @StatusCodes({ + @ResponseCode(code = 204, condition = "No Content"), + @ResponseCode(code = 401, condition = "Unauthorized"), + @ResponseCode(code = 404, condition = "Not Found"), + @ResponseCode(code = 409, condition = "Conflict"), + @ResponseCode(code = 501, condition = "Not Implemented") }) + public Response deleteLoadBalancerPool( + @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID) { + INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this); + if (loadBalancerPoolInterface == null) { + throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface " + + RestMessages.SERVICEUNAVAILABLE.toString()); + } + + /* + * verify the LoadBalancerPool exists and it isn't currently in use + */ + if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) { + throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist."); + } + if (loadBalancerPoolInterface.neutronLoadBalancerPoolInUse(loadBalancerPoolUUID)) { + return Response.status(409).build(); + } + NeutronLoadBalancerPool singleton = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID); + Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null); + if (instances != null) { + for (Object instance : instances) { + INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance; + int status = service.canDeleteNeutronLoadBalancerPool(singleton); + if (status < 200 || status > 299) { + return Response.status(status).build(); + } + } + } + + /* + * remove it and return 204 status + */ + loadBalancerPoolInterface.removeNeutronLoadBalancerPool(loadBalancerPoolUUID); + if (instances != null) { + for (Object instance : instances) { + INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance; + service.neutronLoadBalancerPoolDeleted(singleton); + } + } + + /* + * remove corresponding members from the member cache too + */ + INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this); + if (loadBalancerPoolMemberInterface != null) { + List allLoadBalancerPoolMembers = new + 
ArrayList(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers()); + Iterator i = allLoadBalancerPoolMembers.iterator(); + while (i.hasNext()) { + NeutronLoadBalancerPoolMember member = i.next(); + if (member.getPoolID() == loadBalancerPoolUUID) + loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID()); + } + } + return Response.status(204).build(); + } } diff --git a/opendaylight/topologymanager/implementation/src/main/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImpl.java b/opendaylight/topologymanager/implementation/src/main/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImpl.java index b0e87c48f3..659ee7dd81 100644 --- a/opendaylight/topologymanager/implementation/src/main/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImpl.java +++ b/opendaylight/topologymanager/implementation/src/main/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImpl.java @@ -8,25 +8,6 @@ package org.opendaylight.controller.topologymanager.internal; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.util.ArrayList; -import java.util.Dictionary; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.LinkedBlockingQueue; - import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.felix.dm.Component; import org.eclipse.osgi.framework.console.CommandInterpreter; @@ -64,6 +45,25 @@ import org.osgi.framework.FrameworkUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.ArrayList; +import java.util.Dictionary; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.LinkedBlockingQueue; + /** * The class describes TopologyManager which is the central repository of the * network topology. 
It provides service for applications to interact with @@ -654,12 +654,14 @@ public class TopologyManagerImpl implements // all except the creation time stamp because that should // be set only when the edge is created TimeStamp timeStamp = null; - for (Property prop : oldProps) { - if (prop instanceof TimeStamp) { - TimeStamp tsProp = (TimeStamp) prop; - if (tsProp.getTimeStampName().equals("creation")) { - timeStamp = tsProp; - break; + if (oldProps != null) { + for (Property prop : oldProps) { + if (prop instanceof TimeStamp) { + TimeStamp tsProp = (TimeStamp) prop; + if (tsProp.getTimeStampName().equals("creation")) { + timeStamp = tsProp; + break; + } } } } @@ -679,7 +681,9 @@ public class TopologyManagerImpl implements if (prop instanceof TimeStamp) { TimeStamp t = (TimeStamp) prop; if (t.getTimeStampName().equals("creation")) { - i.remove(); + if (timeStamp != null) { + i.remove(); + } break; } } diff --git a/opendaylight/topologymanager/implementation/src/test/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImplTest.java b/opendaylight/topologymanager/implementation/src/test/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImplTest.java index fa01fa6a60..d1338bf695 100644 --- a/opendaylight/topologymanager/implementation/src/test/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImplTest.java +++ b/opendaylight/topologymanager/implementation/src/test/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImplTest.java @@ -8,21 +8,11 @@ package org.opendaylight.controller.topologymanager.internal; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; - import org.junit.Assert; import org.junit.Test; import org.opendaylight.controller.sal.core.Bandwidth; import org.opendaylight.controller.sal.core.ConstructionException; +import org.opendaylight.controller.sal.core.Description; import org.opendaylight.controller.sal.core.Edge; import org.opendaylight.controller.sal.core.Host; import org.opendaylight.controller.sal.core.Latency; @@ -32,6 +22,7 @@ import org.opendaylight.controller.sal.core.NodeConnector; import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType; import org.opendaylight.controller.sal.core.Property; import org.opendaylight.controller.sal.core.State; +import org.opendaylight.controller.sal.core.TimeStamp; import org.opendaylight.controller.sal.core.UpdateType; import org.opendaylight.controller.sal.packet.address.EthernetAddress; import org.opendaylight.controller.sal.topology.TopoEdgeUpdate; @@ -47,6 +38,17 @@ import org.opendaylight.controller.switchmanager.Switch; import org.opendaylight.controller.switchmanager.SwitchConfig; import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; + public class TopologyManagerImplTest { /** * Mockup of switch manager that only maintains existence of node @@ -733,4 +735,35 @@ public class TopologyManagerImplTest { Assert.assertTrue(nodeNCmap.isEmpty()); } + + @Test + public void 
bug1348FixTest() throws ConstructionException { + TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl(); + TestSwitchManager swMgr = new TestSwitchManager(); + topoManagerImpl.setSwitchManager(swMgr); + topoManagerImpl.nonClusterObjectCreate(); + + NodeConnector headnc1 = NodeConnectorCreator.createOFNodeConnector( + (short) 1, NodeCreator.createOFNode(1000L)); + NodeConnector tailnc1 = NodeConnectorCreator.createOFNodeConnector( + (short) 2, NodeCreator.createOFNode(2000L)); + Edge edge = new Edge(headnc1, tailnc1); + List<TopoEdgeUpdate> updatedEdges = new ArrayList<>(); + Set<Property> edgeProps = new HashSet<>(); + edgeProps.add(new TimeStamp(System.currentTimeMillis(), "creation")); + edgeProps.add(new Latency(Latency.LATENCY100ns)); + edgeProps.add(new State(State.EDGE_UP)); + edgeProps.add(new Bandwidth(Bandwidth.BW100Gbps)); + edgeProps.add(new Description("Test edge")); + updatedEdges.add(new TopoEdgeUpdate(edge, edgeProps, UpdateType.CHANGED)); + + try { + topoManagerImpl.edgeUpdate(updatedEdges); + } catch (Exception e) { + Assert.fail("Exception was raised when trying to update edge properties: " + e.getMessage()); + } + + Assert.assertEquals(1, topoManagerImpl.getEdges().size()); + Assert.assertNotNull(topoManagerImpl.getEdges().get(edge)); + } }
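
The TopologyManagerImpl change and the bug1348FixTest above revolve around one idea: when an edge arrives as a CHANGED update, take all of its new properties but keep the originally recorded "creation" timestamp, and do not assume the edge already has stored properties. Below is a minimal, self-contained sketch of that merge logic. The Prop class and mergeProps method are simplified stand-ins invented for illustration; they are not the controller's actual Property/TimeStamp API.

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

// Simplified stand-in for the controller's Property/TimeStamp types (illustrative only).
class Prop {
    final String name;
    final long value;
    Prop(String name, long value) { this.name = name; this.value = value; }
}

public class EdgePropsUpdateSketch {
    /**
     * Merge the properties of a CHANGED edge update: take every new property,
     * but keep the original "creation" timestamp if the edge already had one.
     * Tolerates oldProps == null (edge first seen via a CHANGED update),
     * which is the situation the regression test exercises.
     */
    static Set<Prop> mergeProps(Set<Prop> oldProps, Set<Prop> newProps) {
        Prop creation = null;
        if (oldProps != null) {                      // the guard the fix adds
            for (Prop p : oldProps) {
                if ("creation".equals(p.name)) {
                    creation = p;
                    break;
                }
            }
        }
        Set<Prop> merged = new HashSet<>(newProps);
        if (creation != null) {
            // Drop any incoming creation timestamp and restore the original one.
            Iterator<Prop> it = merged.iterator();
            while (it.hasNext()) {
                if ("creation".equals(it.next().name)) {
                    it.remove();
                    break;
                }
            }
            merged.add(creation);
        }
        return merged;
    }

    public static void main(String[] args) {
        Set<Prop> incoming = new HashSet<>();
        incoming.add(new Prop("creation", System.currentTimeMillis()));
        incoming.add(new Prop("latency", 100));
        // No previously stored properties: must not throw and must keep the incoming set.
        System.out.println(mergeProps(null, incoming).size()); // prints 2
    }
}

Keeping the stored creation timestamp means an edge's age is measured from when it was first discovered rather than from its latest property update, and the null guard covers edges whose first notification is already a CHANGED update.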