<properties>
<features.file>features.xml</features.file>
+ <org.json.version>20131018</org.json.version>
</properties>
<dependencies>
<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-provider</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
<feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
<feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
<feature version='${project.version}'>odl-restconf</feature>
<feature version='${project.version}'>odl-mdsal-xsql</feature>
- <feature version='${project.version}'>odl-mdsal-clustering</feature>
<feature version='${project.version}'>odl-toaster</feature>
</feature>
<feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
<feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${config.version}'>odl-config-startup</feature>
<feature version='${config.version}'>odl-config-netty</feature>
<feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
<feature version='${mdsal.version}'>odl-mdsal-broker</feature>
<feature>war</feature>
+ <!-- presently we need sal-remote to be listed BEFORE sal-rest-connector because sal-rest-connector
+ has a yang file which augments a yang file in sal-remote, and order seems to matter -->
+ <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
<bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
<bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
</feature>
<feature name='odl-toaster' version='${project.version}' description="OpenDaylight :: Toaster">
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
</feature>
+
+ <feature name='odl-clustering-test-app' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+ <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
+ <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
+ </feature>
</features>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
<feature version='${project.version}'>odl-netconf-impl</feature>
- <feature version='${project.version}'>odl-netconf-tcp</feature>
- <feature version='${project.version}'>odl-netconf-ssh</feature>
<feature version='${project.version}'>odl-config-netconf-connector</feature>
<feature version='${project.version}'>odl-netconf-netty-util</feature>
<feature version='${project.version}'>odl-netconf-client</feature>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>net.sf.jung2</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.antlr</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.moxy</artifactId>
+ </dependency>
</dependencies>
<build>
<resources>
<bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+ <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
<bundle>mvn:org.opendaylight.controller/networkconfig.neutron.northbound/${networkconfig.neutron.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
<bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
<distributionManagement>
<repository>
<id>opendaylight-release</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+ <url>${nexusproxy}/repositories/opendaylight.release/</url>
</repository>
<snapshotRepository>
<id>opendaylight-snapshot</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
</snapshotRepository>
<site>
<id>website</id>
- <url>dav:http://nexus.opendaylight.org/content/sites/site/sal-parent</url>
+ <url>dav:${nexusproxy}/sites/site/sal-parent</url>
</site>
</distributionManagement>
</project>
<ignorePermissions>false</ignorePermissions>
</configuration>
</execution>
+ <execution>
+ <id>copy-dependencies</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/assembly/system</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>true</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ <useRepositoryLayout>true</useRepositoryLayout>
+ <addParentPoms>true</addParentPoms>
+ <copyPom>true</copyPom>
+ </configuration>
+ </execution>
</executions>
</plugin>
<plugin>
<sonar.language>java</sonar.language>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
- <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
+ <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+ <sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
<yang-ext.version>2013.09.07.4-SNAPSHOT</yang-ext.version>
<yang-jmx-generator.version>1.0.0-SNAPSHOT</yang-jmx-generator.version>
<yangtools.version>0.6.2-SNAPSHOT</yangtools.version>
- <sshd-core.version>0.12.0</sshd-core.version>
+ <sshd-core.version>0.12.0</sshd-core.version>
+ <jmh.version>0.9.7</jmh.version>
</properties>
<dependencyManagement>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <!-- JMH Benchmark dependencies -->
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ <version>${jmh.version}</version>
+ </dependency>
</dependencies>
</dependencyManagement>
<repositories>
<!-- OpenDayLight Repo Mirror -->
+ <!-- NOTE: URLs need to be hardcoded in the repository section because we have
+ parent poms that do NOT exist in this project and thus need to be pulled
+ down from the repository. To override these URLs you should use the
+ mirror section in your local settings.xml file. -->
<repository>
<releases>
<enabled>true</enabled>
<url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
</pluginRepository>
</pluginRepositories>
+
+ <!-- distribution management only runs when you run mvn deploy
+ which is if you are deploying compiled artifacts to a
+ maven repository. In that case logic dictates that you already
+ compiled and thus already have the necessary parent pom files
+ that do not exist in this project pulled down to your local
+ .m2. That way the variables can be resolved and artifacts can
+ be uploaded when running mvn deploy. -->
<distributionManagement>
<!-- OpenDayLight Released artifact -->
<repository>
<id>opendaylight-release</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+ <url>${nexusproxy}/repositories/opendaylight.release/</url>
</repository>
<!-- OpenDayLight Snapshot artifact -->
<snapshotRepository>
<id>opendaylight-snapshot</id>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+ <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
</snapshotRepository>
<!-- Site deployment -->
<site>
<artifactId>guava</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
--- /dev/null
+package org.opendaylight.controller.config.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit test covering the simple getters of {@link AttributeEntry}.
+ */
+public class AttributeEntryTest {
+
+    private AttributeEntry attributeEntryClient;
+    private final String key = "myKey";
+    private final String description = "myDescription";
+    private final String type = "myType";
+    private final boolean boolValue = false;
+
+    @Before
+    public void setUp() throws Exception {
+        // Construct from the declared constants rather than repeating the
+        // literals, so the expected values and the constructed instance
+        // cannot drift apart if one of them is edited.
+        attributeEntryClient = new AttributeEntry(key, description, null, type, boolValue);
+    }
+
+    @Test
+    public void testAttributeEntryGetters() throws Exception {
+        assertEquals(key, attributeEntryClient.getKey());
+        assertEquals(description, attributeEntryClient.getDescription());
+        // The entry was built with a null value, so getValue() must echo it.
+        assertNull(attributeEntryClient.getValue());
+        assertEquals(type, attributeEntryClient.getType());
+        assertEquals(boolValue, attributeEntryClient.isRw());
+    }
+}
*/
package org.opendaylight.controller.config.util;
-import com.google.common.collect.Sets;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.config.api.ConfigRegistry;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.matchers.JUnitMatchers.hasItem;
+
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
-import java.lang.management.ManagementFactory;
-import java.util.Set;
-import static org.junit.Assert.assertEquals;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.config.api.ConfigRegistry;
+
+import com.google.common.collect.Sets;
public class ConfigRegistryClientsTest {
private ObjectName testingRegistryON;
private final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
private ConfigRegistryClient jmxRegistryClient;
+ private ConfigTransactionClient jmxTransactionClient;
+ private Map<String, ObjectName> map;
@Before
public void setUp() throws Exception {
mbs.registerMBean(testingRegistry, testingRegistryON);
jmxRegistryClient = new ConfigRegistryJMXClient(
ManagementFactory.getPlatformMBeanServer());
+ map = new HashMap<>();
}
@After
}
}
+ @Test
+ public void testCreateTransaction() throws Exception{
+ jmxTransactionClient = jmxRegistryClient.createTransaction();
+ assertNotNull(jmxTransactionClient);
+ }
+
+ @Test
+ public void testGetConfigTransactionClient2() throws Exception{
+ jmxTransactionClient = jmxRegistryClient.getConfigTransactionClient("transactionName");
+ assertNotNull(jmxTransactionClient);
+ }
+
+ @Test
+ public void testGetConfigTransactionClient() throws Exception{
+ jmxTransactionClient = jmxRegistryClient.getConfigTransactionClient(testingRegistryON);
+ assertNotNull(jmxTransactionClient);
+ }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testNewMXBeanProxy() throws Exception {
+        // String.class is not an MXBean interface, so the proxy factory is
+        // expected to reject it with IllegalArgumentException.
+        if (jmxRegistryClient instanceof ConfigRegistryJMXClient) {
+            ConfigRegistryJMXClient client = (ConfigRegistryJMXClient) jmxRegistryClient;
+            assertNull(client.newMXBeanProxy(testingRegistryON, String.class));
+        } else {
+            // Original message was the meaningless "brm msg"; say what actually went wrong.
+            throw new AssertionError("expected a ConfigRegistryJMXClient but got "
+                    + jmxRegistryClient.getClass());
+        }
+    }
+
+ @Test
+ public void testBeginConfig() throws Exception{
+ Assert.assertNotNull(jmxRegistryClient.beginConfig());
+ }
+
+ @Test
+ public void testCommitConfig() throws Exception{
+ assertNull(jmxRegistryClient.commitConfig(testingRegistryON));
+ }
+
+ @Test
+ public void testGetOpenConfigs() throws Exception{
+ assertNull(jmxRegistryClient.getOpenConfigs());
+ }
+
+    @Test(expected = RuntimeException.class)
+    public void testGetVersion() throws Exception {
+        // The call itself is the assertion: the test passes only when
+        // getVersion() throws. The original wrapped it in assertEquals(3, ...),
+        // which could never be evaluated on the expected (throwing) path and
+        // only misled the reader about the contract under test.
+        jmxRegistryClient.getVersion();
+    }
+
+ @Test
+ public void testGetAvailableModuleNames() throws Exception{
+ assertNull(jmxRegistryClient.getAvailableModuleNames());
+ }
+
+    @Test
+    public void testIsHealthy() throws Exception {
+        // The stub registry is expected to report itself as unhealthy.
+        Assert.assertFalse(jmxRegistryClient.isHealthy());
+    }
+
+ @Test
+ public void testLookupConfigBeans3() throws Exception{
+ Set<ObjectName> son = jmxRegistryClient.lookupConfigBeans();
+ assertEquals(3, son.size());
+ }
+
+ @Test
+ public void testLookupConfigBeans2() throws Exception{
+ Set<ObjectName> son = jmxRegistryClient.lookupConfigBeans(TestingConfigRegistry.moduleName1);
+ assertEquals(2, son.size());
+ }
+
+ @Test
+ public void testLookupConfigBeans() throws Exception{
+ Set<ObjectName> son = jmxRegistryClient.lookupConfigBeans(TestingConfigRegistry.moduleName1, TestingConfigRegistry.instName1);
+ Set<ObjectName> on = Sets.newHashSet(TestingConfigRegistry.conf2);
+ assertEquals(on, son);
+ }
+
+ @Test
+ public void testLookupConfigBean() throws Exception{
+ ObjectName on = jmxRegistryClient.lookupConfigBean(TestingConfigRegistry.moduleName1, null);
+ assertEquals(TestingConfigRegistry.conf3, on);
+ }
+
@Test
public void testLookupRuntimeBeans() throws Exception {
Set<ObjectName> jmxLookup = lookupRuntimeBeans(jmxRegistryClient);
}
return beans;
}
+
+ @Test
+ public void testCheckConfigBeanExists() throws Exception{
+ jmxRegistryClient.checkConfigBeanExists(testingRegistryON);
+ assertEquals(true, TestingConfigRegistry.checkBool);
+ }
+
+ @Test
+ public void testLookupConfigBeanByServiceInterfaceName() throws Exception{
+ ObjectName on = clientLookupConfigBeanByServiceInterfaceName();
+ assertEquals(TestingConfigRegistry.conf1, on);
+ }
+
+ private ObjectName clientLookupConfigBeanByServiceInterfaceName(){
+ return jmxRegistryClient.lookupConfigBeanByServiceInterfaceName("qnameA", "refA");
+ }
+
+ @Test
+ public void testGetServiceMapping() throws Exception{
+ assertNull(jmxRegistryClient.getServiceMapping());
+ }
+
+ @Test
+ public void testLookupServiceReferencesByServiceInterfaceName() throws Exception{
+ map.put("conf2", TestingConfigRegistry.conf2);
+ assertEquals(map, jmxRegistryClient.lookupServiceReferencesByServiceInterfaceName("qnameB"));
+ }
+
+ @Test
+ public void testLookupServiceInterfaceNames() throws Exception{
+ assertThat(clientLookupServiceInterfaceNames(testingRegistryON), hasItem(TestingConfigRegistry.serviceQName1));
+ assertThat(clientLookupServiceInterfaceNames(testingRegistryON), hasItem(TestingConfigRegistry.serviceQName2));
+ }
+
+ private Set<String> clientLookupServiceInterfaceNames(ObjectName client) throws InstanceNotFoundException{
+ return jmxRegistryClient.lookupServiceInterfaceNames(client);
+ }
+
+ @Test
+ public void testGetServiceInterfaceName() throws Exception{
+ assertNull(jmxRegistryClient.getServiceInterfaceName(null, null));
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testInvokeMethod() throws Exception{
+ assertNull(jmxRegistryClient.invokeMethod(testingRegistryON, "name", null, null));
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testGetAttributeCurrentValue() throws Exception{
+ assertNull(jmxRegistryClient.getAttributeCurrentValue(testingRegistryON, "attrName"));
+ }
+
+ @Test
+ public void testGetAvailableModuleFactoryQNames() throws Exception{
+ for(String str : jmxRegistryClient.getAvailableModuleFactoryQNames()){
+ if(str != TestingConfigRegistry.moduleName1){
+ assertEquals(TestingConfigRegistry.moduleName2, str);
+ }
+ else{
+ assertEquals(TestingConfigRegistry.moduleName1, str);
+ }
+ }
+ }
+
+ @Test
+ public void testGetServiceReference() throws Exception{
+ Assert.assertNotNull(jmxRegistryClient.getServiceReference(null, null));
+ }
+
+ @Test(expected = UnsupportedOperationException.class)
+ public void testcheckServiceReferenceExists() throws Exception{
+ jmxRegistryClient.checkServiceReferenceExists(testingRegistryON);
+ }
}
*/
package org.opendaylight.controller.config.util;
-import com.google.common.collect.Sets;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+
+import java.lang.management.ManagementFactory;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+import javax.management.Attribute;
+import javax.management.MBeanException;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
+import org.opendaylight.controller.config.api.ValidationException;
+import org.opendaylight.controller.config.api.ValidationException.ExceptionMessageWithStackTrace;
import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import java.lang.management.ManagementFactory;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
+import com.google.common.collect.Sets;
public class ConfigTransactionClientsTest {
private final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
private TestingConfigTransactionController transactionController;
private ObjectName transactionControllerON;
private ConfigTransactionClient jmxTransactionClient;
+ Attribute attr;
+
@Before
public void setUp() throws Exception {
transactionControllerON = new ObjectName(ObjectNameUtil.ON_DOMAIN + ":"
+ ObjectNameUtil.TYPE_KEY + "=TransactionController");
mbs.registerMBean(transactionController, transactionControllerON);
- jmxTransactionClient = new ConfigTransactionJMXClient(null, transactionControllerON,
+ jmxTransactionClient = new ConfigTransactionJMXClient(null,
+ transactionControllerON,
ManagementFactory.getPlatformMBeanServer());
}
public void testLookupConfigBeans() throws Exception {
Set<ObjectName> jmxLookup = testClientLookupConfigBeans(jmxTransactionClient);
assertEquals(Sets.newHashSet(transactionController.conf1,
- transactionController.conf2, transactionController.conf3), jmxLookup);
+ transactionController.conf2, transactionController.conf3),
+ jmxLookup);
}
private Set<ObjectName> testClientLookupConfigBeans(
assertEquals(3, beans.size());
return beans;
}
+
+    @Test
+    public void testGetObjectName() throws Exception {
+        // The client must report the ObjectName it was constructed with.
+        // Fixes two issues in the original: the helper was invoked twice (the
+        // first result was discarded), and assertEquals had its expected and
+        // actual arguments reversed (JUnit expects the expected value first).
+        assertEquals(true, testClientGetObjectName(jmxTransactionClient));
+    }
+
+    // True when the client's ObjectName equals the controller's registration name.
+    private boolean testClientGetObjectName(ConfigTransactionClient client) {
+        return transactionControllerON.equals(client.getObjectName());
+    }
+
+ @Test
+ public void testGetAvailableModuleNames() throws Exception {
+ Set<String> jmxMN = testClientGetAvailableModuleNames(jmxTransactionClient);
+ assertNull(jmxMN);
+ }
+
+ private Set<String> testClientGetAvailableModuleNames(
+ ConfigTransactionClient client) {
+ return client.getAvailableModuleNames();
+ }
+
+ @Test
+ public void testGetTransactionName() throws Exception {
+ String jmxTN = testClientGetTransactionName(jmxTransactionClient);
+ assertEquals("transactionName", jmxTN);
+ }
+
+ private String testClientGetTransactionName(ConfigTransactionClient client) {
+ return client.getTransactionName();
+ }
+
+    @Ignore
+    public void testGetVersion() throws Exception {
+        // NOTE(review): getVersion() returns a primitive long, so assertNull on
+        // its boxed value can never pass — presumably why this is @Ignore'd.
+        // Replace with an assertEquals against the real expected version once known.
+        long jmxVersion = jmxTransactionClient.getVersion();
+        assertNull(jmxVersion);
+    }
+
+    @Ignore
+    public void testGetParentVersion() throws Exception {
+        // NOTE(review): same flaw as testGetVersion — assertNull on a boxed
+        // primitive long always fails, so this test cannot pass as written.
+        long jmxParentVersion = jmxTransactionClient.getParentVersion();
+        assertNull(jmxParentVersion);
+    }
+
+ @Test
+ public void testValidateConfig() throws Exception {
+ jmxTransactionClient.validateConfig();
+ }
+
+ @Test
+ public void testAbortConfig() throws Exception {
+ jmxTransactionClient.abortConfig();
+ }
+
+ @Test
+ public void testDestroyModule2() throws Exception {
+ jmxTransactionClient.destroyModule("moduleB", "instB");
+ assertNull(transactionController.conf4);
+ }
+
+ @Test
+ public void testDestroyModule() throws Exception {
+ ObjectName on = testClientCreateModule(jmxTransactionClient);
+ jmxTransactionClient.destroyModule(on);
+ }
+
+ @Test
+ public void testCreateModule() throws Exception {
+ ObjectName on = testClientCreateModule(jmxTransactionClient);
+ Assert.assertNotNull(on);
+ }
+
+ private ObjectName testClientCreateModule(ConfigTransactionClient client)
+ throws Exception {
+ return client.createModule("testModuleName", "testInstanceName");
+ }
+
+ @Ignore
+ public void testAssertVersion() {
+ jmxTransactionClient.assertVersion((int)jmxTransactionClient.getParentVersion(),
+ (int)jmxTransactionClient.getVersion());
+ }
+
+ @Test(expected = NullPointerException.class)
+ public void testCommit() throws Exception {
+ jmxTransactionClient.commit();
+ }
+
+ @Test
+ public void testLookupConfigBeans2() throws Exception {
+ Set<ObjectName> jmxLookup = testClientLookupConfigBeans2(
+ jmxTransactionClient, "moduleB");
+ assertEquals(Sets.newHashSet(transactionController.conf3), jmxLookup);
+ }
+
+ private Set<ObjectName> testClientLookupConfigBeans2(
+ ConfigTransactionClient client, String moduleName) {
+ Set<ObjectName> beans = client.lookupConfigBeans(moduleName);
+ assertEquals(1, beans.size());
+ return beans;
+ }
+
+ @Test
+ public void testLookupConfigBean() throws Exception {
+ Set<ObjectName> jmxLookup = testClientLookupConfigBean(
+ jmxTransactionClient, "moduleB", "instB");
+ assertEquals(Sets.newHashSet(transactionController.conf3), jmxLookup);
+ }
+
+ private Set<ObjectName> testClientLookupConfigBean(
+ ConfigTransactionClient client, String moduleName,
+ String instanceName) {
+ Set<ObjectName> beans = client.lookupConfigBeans(moduleName,
+ instanceName);
+ assertEquals(1, beans.size());
+ return beans;
+ }
+
+ @Test
+ public void testLookupConfigBeans3() throws Exception {
+ Set<ObjectName> jmxLookup = testClientLookupConfigBeans3(
+ jmxTransactionClient, "moduleB", "instB");
+ assertEquals(Sets.newHashSet(transactionController.conf3), jmxLookup);
+ }
+
+ private Set<ObjectName> testClientLookupConfigBeans3(
+ ConfigTransactionClient client, String moduleName,
+ String instanceName) {
+ Set<ObjectName> beans = client.lookupConfigBeans(moduleName,
+ instanceName);
+ assertEquals(1, beans.size());
+ return beans;
+ }
+
+ @Test
+ public void testCheckConfigBeanExists() throws Exception {
+ jmxTransactionClient.checkConfigBeanExists(transactionControllerON);
+ assertEquals("configBeanExists", transactionController.check);
+ }
+
+ @Test
+ public void testSaveServiceReference() throws Exception {
+ assertEquals(transactionControllerON, jmxTransactionClient.saveServiceReference("serviceInterfaceName", "refName", transactionControllerON));
+ }
+
+ @Test
+ public void testRemoveServiceReference() throws Exception {
+ jmxTransactionClient.removeServiceReference("serviceInterface", "refName");
+ assertEquals("refName", transactionController.check);
+ }
+
+ @Test
+ public void testRemoveAllServiceReferences() throws Exception {
+ jmxTransactionClient.removeAllServiceReferences();
+ assertNull(transactionController.check);
+ }
+
+ @Test
+ public void testLookupConfigBeanByServiceInterfaceName() throws Exception {
+ assertEquals(transactionController.conf3, jmxTransactionClient.lookupConfigBeanByServiceInterfaceName("serviceInterface", "refName"));
+ }
+
+ @Test
+ public void testGetServiceMapping() throws Exception {
+ Assert.assertNotNull(jmxTransactionClient.getServiceMapping());
+ }
+
+ @Test
+ public void testLookupServiceReferencesByServiceInterfaceName() throws Exception {
+ Assert.assertNotNull(jmxTransactionClient.lookupServiceReferencesByServiceInterfaceName("serviceInterfaceQName"));
+ }
+
+ @Test
+ public void testLookupServiceInterfaceNames() throws Exception {
+ assertEquals(Sets.newHashSet("setA"), jmxTransactionClient.lookupServiceInterfaceNames(transactionControllerON));
+ }
+
+ @Test
+ public void testGetServiceInterfaceName() throws Exception {
+ assertEquals("namespace" + "localName", jmxTransactionClient.getServiceInterfaceName("namespace", "localName"));
+ }
+
+ @Test
+ public void removeServiceReferences() throws Exception {
+ assertEquals(true, jmxTransactionClient.removeServiceReferences(transactionControllerON));
+ }
+
+ @Test
+ public void testGetServiceReference() throws Exception {
+ assertEquals(transactionController.conf3, jmxTransactionClient.getServiceReference("serviceInterfaceQName", "refName"));
+ }
+
+ @Test
+ public void testCheckServiceReferenceExists() throws Exception {
+ jmxTransactionClient.checkServiceReferenceExists(transactionControllerON);
+ assertEquals("referenceExist", transactionController.check);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testValidateBean() throws Exception {
+ jmxTransactionClient.validateBean(transactionControllerON);
+ }
+
+ @Test(expected = ValidationException.class)
+ public void testValidateBean2() throws Exception {
+ MBeanServer mbsLocal = mock(MBeanServer.class);
+ MBeanException mBeanException = new MBeanException(new ValidationException(
+ Collections.<String, Map<String, ExceptionMessageWithStackTrace>>emptyMap()));
+ doThrow(mBeanException).when(mbsLocal).invoke(transactionControllerON, "validate", null, null);
+
+ ConfigTransactionJMXClient jmxTransactionClientFake = new ConfigTransactionJMXClient(null,
+ transactionControllerON,
+ mbsLocal);
+ jmxTransactionClientFake.validateBean(transactionControllerON);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testValidateBean3() throws Exception {
+ MBeanServer mbsLocal = mock(MBeanServer.class);
+ MBeanException mBeanException = new MBeanException(new RuntimeException());
+ doThrow(mBeanException).when(mbsLocal).invoke(transactionControllerON, "validate", null, null);
+ ConfigTransactionJMXClient jmxTransactionClientFake = new ConfigTransactionJMXClient(null,
+ transactionControllerON,
+ mbsLocal);
+ jmxTransactionClientFake.validateBean(transactionControllerON);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testSetAttribute() throws Exception {
+ attr = null;
+ jmxTransactionClient.setAttribute(transactionControllerON, "attrName", attr);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testGetAttribute() throws Exception {
+ attr = jmxTransactionClient.getAttribute(transactionController.conf3, "attrName");
+ assertNull(attr);
+ }
+
+ @Test
+ public void testGetAvailableModuleFactoryQNames() throws Exception {
+ Assert.assertNotNull(jmxTransactionClient.getAvailableModuleFactoryQNames());
+ }
}
*/
package org.opendaylight.controller.config.util;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class TestingConfigRegistry implements ConfigRegistryMXBean {
static final ObjectName conf1, conf2, conf3, run1, run2, run3;
+ public static String check;
+ public static boolean checkBool;
+ private Map<String, ObjectName> map = new HashMap<>();
public static final String moduleName1 = "moduleA";
public static final String moduleName2 = "moduleB";
public static final String instName1 = "instA";
public static final String instName2 = "instB";
+ public static final String refName1 = "refA";
+ public static final String refName2 = "refB";
+ public static final String serviceQName1 = "qnameA";
+ public static final String serviceQName2 = "qnameB";
static {
conf1 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+ ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
- + "=" + moduleName1);
+ + "=" + moduleName1 + "," + ObjectNameUtil.SERVICE_QNAME_KEY
+ + "=" + serviceQName1 + "," + ObjectNameUtil.REF_NAME_KEY
+ + "=" + refName1);
conf2 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+ ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+ "=" + moduleName1 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
- + "=" + instName1);
+ + "=" + instName1 + "," + ObjectNameUtil.SERVICE_QNAME_KEY
+ + "=" + serviceQName2 + "," + ObjectNameUtil.REF_NAME_KEY
+ + "=" + refName1);
conf3 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+ ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+ "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
+ ":type=RuntimeBean," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+ "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
+ "=" + instName2);
+
+ check = null;
+ checkBool = false;
+
}
@Override
public ObjectName beginConfig() {
- return null;
+ return conf2;
}
@Override
@Override
public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ // Test stub: records that the existence check was invoked.
+ // NOTE(review): the objectName argument is ignored and the freshly
+ // built set of three beans is never empty, so checkBool is set to
+ // true unconditionally -- confirm this is the intended stub behavior.
+ Set<ObjectName> configBeans = Sets.<ObjectName> newHashSet(run1, run2, run3);
+ if(configBeans.size()>0){
+ checkBool = true;
+ }
}
@Override
public ObjectName lookupConfigBeanByServiceInterfaceName(String serviceInterfaceQName, String refName) {
- throw new UnsupportedOperationException();
+ if (serviceInterfaceQName.equals(serviceQName1) && refName.equals(refName1)) {
+ return conf1;
+ }
+ else{
+ return null;
+ }
}
@Override
public Map<String, Map<String, ObjectName>> getServiceMapping() {
- throw new UnsupportedOperationException();
+ return null;
}
@Override
public Map<String, ObjectName> lookupServiceReferencesByServiceInterfaceName(String serviceInterfaceQName) {
- throw new UnsupportedOperationException();
+
+ // NOTE(review): entries accumulate in the shared instance-level map
+ // across invocations, so a lookup may also return entries added by
+ // earlier calls with different QNames -- confirm this is intended;
+ // a fresh local map would isolate each call.
+ if(serviceInterfaceQName.equals(serviceQName1)){
+ map.put("conf1", conf1);
+ }
+ else if(serviceInterfaceQName.equals(serviceQName2)){
+ map.put("conf2", conf2);
+ }
+ else{
+ map.put("conf3", conf3);
+ }
+ return map;
}
@Override
public Set<String> lookupServiceInterfaceNames(ObjectName objectName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ return Sets.<String> newHashSet(serviceQName1, serviceQName2);
}
@Override
public String getServiceInterfaceName(String namespace, String localName) {
- throw new UnsupportedOperationException();
+ return null;
}
@Override
public Set<String> getAvailableModuleFactoryQNames() {
- throw new UnsupportedOperationException();
+ return Sets.<String> newHashSet(moduleName1, moduleName2);
}
@Override
public ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ return conf1;
}
@Override
*/
package org.opendaylight.controller.config.util;
+import java.util.HashMap;
import java.util.Map;
import java.util.Set;
ConfigTransactionControllerMXBean {
public final ObjectName conf1, conf2, conf3;
+ public ObjectName conf4;
+ public String check;
+ Map<String, ObjectName> mapSub;
+ Map<String, Map<String, ObjectName>> map;
public static final String moduleName1 = "moduleA";
public static final String moduleName2 = "moduleB";
+ ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+ "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
+ "=" + instName2);
+ conf4 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+ + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+ + "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
+ + "=" + instName2);
+ mapSub = new HashMap<String, ObjectName>();
+ map = new HashMap<String, Map<String,ObjectName>>();
}
@Override
public ObjectName createModule(String moduleName, String instanceName)
throws InstanceAlreadyExistsException {
- return null;
+ // Return a synthetic Module ObjectName built from the factory name so
+ // callers can assert on the created module's identity.
+ return ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+ + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+ + "=" + moduleName);
}
@Override
public void destroyModule(ObjectName objectName)
throws InstanceNotFoundException {
+ if(objectName != null){
+ conf4 = null;
+ }
}
@Override
@Override
public String getTransactionName() {
- return null;
+ // Fixed transaction name used by test assertions.
+ return "transactionName";
}
@Override
@Override
public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ check = "configBeanExists";
}
@Override
public ObjectName saveServiceReference(String serviceInterfaceName, String refName, ObjectName moduleON) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ return moduleON;
}
@Override
public void removeServiceReference(String serviceInterfaceName, String refName) {
- throw new UnsupportedOperationException();
+ check = refName;
}
@Override
public void removeAllServiceReferences() {
- throw new UnsupportedOperationException();
+ check = null;
}
@Override
public ObjectName lookupConfigBeanByServiceInterfaceName(String serviceInterfaceQName, String refName) {
- throw new UnsupportedOperationException();
+ return conf3;
}
@Override
public Map<String, Map<String, ObjectName>> getServiceMapping() {
- throw new UnsupportedOperationException();
+ mapSub.put("A",conf2);
+ map.put("AA", mapSub);
+ return map;
}
@Override
public Map<String, ObjectName> lookupServiceReferencesByServiceInterfaceName(String serviceInterfaceQName) {
- throw new UnsupportedOperationException();
+ mapSub.put("A",conf2);
+ return mapSub;
}
@Override
public Set<String> lookupServiceInterfaceNames(ObjectName objectName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ return Sets.newHashSet("setA");
}
@Override
public String getServiceInterfaceName(String namespace, String localName) {
- throw new UnsupportedOperationException();
+ // Record the requested name for test verification; keep the
+ // assignment out of the return expression for readability.
+ check = namespace + localName;
+ return check;
}
@Override
public boolean removeServiceReferences(ObjectName objectName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ return true;
}
@Override
public Set<String> getAvailableModuleFactoryQNames() {
- throw new UnsupportedOperationException();
+ return Sets.newHashSet("availableModuleFactoryQNames");
}
@Override
public ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ return conf3;
}
@Override
public void checkServiceReferenceExists(ObjectName objectName) throws InstanceNotFoundException {
- throw new UnsupportedOperationException();
+ check = "referenceExist";
}
}
--- /dev/null
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+# script; for example karaf, start, stop, admin, client, ...
+#
+# if [ "$KARAF_SCRIPT" == "SCRIPT_NAME" ]; then
+# Actions go here...
+# fi
+
+#
+# general settings which should be applied for all scripts go here; please keep
+# in mind that it is possible that scripts might be executed more than once, e.g.
+# in example of the start script where the start script is executed first and the
+# karaf script afterwards.
+#
+
+#
+# The following section shows the possible configuration options for the default
+# karaf scripts
+#
+# export JAVA_HOME # Location of Java installation
+# export JAVA_MIN_MEM # Minimum memory for the JVM
+# export JAVA_MAX_MEM # Maximum memory for the JVM
+# export JAVA_PERM_MEM # Minimum perm memory for the JVM
+# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM
+# export KARAF_HOME # Karaf home folder
+# export KARAF_DATA # Karaf data folder
+# export KARAF_BASE # Karaf base folder
+# export KARAF_ETC # Karaf etc folder
+# export KARAF_OPTS # Additional available Karaf options
+# export KARAF_DEBUG # Enable debug mode
+if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
+ export JAVA_MAX_PERM_MEM="512m"
+fi
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
+ export JAVA_MAX_MEM="2048m"
+fi
+
enableStrongPasswordCheck = false
#Jolokia configurations
-org.jolokia.listenForHttpService=false
+#org.jolokia.listenForHttpService=false
# Logging configuration for Tomcat-JUL logging
java.util.logging.config.file=configuration/tomcat-logging.properties
#Hosttracker hostsdb key scheme setting
hosttracker.keyscheme=IP
+# LISP Flow Mapping configuration
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+lisp.mappingOverwrite = true
+# Enable the Solicit-Map-Request (SMR) mechanism
+lisp.smr = false
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>sal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>benchmark-data-store</artifactId>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-parser-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openjdk.jmh</groupId>
+ <artifactId>jmh-generator-annprocess</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-inmemory-datastore</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <configuration>
+ <classpathScope>test</classpathScope>
+ <executable>java</executable>
+ <arguments>
+ <argument>-classpath</argument>
+ <classpath/>
+ <argument>org.openjdk.jmh.Main</argument>
+ <argument>.*</argument>
+ </arguments>
+ </configuration>
+ <executions>
+ <execution>
+ <id>run-benchmarks</id>
+ <phase>integration-test</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Warmup;
+
+/**
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int WARMUP_ITERATIONS = 20;
+ private static final int MEASUREMENT_ITERATIONS = 20;
+
+ private static final int OUTER_LIST_100K = 100000;
+ private static final int OUTER_LIST_50K = 50000;
+ private static final int OUTER_LIST_10K = 10000;
+
+ private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
+ private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
+ private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
+
+ private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
+ final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
+
+ for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
+ paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
+ .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .build();
+ }
+ return paths;
+ }
+
+ private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
+ private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
+ private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
+
+ private static MapNode initInnerListItems(final int count) {
+ final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
+ .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
+
+ for (int i = 1; i <= count; ++i) {
+ mapEntryBuilder
+ .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
+ }
+ return mapEntryBuilder.build();
+ }
+
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
+ private static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
+
+ private static NormalizedNode<?,?>[] initOuterListItems(int outerListItemsCount, MapNode innerList) {
+ final NormalizedNode<?,?>[] outerListItems = new NormalizedNode[outerListItemsCount];
+
+ for (int i = 0; i < outerListItemsCount; ++i) {
+ int outerListKey = i;
+ outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .withChild(innerList).build();
+ }
+ return outerListItems;
+ }
+
+ protected SchemaContext schemaContext;
+ protected InMemoryDOMDataStore domStore;
+
+ abstract public void setUp() throws Exception;
+
+ abstract public void tearDown();
+
+ /**
+ * Seeds the store with the "test" container holding an empty outer-list,
+ * committing the write through the full three-phase cohort so that every
+ * benchmark starts from the same baseline tree.
+ */
+ protected void initTestNode() throws Exception {
+ final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
+ .build();
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(testPath, provideOuterListNode());
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ /** Builds the "test" container node carrying an empty outer-list child. */
+ private DataContainerChild<?, ?> provideOuterListNode() {
+ return ImmutableContainerNodeBuilder
+ .create()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
+ .withChild(
+ ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
+ .build()).build();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ }
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ cohort.canCommit().get();
+ cohort.preCommit().get();
+ cohort.commit().get();
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+/**
+ * Benchmark Model class loads the odl-datastore-test.yang model from resources.
+ * <br>
+ * This class serves as facilitator class which holds several references to initialized yang model as static final
+ * members.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public final class BenchmarkModel {
+
+ public static final QName TEST_QNAME = QName
+ .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test");
+ public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+ public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+ public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+ public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+ private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+ public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+ public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+
+ /** @return input stream of the bundled odl-datastore-test.yang resource */
+ public static final InputStream getDatastoreBenchmarkInputStream() {
+ return getInputStream(DATASTORE_TEST_YANG);
+ }
+
+ /** Loads a classpath resource relative to this class. */
+ private static InputStream getInputStream(final String resourceName) {
+ return BenchmarkModel.class.getResourceAsStream(resourceName);
+ }
+
+ /**
+ * Parses the benchmark YANG model and resolves its schema context.
+ * NOTE(review): the resource stream is handed to the parser without an
+ * explicit close here -- assumed the parser consumes it fully; confirm
+ * it does not leak.
+ */
+ public static SchemaContext createTestContext() {
+ YangParserImpl parser = new YangParserImpl();
+ Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(
+ getDatastoreBenchmarkInputStream()));
+ return parser.resolveSchemaContext(modules);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as BlockingBoundedFastThreadPool
+ * and DOM Store Executor Service as Blocking Bounded Fast Thread Pool.
+ *
+ * @see org.opendaylight.yangtools.util.concurrent.SpecialExecutors
+ * @see org.opendaylight.controller.md.sal.dom.store.benchmark.AbstractInMemoryDatastoreWriteTransactionBenchmark
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+ private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+ private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+ private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
+
+ @Override
+ @Setup(Level.Trial)
+ public void setUp() throws Exception {
+ final String name = "DS_BENCHMARK";
+ final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+ final ListeningExecutorService domStoreExecutor = MoreExecutors.listeningDecorator(SpecialExecutors.newBoundedSingleThreadExecutor(
+ MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name ));
+
+ domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
+ dataChangeListenerExecutor);
+ schemaContext = BenchmarkModel.createTestContext();
+ domStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @Override
+ @TearDown
+ public void tearDown() {
+ schemaContext = null;
+ domStore = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Blocking Bounded Fast Thread Pool
+ * and DOM Store Executor Service as Same Thread Executor.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithSameThreadedExecutorBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+    private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+    private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+
+    // @Override added for consistency with the sibling benchmark class:
+    // setUp() implements the abstract method declared in the parent.
+    @Override
+    @Setup(Level.Trial)
+    public void setUp() throws Exception {
+        final String name = "DS_BENCHMARK";
+        final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+            MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+        domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+            dataChangeListenerExecutor);
+        schemaContext = BenchmarkModel.createTestContext();
+        domStore.onGlobalContextUpdated(schemaContext);
+        initTestNode();
+    }
+
+    @Override
+    @TearDown
+    public void tearDown() {
+        schemaContext = null;
+        domStore = null;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
+ * of benchmark creates InMemoryDataStore with Data Change Listener Executor Service as Same Thread Executor
+ * and DOM Store Executor Service as Same Thread Executor.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+    // @Override added for consistency with the sibling benchmark class:
+    // setUp() implements the abstract method declared in the parent.
+    @Override
+    @Setup(Level.Trial)
+    public void setUp() throws Exception {
+        domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+            MoreExecutors.sameThreadExecutor());
+        schemaContext = BenchmarkModel.createTestContext();
+        domStore.onGlobalContextUpdated(schemaContext);
+        initTestNode();
+    }
+
+    @Override
+    @TearDown
+    public void tearDown() {
+        schemaContext = null;
+        domStore = null;
+    }
+}
--- /dev/null
+module odl-datastore-test {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
+ prefix "store-test";
+
+ revision "2014-03-13" {
+ description "Initial revision.";
+ }
+
+ container test {
+ list outer-list {
+ key id;
+ leaf id {
+ type int32;
+ }
+ choice outer-choice {
+ case one {
+ leaf one {
+ type string;
+ }
+ }
+ case two-three {
+ leaf two {
+ type string;
+ }
+ leaf three {
+ type string;
+ }
+ }
+ }
+ list inner-list {
+ key name;
+ leaf name {
+ type int32;
+ }
+ leaf value {
+ type string;
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
import com.google.common.net.InetAddresses;
-public class FromSalConversionsUtils {
+/**
+ * MD-SAL to AD-SAL conversions collection
+ */
+public final class FromSalConversionsUtils {
- private FromSalConversionsUtils() {
+ /** http://en.wikipedia.org/wiki/IPv4#Packet_structure (end of octet number 1, bit 14.+15.) */
+ public static final int ENC_FIELD_BIT_SIZE = 2;
+ private FromSalConversionsUtils() {
+ throw new IllegalAccessError("forcing no instance for factory");
}
@SuppressWarnings("unused")
return true;
}
+ /**
+ * Shift a 6-bit DSCP value into the NW-TOS byte (ECN bits left zero).
+ *
+ * @param nwDscp NW-DSCP
+ * @return shifted to NW-TOS (with empty ECN part)
+ */
+ public static int dscpToTos(int nwDscp) {
+ // The method returns int; the previous narrowing (short) cast was
+ // redundant for valid DSCP values (0..63) and would silently
+ // truncate out-of-range inputs.
+ return nwDscp << ENC_FIELD_BIT_SIZE;
+ }
}
private static SetNwTosActionCase _toAction(final SetNwTos sourceAction) {
return new SetNwTosActionCaseBuilder()
- .setSetNwTosAction(new SetNwTosActionBuilder().setTos(sourceAction.getNwTos()).build())
+ .setSetNwTosAction(new SetNwTosActionBuilder().setTos(FromSalConversionsUtils.dscpToTos(sourceAction.getNwTos())).build())
.build();
}
private static final Logger LOG = LoggerFactory.getLogger(ToSalConversionsUtils.class);
private ToSalConversionsUtils() {
-
+ throw new IllegalAccessError("forcing no instance for factory");
}
public static Flow toFlow(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow source, Node node) {
} else if (sourceAction instanceof SetNwTosActionCase) {
Integer tos = ((SetNwTosActionCase) sourceAction).getSetNwTosAction().getTos();
if (tos != null) {
- targetAction.add(new SetNwTos(tos));
+ targetAction.add(new SetNwTos(ToSalConversionsUtils.tosToNwDscp(tos)));
}
} else if (sourceAction instanceof SetTpDstActionCase) {
PortNumber port = ((SetTpDstActionCase) sourceAction).getSetTpDstAction().getPort();
return mac;
}
+
+ /**
+ * @param nwTos NW-TOS
+ * @return shifted to NW-DSCP
+ */
+ public static int tosToNwDscp(int nwTos) {
+ return (short) (nwTos >>> FromSalConversionsUtils.ENC_FIELD_BIT_SIZE);
+ }
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils;
+
+/**
+ * test of {@link FromSalConversionsUtils}
+ */
+public class FromSalConversionsUtilsTest {
+
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils#dscpToTos(int)}.
+ */
+ @Test
+ public void testDscpToTos() {
+ // dscpToTos shifts DSCP left by 2 bits (ENC_FIELD_BIT_SIZE) to build NW-TOS.
+ Assert.assertEquals(0, FromSalConversionsUtils.dscpToTos(0));
+ Assert.assertEquals(4, FromSalConversionsUtils.dscpToTos(1));
+ Assert.assertEquals(252, FromSalConversionsUtils.dscpToTos(63));
+ // NOTE(review): 64 and -1 are outside the 6-bit DSCP range (0..63); the
+ // implementation performs no validation, so these pin the current
+ // shift-as-is behavior (64 << 2 == 256, -1 << 2 == -4) rather than a spec.
+ Assert.assertEquals(256, FromSalConversionsUtils.dscpToTos(64));
+ Assert.assertEquals(-4, FromSalConversionsUtils.dscpToTos(-1));
+ }
+
+}
}
assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
} else if (innerAction instanceof SetNwTosActionCase) {
- assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 63, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
+ assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 252, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
} else if (innerAction instanceof SetNwDstActionCase) {
Address address = ((SetNwDstActionCase) innerAction).getSetNwDstAction().getAddress();
boolean ipv4AddressFound = false;
private void prepareActionSetNwTos(SetNwTosActionCaseBuilder wrapper) {
SetNwTosActionBuilder setNwTosActionBuilder = new SetNwTosActionBuilder();
- setNwTosActionBuilder.setTos(63);
+ setNwTosActionBuilder.setTos(252);
wrapper.setSetNwTosAction(setNwTosActionBuilder.build());
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils;
+
+/**
+ * test of {@link ToSalConversionsUtils}
+ */
+public class ToSalConversionsUtilsTest {
+
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils#tosToNwDscp(int)}.
+ */
+ @Test
+ public void testTosToNwDscp() {
+ // tosToNwDscp unsigned-shifts NW-TOS right by 2, discarding the ECN bits,
+ // so adjacent TOS values (252, 253) collapse to the same DSCP (63).
+ Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(0));
+ Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(1));
+ Assert.assertEquals(1, ToSalConversionsUtils.tosToNwDscp(4));
+ Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(252));
+ Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(253));
+ // -1 >>> 2 is 0x3FFFFFFF; the (short) cast inside tosToNwDscp truncates
+ // it back to -1 — documents current behavior for out-of-range input.
+ Assert.assertEquals(-1, ToSalConversionsUtils.tosToNwDscp(-1));
+ }
+}
if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj);
builder.setFlowRef(new FlowRef(identifier));
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalFlowService().removeFlow(builder.build());
if (tableIdValidationPrecondition(tableKey, update)) {
final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
if (tableIdValidationPrecondition(tableKey, addDataObj)) {
final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
private boolean tableIdValidationPrecondition (final TableKey tableKey, final Flow flow) {
Preconditions.checkNotNull(tableKey, "TableKey can not be null or empty!");
Preconditions.checkNotNull(flow, "Flow can not be null or empty!");
- if (flow.getTableId() != tableKey.getId()) {
+ if (! tableKey.getId().equals(flow.getTableId())) {
LOG.error("TableID in URI tableId={} and in palyload tableId={} is not same.",
flow.getTableId(), tableKey.getId());
return false;
final Group group = (removeDataObj);
final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().removeGroup(builder.build());
final Group updatedGroup = (update);
final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
final Group group = (addDataObj);
final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalGroupService().addGroup(builder.build());
final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().removeMeter(builder.build());
final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
- builder.setNode(new NodeRef(nodeIdent));
+ builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
this.provider.getSalMeterService().addMeter(builder.build());
<module>sal-binding-dom-it</module>
</modules>
</profile>
+ <!-- Opt-in profile for data-store micro-benchmarks; not built by default.
+ Activate explicitly with: mvn -Pbenchmarks ... -->
+ <profile>
+ <id>benchmarks</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ </activation>
+ <modules>
+ <module>benchmark-data-store</module>
+ </modules>
+ </profile>
</profiles>
</project>
\ No newline at end of file
<Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
<Export-package>org.opendaylight.cluster.raft</Export-package>
<Import-Package>*</Import-Package>
+ <DynamicImport-Package>*</DynamicImport-Package>
</instructions>
</configuration>
</plugin>
}
} else if (message instanceof PrintState) {
- LOG.debug("State of the node:{} has entries={}, {}",
- getId(), state.size(), getReplicatedLogState());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("State of the node:{} has entries={}, {}",
+ getId(), state.size(), getReplicatedLogState());
+ }
} else if (message instanceof PrintRole) {
- LOG.debug("{} = {}, Peers={}", getId(), getRaftState(),getPeers());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers());
+ }
} else {
super.onReceiveCommand(message);
} catch (Exception e) {
LOG.error("Exception in applying snapshot", e);
}
- LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+ }
}
private ByteString fromObject(Object snapshot) throws Exception {
import org.opendaylight.controller.cluster.example.messages.PrintState;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
import java.io.BufferedReader;
import java.io.InputStreamReader;
actorSystem.stop(actorRef);
actorRefs.remove(actorName);
-
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new RemoveRaftPeer(actorName), null);
- }
-
allPeers.remove(actorName);
}
allPeers.put(actorName, address);
ActorRef exampleActor = createExampleActor(actorName);
-
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new AddRaftPeer(actorName, address), null);
- }
-
actorRefs.put(actorName, exampleActor);
addClientsToNode(actorName, 1);
*/
public class DefaultConfigParamsImpl implements ConfigParams {
- private static final int SNAPSHOT_BATCH_COUNT = 100000;
+ private static final int SNAPSHOT_BATCH_COUNT = 20000;
/**
* The maximum election time variance
* This context should NOT be passed directly to any other actor it is
* only to be consumed by the RaftActorBehaviors
*/
- private RaftActorContext context;
+ protected RaftActorContext context;
/**
* The in-memory journal
@Override public void onReceiveRecover(Object message) {
if (message instanceof SnapshotOffer) {
- LOG.debug("SnapshotOffer called..");
+ LOG.info("SnapshotOffer called..");
SnapshotOffer offer = (SnapshotOffer) message;
Snapshot snapshot = (Snapshot) offer.snapshot();
context.setReplicatedLog(replicatedLog);
context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
- LOG.debug("Applied snapshot to replicatedLog. " +
- "snapshotIndex={}, snapshotTerm={}, journal-size={}",
+ LOG.info("Applied snapshot to replicatedLog. " +
+ "snapshotIndex={}, snapshotTerm={}, journal-size={}",
replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
- replicatedLog.size());
+ replicatedLog.size()
+ );
// Apply the snapshot to the actors state
applySnapshot(ByteString.copyFrom(snapshot.getState()));
} else if (message instanceof ReplicatedLogEntry) {
- replicatedLog.append((ReplicatedLogEntry) message);
+ ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
+
+ // Apply State immediately
+ replicatedLog.append(logEntry);
+ applyState(null, "recovery", logEntry.getData());
+ context.setLastApplied(logEntry.getIndex());
+ context.setCommitIndex(logEntry.getIndex());
+
} else if (message instanceof DeleteEntries) {
replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+
} else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor());
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+
} else if (message instanceof RecoveryCompleted) {
- LOG.debug(
+ LOG.info(
"RecoveryCompleted - Switching actor to Follower - " +
- "Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
+ "Persistence Id = " + persistenceId() +
+ " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
"journal-size={}",
replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
replicatedLog.snapshotTerm, replicatedLog.size());
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
- LOG.debug("Applying state for log index {} data {}",
- applyState.getReplicatedLogEntry().getIndex(),
- applyState.getReplicatedLogEntry().getData());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Applying state for log index {} data {}",
+ applyState.getReplicatedLogEntry().getIndex(),
+ applyState.getReplicatedLogEntry().getData());
+ }
applyState(applyState.getClientActor(), applyState.getIdentifier(),
applyState.getReplicatedLogEntry().getData());
} else if(message instanceof ApplySnapshot ) {
Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
- LOG.debug("ApplySnapshot called on Follower Actor " +
- "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
- snapshot.getLastAppliedTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("ApplySnapshot called on Follower Actor " +
+ "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
+ snapshot.getLastAppliedTerm()
+ );
+ }
applySnapshot(ByteString.copyFrom(snapshot.getState()));
//clears the followers log, sets the snapshot index to ensure adjusted-index works
context.removePeer(rrp.getName());
} else if (message instanceof CaptureSnapshot) {
- LOG.debug("CaptureSnapshot received by actor");
+ LOG.info("CaptureSnapshot received by actor");
CaptureSnapshot cs = (CaptureSnapshot)message;
captureSnapshot = cs;
createSnapshot();
} else if (message instanceof CaptureSnapshotReply){
- LOG.debug("CaptureSnapshotReply received by actor");
+ LOG.info("CaptureSnapshotReply received by actor");
CaptureSnapshotReply csr = (CaptureSnapshotReply) message;
ByteString stateInBytes = csr.getSnapshot();
- LOG.debug("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
+ LOG.info("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
handleCaptureSnapshotReply(stateInBytes);
} else {
if (!(message instanceof AppendEntriesMessages.AppendEntries)
&& !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
- LOG.debug("onReceiveCommand: message:" + message.getClass());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("onReceiveCommand: message:" + message.getClass());
+ }
}
RaftState state =
if(oldBehavior != currentBehavior){
onStateChanged();
}
+
+ onLeaderChanged(oldBehavior.getLeaderId(), currentBehavior.getLeaderId());
}
}
context.getReplicatedLog().lastIndex() + 1,
context.getTermInformation().getCurrentTerm(), data);
- LOG.debug("Persist data {}", replicatedLogEntry);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Persist data {}", replicatedLogEntry);
+ }
replicatedLog
.appendAndPersist(clientActor, identifier, replicatedLogEntry);
*/
protected abstract void onStateChanged();
+ protected void onLeaderChanged(String oldLeader, String newLeader){};
+
private RaftActorBehavior switchBehavior(RaftState state) {
if (currentBehavior != null) {
if (currentBehavior.state() == state) {
return null;
}
String peerAddress = context.getPeerAddress(leaderId);
- LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
- + peerAddress);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
+ + peerAddress);
+ }
return peerAddress;
}
lastAppliedTerm = lastAppliedEntry.getTerm();
}
- LOG.debug("Snapshot Capture logSize: {}", journal.size());
- LOG.debug("Snapshot Capture lastApplied:{} ", context.getLastApplied());
- LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
- LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot Capture logSize: {}", journal.size());
+ LOG.debug("Snapshot Capture lastApplied:{} ",
+ context.getLastApplied());
+ LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
+ LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+ }
// send a CaptureSnapshot to self to make the expensive operation async.
getSelf().tell(new CaptureSnapshot(
}
@Override public void update(long currentTerm, String votedFor) {
- LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+ }
this.currentTerm = currentTerm;
this.votedFor = votedFor;
}
return null;
}
+ /**
+ * Find the client request tracker for a specific logIndex
+ *
+ * @param logIndex
+ * @return
+ */
+ protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+ return null;
+ }
+
+
/**
* Find the log index from the previous to last entry in the log
*
i < index + 1; i++) {
ActorRef clientActor = null;
String identifier = null;
- ClientRequestTracker tracker = findClientRequestTracker(i);
+ ClientRequestTracker tracker = removeClientRequestTracker(i);
if (tracker != null) {
clientActor = tracker.getClientActor();
context.getReplicatedLog().get(i);
if (replicatedLogEntry != null) {
+ // Send a local message to the local RaftActor (it's derived class to be
+ // specific to apply the log to it's index)
actor().tell(new ApplyState(clientActor, identifier,
replicatedLogEntry), actor());
newLastApplied = i;
} else {
//if one index is not present in the log, no point in looping
// around as the rest wont be present either
- context.getLogger().error(
+ context.getLogger().warning(
"Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index );
break;
}
}
- // Send a local message to the local RaftActor (it's derived class to be
- // specific to apply the log to it's index)
context.getLogger().debug("Setting last applied to {}", newLastApplied);
context.setLastApplied(newLastApplied);
}
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
+import akka.event.LoggingAdapter;
import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
public class Follower extends AbstractRaftActorBehavior {
private ByteString snapshotChunksCollected = ByteString.EMPTY;
+ private final LoggingAdapter LOG;
+
public Follower(RaftActorContext context) {
super(context);
+ LOG = context.getLogger();
+
scheduleElection(electionDuration());
}
AppendEntries appendEntries) {
if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
- context.getLogger()
- .debug(appendEntries.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntries.toString());
+ }
}
// TODO : Refactor this method into a bunch of smaller methods
// an entry at prevLogIndex and this follower has no entries in
// it's log.
- context.getLogger().debug(
- "The followers log is empty and the senders prevLogIndex is {}",
- appendEntries.getPrevLogIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("The followers log is empty and the senders prevLogIndex is {}",
+ appendEntries.getPrevLogIndex());
+ }
} else if (lastIndex() > -1
&& appendEntries.getPrevLogIndex() != -1
// The follower's log is out of sync because the Leader's
// prevLogIndex entry was not found in it's log
- context.getLogger().debug(
- "The log is not empty but the prevLogIndex {} was not found in it",
- appendEntries.getPrevLogIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("The log is not empty but the prevLogIndex {} was not found in it",
+ appendEntries.getPrevLogIndex());
+ }
} else if (lastIndex() > -1
&& previousEntry != null
// prevLogIndex entry does exist in the follower's log but it has
// a different term in it
- context.getLogger().debug(
- "Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}"
- , previousEntry.getTerm()
- , appendEntries.getPrevLogTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Cannot append entries because previous entry term {} is not equal to append entries prevLogTerm {}"
+ , previousEntry.getTerm()
+ , appendEntries.getPrevLogTerm());
+ }
} else {
outOfSync = false;
}
if (outOfSync) {
// We found that the log was out of sync so just send a negative
// reply and return
- context.getLogger().debug("Follower is out-of-sync, " +
- "so sending negative reply, lastIndex():{}, lastTerm():{}",
- lastIndex(), lastTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Follower is out-of-sync, " +
+ "so sending negative reply, lastIndex():{}, lastTerm():{}",
+ lastIndex(), lastTerm()
+ );
+ }
sender.tell(
new AppendEntriesReply(context.getId(), currentTerm(), false,
lastIndex(), lastTerm()), actor()
if (appendEntries.getEntries() != null
&& appendEntries.getEntries().size() > 0) {
- context.getLogger().debug(
- "Number of entries to be appended = " + appendEntries
- .getEntries().size()
- );
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Number of entries to be appended = " + appendEntries
+ .getEntries().size()
+ );
+ }
// 3. If an existing entry conflicts with a new one (same index
// but different terms), delete the existing entry and all that
continue;
}
- context.getLogger().debug(
- "Removing entries from log starting at "
- + matchEntry.getIndex()
- );
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Removing entries from log starting at "
+ + matchEntry.getIndex()
+ );
+ }
// Entries do not match so remove all subsequent entries
context.getReplicatedLog()
}
}
- context.getLogger().debug(
- "After cleanup entries to be added from = " + (addEntriesFrom
- + lastIndex())
- );
+ if(LOG.isDebugEnabled()) {
+ context.getLogger().debug(
+ "After cleanup entries to be added from = " + (addEntriesFrom
+ + lastIndex())
+ );
+ }
// 4. Append any new entries not already in the log
for (int i = addEntriesFrom;
.appendAndPersist(appendEntries.getEntries().get(i));
}
- context.getLogger().debug(
- "Log size is now " + context.getReplicatedLog().size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Log size is now " + context.getReplicatedLog().size());
+ }
}
context.getReplicatedLog().lastIndex()));
if (prevCommitIndex != context.getCommitIndex()) {
- context.getLogger()
- .debug("Commit index set to " + context.getCommitIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Commit index set to " + context.getCommitIndex());
+ }
}
// If commitIndex > lastApplied: increment lastApplied, apply
// check if there are any entries to be applied. last-applied can be equal to last-index
if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
context.getLastApplied() < lastIndex()) {
- context.getLogger().debug("applyLogToStateMachine, " +
- "appendEntries.getLeaderCommit():{}," +
- "context.getLastApplied():{}, lastIndex():{}",
- appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("applyLogToStateMachine, " +
+ "appendEntries.getLeaderCommit():{}," +
+ "context.getLastApplied():{}, lastIndex():{}",
+ appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()
+ );
+ }
+
applyLogToStateMachine(appendEntries.getLeaderCommit());
}
}
private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
- context.getLogger().debug("InstallSnapshot received by follower " +
- "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
- installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("InstallSnapshot received by follower " +
+ "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
+ installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()
+ );
+ }
try {
if (installSnapshot.getChunkIndex() == installSnapshot.getTotalChunks()) {
} else {
// we have more to go
snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
- context.getLogger().debug("Chunk={},snapshotChunksCollected.size:{}",
- installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Chunk={},snapshotChunksCollected.size:{}",
+ installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+ }
}
sender.tell(new InstallSnapshotReply(
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
+import akka.event.LoggingAdapter;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
private final int minReplicationCount;
+ private final LoggingAdapter LOG;
+
public Leader(RaftActorContext context) {
super(context);
+ LOG = context.getLogger();
+
if (lastIndex() >= 0) {
context.setCommitIndex(lastIndex());
}
followerToLog.put(followerId, followerLogInformation);
}
- context.getLogger().debug("Election:Leader has following peers:"+ followers);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Election:Leader has following peers:" + followers);
+ }
if (followers.size() > 0) {
minReplicationCount = (followers.size() + 1) / 2 + 1;
@Override protected RaftState handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- context.getLogger().debug(appendEntries.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntries.toString());
+ }
return state();
}
AppendEntriesReply appendEntriesReply) {
if(! appendEntriesReply.isSuccess()) {
- context.getLogger()
- .debug(appendEntriesReply.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntriesReply.toString());
+ }
}
// Update the FollowerLogInformation
followerToLog.get(followerId);
if(followerLogInformation == null){
- context.getLogger().error("Unknown follower {}", followerId);
+ LOG.error("Unknown follower {}", followerId);
return state();
}
return state();
}
+ protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+
+ ClientRequestTracker toRemove = findClientRequestTracker(logIndex);
+ if(toRemove != null) {
+ trackerList.remove(toRemove);
+ }
+
+ return toRemove;
+ }
+
protected ClientRequestTracker findClientRequestTracker(long logIndex) {
for (ClientRequestTracker tracker : trackerList) {
if (tracker.getIndex() == logIndex) {
if (reply.isSuccess()) {
if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
//this was the last chunk reply
- context.getLogger().debug("InstallSnapshotReply received, " +
- "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
- reply.getChunkIndex(), followerId,
- context.getReplicatedLog().getSnapshotIndex() + 1);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("InstallSnapshotReply received, " +
+ "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
+ reply.getChunkIndex(), followerId,
+ context.getReplicatedLog().getSnapshotIndex() + 1
+ );
+ }
FollowerLogInformation followerLogInformation =
followerToLog.get(followerId);
followerLogInformation.setNextIndex(
context.getReplicatedLog().getSnapshotIndex() + 1);
mapFollowerToSnapshot.remove(followerId);
- context.getLogger().debug("followerToLog.get(followerId).getNextIndex().get()=" +
- followerToLog.get(followerId).getNextIndex().get());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" +
+ followerToLog.get(followerId).getNextIndex().get());
+ }
} else {
followerToSnapshot.markSendStatus(true);
}
} else {
- context.getLogger().info("InstallSnapshotReply received, " +
- "sending snapshot chunk failed, Will retry, Chunk:{}",
- reply.getChunkIndex());
+ LOG.info("InstallSnapshotReply received, " +
+ "sending snapshot chunk failed, Will retry, Chunk:{}",
+ reply.getChunkIndex()
+ );
followerToSnapshot.markSendStatus(false);
}
} else {
- context.getLogger().error("ERROR!!" +
- "FollowerId in InstallSnapshotReply not known to Leader" +
- " or Chunk Index in InstallSnapshotReply not matching {} != {}",
- followerToSnapshot.getChunkIndex(), reply.getChunkIndex() );
+ LOG.error("ERROR!!" +
+ "FollowerId in InstallSnapshotReply not known to Leader" +
+ " or Chunk Index in InstallSnapshotReply not matching {} != {}",
+ followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
+ );
}
}
private void replicate(Replicate replicate) {
long logIndex = replicate.getReplicatedLogEntry().getIndex();
- context.getLogger().debug("Replicate message " + logIndex);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Replicate message " + logIndex);
+ }
// Create a tracker entry we will use this later to notify the
// client actor
if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) {
// if the follower is just not starting and leader's index
// is more than followers index
- context.getLogger().debug("SendInstallSnapshot to follower:{}," +
- "follower-nextIndex:{}, leader-snapshot-index:{}, " +
- "leader-last-index:{}", followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("SendInstallSnapshot to follower:{}," +
+ "follower-nextIndex:{}, leader-snapshot-index:{}, " +
+ "leader-last-index:{}", followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+ );
+ }
actor().tell(new SendInstallSnapshot(), actor());
} else {
).toSerializable(),
actor()
);
- context.getLogger().info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
+ LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
mapFollowerToSnapshot.get(followerId).getTotalChunks());
} catch (IOException e) {
- context.getLogger().error("InstallSnapshot failed for Leader.", e);
+ LOG.error("InstallSnapshot failed for Leader.", e);
}
}
mapFollowerToSnapshot.put(followerId, followerToSnapshot);
}
ByteString nextChunk = followerToSnapshot.getNextChunk();
- context.getLogger().debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+ }
return nextChunk;
}
int size = snapshotBytes.size();
totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
- context.getLogger().debug("Snapshot {} bytes, total chunks to send:{}",
- size, totalChunks);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot {} bytes, total chunks to send:{}",
+ size, totalChunks);
+ }
}
public ByteString getSnapshotBytes() {
}
}
- context.getLogger().debug("length={}, offset={},size={}",
- snapshotLength, start, size);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("length={}, offset={},size={}",
+ snapshotLength, start, size);
+ }
return getSnapshotBytes().substring(start, start + size);
}
try {
if(leProtoBuff.getData() != null && leProtoBuff.getData().getClientPayloadClassName() != null) {
String clientPayloadClassName = leProtoBuff.getData().getClientPayloadClassName();
- payload = (Payload)Class.forName(clientPayloadClassName).newInstance();
+ payload = (Payload) Class.forName(clientPayloadClassName).newInstance();
payload = payload.decode(leProtoBuff.getData());
payload.setClientPayloadClassName(clientPayloadClassName);
} else {
package org.opendaylight.controller.cluster.raft.messages;
import com.google.protobuf.ByteString;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
public class InstallSnapshot extends AbstractRaftRPC {
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
import com.google.protobuf.ByteString;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
+import java.util.ArrayList;
import java.util.Collections;
+import java.util.List;
import java.util.Map;
+import static junit.framework.Assert.assertTrue;
import static junit.framework.TestCase.assertEquals;
public class RaftActorTest extends AbstractActorTest {
public static class MockRaftActor extends RaftActor {
+ boolean applySnapshotCalled = false;
+
public MockRaftActor(String id,
Map<String, String> peerAddresses) {
super(id, peerAddresses);
}
+ public RaftActorContext getRaftActorContext() {
+ return context;
+ }
+
+ public boolean isApplySnapshotCalled() {
+ return applySnapshotCalled;
+ }
+
public static Props props(final String id, final Map<String, String> peerAddresses){
return Props.create(new Creator<MockRaftActor>(){
}
@Override protected void applySnapshot(ByteString snapshot) {
- throw new UnsupportedOperationException("applySnapshot");
+ applySnapshotCalled = true;
}
@Override protected void onStateChanged() {
kit.findLeader(kit.getRaftActor().path().toString());
}
+    @Test
+    public void testActorRecovery() {
+        new JavaTestKit(getSystem()) {{
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    String persistenceId = "follower10";
+
+                    // First incarnation of the actor; it is killed below and
+                    // re-created to force recovery from the snapshot store.
+                    ActorRef followerActor = getSystem().actorOf(
+                        MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId);
+
+
+                    // Journal entries that follow the snapshot (indices 4 and 5).
+                    List<ReplicatedLogEntry> entries = new ArrayList<>();
+                    ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E"));
+                    ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F"));
+                    entries.add(entry1);
+                    entries.add(entry2);
+
+                    int lastApplied = 3;
+                    int lastIndex = 5;
+                    // Seed the static MockSnapshotStore so the re-created actor
+                    // is offered this snapshot during recovery.
+                    Snapshot snapshot = Snapshot.create("A B C D".getBytes(), entries, lastIndex, 1 , lastApplied, 1);
+                    MockSnapshotStore.setMockSnapshot(snapshot);
+                    MockSnapshotStore.setPersistenceId(persistenceId);
+
+                    followerActor.tell(PoisonPill.getInstance(), null);
+                    try {
+                        // give some time for actor to die
+                        Thread.sleep(200);
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                    }
+
+                    // Same persistenceId: recovery should replay the snapshot
+                    // and the two journal entries seeded above.
+                    TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId, Collections.EMPTY_MAP));
+                    try {
+                        //give some time for snapshot offer to get called.
+                        Thread.sleep(200);
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                    }
+                    RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+                    // Log should contain only the post-snapshot entries, and both
+                    // lastApplied and commitIndex come from the snapshot.
+                    assertEquals(entries.size(), context.getReplicatedLog().size());
+                    assertEquals(lastApplied, context.getLastApplied());
+                    assertEquals(lastApplied, context.getCommitIndex());
+                    assertTrue(ref.underlyingActor().isApplySnapshotCalled());
+                }
+
+            };
+        }};
+
+    }
+
}
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.protobuf.ByteString;
import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import static akka.pattern.Patterns.ask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
public class FollowerTest extends AbstractRaftActorBehaviorTest {
return new Follower(actorContext);
}
-    @Override protected RaftActorContext createActorContext() {
-        return new MockRaftActorContext("test", getSystem(), followerActor);
+    // Default context uses the shared followerActor; the new overload lets a
+    // test supply its own ActorRef (e.g. the JavaTestKit's getRef()).
+    @Override protected RaftActorContext createActorContext() {
+        return createActorContext(followerActor);
+    }
+
+    protected RaftActorContext createActorContext(ActorRef actorRef){
+        return new MockRaftActorContext("test", getSystem(), actorRef);
     }
@Test
createActorContext();
context.setLastApplied(100);
- setLastLogEntry((MockRaftActorContext) context, 1, 100, new MockRaftActorContext.MockPayload(""));
+ setLastLogEntry((MockRaftActorContext) context, 1, 100,
+ new MockRaftActorContext.MockPayload(""));
((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
List<ReplicatedLogEntry> entries =
Arrays.asList(
- (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
- new MockRaftActorContext.MockPayload("foo"))
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
+ new MockRaftActorContext.MockPayload("foo"))
);
// The new commitIndex is 101
}};
}
+
+ /**
+ * This test verifies that when InstallSnapshot is received by
+ * the follower, it is applied correctly.
+ *
+ * @throws Exception
+ */
+    @Test
+    public void testHandleInstallSnapshot() throws Exception {
+        JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {{
+
+            // Collects the InstallSnapshotReply messages the follower sends back.
+            ActorRef leaderActor = getSystem().actorOf(Props.create(
+                MessageCollectorActor.class));
+
+            // getRef() (the test kit) plays the raft actor, so ApplySnapshot
+            // messages sent by the follower land in this kit's queue.
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext(getRef());
+
+            Follower follower = (Follower)createBehavior(context);
+
+            HashMap<String, String> followerSnapshot = new HashMap<>();
+            followerSnapshot.put("1", "A");
+            followerSnapshot.put("2", "B");
+            followerSnapshot.put("3", "C");
+
+            ByteString bsSnapshot = toByteString(followerSnapshot);
+            ByteString chunkData = ByteString.EMPTY;
+            int offset = 0;
+            int snapshotLength = bsSnapshot.size();
+            int i = 1;
+
+            // Stream the snapshot to the follower in 50-byte chunks.
+            do {
+                chunkData = getNextChunk(bsSnapshot, offset);
+                final InstallSnapshot installSnapshot =
+                    new InstallSnapshot(1, "leader-1", i, 1,
+                        chunkData, i, 3);
+                follower.handleMessage(leaderActor, installSnapshot);
+                offset = offset + 50;
+                i++;
+            } while ((offset+50) < snapshotLength);
+
+            // Final chunk (3 of 3) should make the follower emit ApplySnapshot.
+            final InstallSnapshot installSnapshot3 = new InstallSnapshot(1, "leader-1", 3, 1, chunkData, 3, 3);
+            follower.handleMessage(leaderActor, installSnapshot3);
+
+            // Drain messages received by getRef(), classifying each one; any
+            // mismatch against the last InstallSnapshot yields a distinct tag.
+            String[] matches = new ReceiveWhile<String>(String.class, duration("2 seconds")) {
+                @Override
+                protected String match(Object o) throws Exception {
+                    if (o instanceof ApplySnapshot) {
+                        ApplySnapshot as = (ApplySnapshot)o;
+                        if (as.getSnapshot().getLastIndex() != installSnapshot3.getLastIncludedIndex()) {
+                            return "applySnapshot-lastIndex-mismatch";
+                        }
+                        if (as.getSnapshot().getLastAppliedTerm() != installSnapshot3.getLastIncludedTerm()) {
+                            return "applySnapshot-lastAppliedTerm-mismatch";
+                        }
+                        if (as.getSnapshot().getLastAppliedIndex() != installSnapshot3.getLastIncludedIndex()) {
+                            return "applySnapshot-lastAppliedIndex-mismatch";
+                        }
+                        if (as.getSnapshot().getLastTerm() != installSnapshot3.getLastIncludedTerm()) {
+                            return "applySnapshot-lastTerm-mismatch";
+                        }
+                        return "applySnapshot";
+                    }
+
+                    return "ignoreCase";
+                }
+            }.get();
+
+            // Exactly the clean "applySnapshot" tag must have been seen.
+            String applySnapshotMatch = "";
+            for (String reply: matches) {
+                if (reply.startsWith("applySnapshot")) {
+                    applySnapshotMatch = reply;
+                }
+            }
+
+            assertEquals("applySnapshot", applySnapshotMatch);
+
+            Object messages = executeLocalOperation(leaderActor, "get-all-messages");
+
+            assertNotNull(messages);
+            assertTrue(messages instanceof List);
+            List<Object> listMessages = (List<Object>) messages;
+
+            // The follower must have acknowledged each of the three chunks.
+            int installSnapshotReplyReceivedCount = 0;
+            for (Object message: listMessages) {
+                if (message instanceof InstallSnapshotReply) {
+                    ++installSnapshotReplyReceivedCount;
+                }
+            }
+
+            assertEquals(3, installSnapshotReplyReceivedCount);
+
+        }};
+    }
+
+    /**
+     * Sends {@code message} to {@code actor} via ask and blocks up to five
+     * seconds for the reply.
+     *
+     * @param actor   target actor
+     * @param message message to send
+     * @return the actor's reply
+     * @throws Exception if the ask fails or times out
+     */
+    public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
+        FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+        Timeout operationTimeout = new Timeout(operationDuration);
+        Future<Object> future = ask(actor, message, operationTimeout);
+
+        // The previous try/catch merely re-threw the same exception;
+        // Await.result can be called directly.
+        return Await.result(future, operationDuration);
+    }
+
+    /**
+     * Returns the chunk of {@code bs} beginning at {@code offset}, at most 50
+     * bytes long, mirroring the leader-side chunking used by these tests.
+     */
+    public ByteString getNextChunk (ByteString bs, int offset){
+        int total = bs.size();
+        int length = 50;
+        if (total < 50) {
+            // Snapshot smaller than a single chunk: send it whole.
+            length = total;
+        } else if (offset + 50 > total) {
+            // Final chunk: clip to the bytes that remain.
+            length = total - offset;
+        }
+        return bs.substring(offset, offset + length);
+    }
+
+    /**
+     * Serializes {@code state} with Java serialization and wraps the bytes in
+     * a {@link ByteString}. Fails the test (and returns null) on IOException.
+     *
+     * @param state map to serialize
+     * @return serialized form, or null if the test was failed
+     */
+    private ByteString toByteString(Map<String, String> state) {
+        // try-with-resources replaces the previous hand-rolled nested
+        // try/finally close logic; streams close in reverse order.
+        try (ByteArrayOutputStream b = new ByteArrayOutputStream();
+             ObjectOutputStream o = new ObjectOutputStream(b)) {
+            o.writeObject(state);
+            // Flush before reading the buffer, since we read it inside the try.
+            o.flush();
+            return ByteString.copyFrom(b.toByteArray());
+        } catch (IOException e) {
+            org.junit.Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
+        }
+        return null;
+    }
}
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.UntypedActor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+/**
+ * Test actor that records every non-String message it receives and returns the
+ * accumulated list when asked with the String command "get-all-messages".
+ */
+public class MessageCollectorActor extends UntypedActor {
+    private final List<Object> collected = new ArrayList<>();
+
+    @Override public void onReceive(Object message) throws Exception {
+        if (message instanceof String) {
+            // Only the retrieval command is acted upon; other strings are ignored.
+            if ("get-all-messages".equals(message)) {
+                getSender().tell(collected, getSelf());
+            }
+        } else {
+            collected.add(message);
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import scala.concurrent.Future;
+
+
+// Snapshot-store plugin stub for tests: always serves the statically
+// configured snapshot and ignores saves/deletes.
+public class MockSnapshotStore extends SnapshotStore {
+
+    // Shared across all store instances; tests set these before actor recovery.
+    private static Snapshot mockSnapshot;
+    private static String persistenceId;
+
+    /** Sets the snapshot handed out by every subsequent load. */
+    public static void setMockSnapshot(Snapshot s) {
+        mockSnapshot = s;
+    }
+
+    /** Sets the persistence id stamped on the returned snapshot metadata. */
+    public static void setPersistenceId(String pId) {
+        persistenceId = pId;
+    }
+
+    @Override
+    public Future<Option<SelectedSnapshot>> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) {
+        if (mockSnapshot == null) {
+            return Futures.successful(Option.<SelectedSnapshot>none());
+        }
+
+        // Sequence number and timestamp are arbitrary fixed values for tests.
+        SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345);
+        SelectedSnapshot selectedSnapshot =
+            new SelectedSnapshot(smd, mockSnapshot);
+        return Futures.successful(Option.some(selectedSnapshot));
+    }
+
+    @Override
+    public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+        // NOTE(review): returns null rather than a completed Future; any caller
+        // chaining on the result would NPE -- confirm saves never occur in tests.
+        return null;
+    }
+
+    @Override
+    public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+        // no-op: saves are ignored by this mock
+    }
+
+    @Override
+    public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+        // no-op
+    }
+
+    @Override
+    public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception {
+        // no-op
+    }
+}
akka {
+ persistence.snapshot-store.plugin = "mock-snapshot-store"
+
loglevel = "DEBUG"
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
}
}
}
+
+mock-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
</dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-osgi_${scala.version}</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor_${scala.version}</artifactId>
+ </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
--- /dev/null
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Base for {@link UnifiedConfig} implementations: wraps a typesafe-config
+ * {@link Config} and provides a builder that merges explicit settings over a
+ * fallback read from configuration files.
+ */
+public abstract class AbstractConfig implements UnifiedConfig {
+
+    private final Config config;
+
+    public AbstractConfig(Config config){
+        this.config = config;
+    }
+
+    @Override
+    public Config get() {
+        return config;
+    }
+
+    public static abstract class Builder<T extends Builder>{
+        // Explicit settings; these take precedence over the fallback on merge.
+        protected Map<String, Object> configHolder = new HashMap<>();
+        protected Config fallback;
+
+        private final String actorSystemName;
+
+        public Builder(String actorSystemName){
+            Preconditions.checkArgument(actorSystemName != null, "Actor system name must not be null");
+            this.actorSystemName = actorSystemName;
+        }
+
+        /** Uses the given reader's output (scoped to the actor system) as fallback. */
+        public T withConfigReader(AkkaConfigurationReader reader){
+            fallback = reader.read().getConfig(actorSystemName);
+            return (T)this;
+        }
+
+        /** Merges explicit settings over the fallback, loading defaults lazily. */
+        protected Config merge(){
+            if (fallback == null) {
+                fallback = ConfigFactory.load().getConfig(actorSystemName);
+            }
+            return ConfigFactory.parseMap(configHolder).withFallback(fallback);
+        }
+    }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore;
+package org.opendaylight.controller.cluster.common.actor;
import akka.actor.UntypedActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
-import org.opendaylight.controller.cluster.datastore.messages.Monitor;
public abstract class AbstractUntypedActor extends UntypedActor {
protected final LoggingAdapter LOG =
Logging.getLogger(getContext().system(), this);
-
public AbstractUntypedActor() {
- LOG.debug("Actor created {}", getSelf());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor created {}", getSelf());
+ }
getContext().
system().
actorSelection("user/termination-monitor").
tell(new Monitor(getSelf()), getSelf());
+
}
@Override public void onReceive(Object message) throws Exception {
- LOG.debug("Received message {}", message.getClass().getSimpleName());
+ final String messageType = message.getClass().getSimpleName();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received message {}", messageType);
+ }
handleReceive(message);
- LOG.debug("Done handling message {}",
- message.getClass().getSimpleName());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Done handling message {}", messageType);
+ }
}
protected abstract void handleReceive(Object message) throws Exception;
}
protected void unknownMessage(Object message) throws Exception {
- LOG.debug("Received unhandled message {}", message);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received unhandled message {}", message);
+ }
unhandled(message);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+/**
+ * Actor with its behaviour metered. Metering is enabled by configuration.
+ */
+public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
+
+    public AbstractUntypedActorWithMetering() {
+        // Swap in the metering wrapper only when configuration enables capture.
+        if (isMetricsCaptureEnabled()) {
+            getContext().become(new MeteringBehavior(this));
+        }
+    }
+
+    /** Reads the metric-capture flag from the actor system's configuration. */
+    private boolean isMetricsCaptureEnabled(){
+        return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
+    }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.remote.rpc.utils;
+package org.opendaylight.controller.cluster.common.actor;
import com.typesafe.config.Config;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Common actor-system configuration view: actor system name, metric capture
+ * flag and bounded-mailbox tuning. Values are cached after the first read.
+ */
+public class CommonConfig extends AbstractConfig {
+
+    protected static final String TAG_ACTOR_SYSTEM_NAME = "actor-system-name";
+    protected static final String TAG_METRIC_CAPTURE_ENABLED = "metric-capture-enabled";
+    protected static final String TAG_MAILBOX_CAPACITY = "mailbox-capacity";
+    protected static final String TAG_MAILBOX = "bounded-mailbox";
+    protected static final String TAG_MAILBOX_PUSH_TIMEOUT = "mailbox-push-timeout-time";
+
+    //TODO: Ideally these defaults should go to reference.conf
+    // https://bugs.opendaylight.org/show_bug.cgi?id=1709
+    private static final int DEFAULT_MAILBOX_CAPACITY = 1000;
+    // NOTE(review): this default is applied in NANOSECONDS below (i.e. 100ns) --
+    // confirm that is intended and not meant to be milliseconds.
+    private static final int DEFAULT_MAILBOX_PUSH_TIMEOUT = 100;
+
+    // Config paths, built once instead of via StringBuilder on every call.
+    private static final String MAILBOX_CAPACITY_PATH =
+        TAG_MAILBOX + "." + TAG_MAILBOX_CAPACITY;
+    private static final String MAILBOX_PUSH_TIMEOUT_PATH =
+        TAG_MAILBOX + "." + TAG_MAILBOX_PUSH_TIMEOUT;
+
+    //locally cached values
+    private FiniteDuration cachedMailBoxPushTimeout;
+    private Integer cachedMailBoxCapacity;
+    private Boolean cachedMetricCaptureEnableFlag;
+
+    public CommonConfig(Config config) {
+        super(config);
+    }
+
+    /** Returns the configured actor system name (required key). */
+    public String getActorSystemName() {
+        return get().getString(TAG_ACTOR_SYSTEM_NAME);
+    }
+
+    /** Whether metric capture is enabled; defaults to false. Cached. */
+    public boolean isMetricCaptureEnabled(){
+        if (cachedMetricCaptureEnableFlag != null){
+            return cachedMetricCaptureEnableFlag;
+        }
+
+        cachedMetricCaptureEnableFlag = get().hasPath(TAG_METRIC_CAPTURE_ENABLED)
+            ? get().getBoolean(TAG_METRIC_CAPTURE_ENABLED)
+            : false;
+
+        return cachedMetricCaptureEnableFlag;
+    }
+
+    public String getMailBoxName() {
+        return TAG_MAILBOX;
+    }
+
+    /** Mailbox capacity from config, or the built-in default. Cached. */
+    public Integer getMailBoxCapacity() {
+
+        if (cachedMailBoxCapacity != null) {
+            return cachedMailBoxCapacity;
+        }
+
+        cachedMailBoxCapacity = get().hasPath(MAILBOX_CAPACITY_PATH)
+            ? get().getInt(MAILBOX_CAPACITY_PATH)
+            : DEFAULT_MAILBOX_CAPACITY;
+
+        return cachedMailBoxCapacity;
+    }
+
+    /** Mailbox push timeout from config (nanosecond resolution). Cached. */
+    public FiniteDuration getMailBoxPushTimeout() {
+
+        if (cachedMailBoxPushTimeout != null) {
+            return cachedMailBoxPushTimeout;
+        }
+
+        long timeout = get().hasPath(MAILBOX_PUSH_TIMEOUT_PATH)
+            ? get().getDuration(MAILBOX_PUSH_TIMEOUT_PATH, TimeUnit.NANOSECONDS)
+            : DEFAULT_MAILBOX_PUSH_TIMEOUT;
+
+        cachedMailBoxPushTimeout = new FiniteDuration(timeout, TimeUnit.NANOSECONDS);
+        return cachedMailBoxPushTimeout;
+    }
+
+    public static class Builder<T extends Builder> extends AbstractConfig.Builder<T>{
+
+        public Builder(String actorSystemName) {
+            super(actorSystemName);
+
+            //actor system config
+            configHolder.put(TAG_ACTOR_SYSTEM_NAME, actorSystemName);
+
+            //config for bounded mailbox
+            configHolder.put(TAG_MAILBOX, new HashMap<String, Object>());
+        }
+
+        public T metricCaptureEnabled(boolean enabled) {
+            configHolder.put(TAG_METRIC_CAPTURE_ENABLED, String.valueOf(enabled));
+            return (T)this;
+        }
+
+        public T mailboxCapacity(int capacity) {
+            Preconditions.checkArgument(capacity > 0, "mailbox capacity must be >0");
+
+            Map<String, Object> boundedMailbox = (Map) configHolder.get(TAG_MAILBOX);
+            boundedMailbox.put(TAG_MAILBOX_CAPACITY, capacity);
+            return (T)this;
+        }
+
+        public T mailboxPushTimeout(String timeout){
+            // Validate the duration string eagerly; the raw string is stored.
+            Duration pushTimeout = Duration.create(timeout);
+            Preconditions.checkArgument(pushTimeout.isFinite(), "invalid value for mailbox push timeout");
+
+            Map<String, Object> boundedMailbox = (Map) configHolder.get(TAG_MAILBOX);
+            boundedMailbox.put(TAG_MAILBOX_PUSH_TIMEOUT, timeout);
+            return (T)this;
+        }
+
+        public CommonConfig build() {
+            return new CommonConfig(merge());
+        }
+    }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.remote.rpc.utils;
+package org.opendaylight.controller.cluster.common.actor;
import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.dispatch.BoundedDequeBasedMailbox;
+import akka.dispatch.MailboxType;
+import akka.dispatch.ProducesMessageQueue;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.MetricRegistry;
+import com.typesafe.config.Config;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * Bounded mailbox that registers a queue-size gauge for the owning actor in
+ * the shared metrics registry.
+ */
+public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<MeteredBoundedMailbox.MeteredMessageQueue> {
+
+    private final Logger LOG = LoggerFactory.getLogger(MeteredBoundedMailbox.class);
+
+    // NOTE(review): holds only the most recently created queue even though
+    // create() may be invoked for several actors -- confirm nothing relies on
+    // this field beyond the return value of create().
+    private MeteredMessageQueue queue;
+    private Integer capacity;
+    private FiniteDuration pushTimeOut;
+    private MetricRegistry registry;
+
+    private final String QUEUE_SIZE = "q-size";
+
+    public MeteredBoundedMailbox(ActorSystem.Settings settings, Config config) {
+        // Capacity and push timeout come from the CommonConfig view of the
+        // actor-system settings, not the per-mailbox 'config' argument.
+        CommonConfig commonConfig = new CommonConfig(settings.config());
+        this.capacity = commonConfig.getMailBoxCapacity();
+        this.pushTimeOut = commonConfig.getMailBoxPushTimeout();
+
+        MetricsReporter reporter = MetricsReporter.getInstance();
+        registry = reporter.getMetricsRegistry();
+    }
+
+
+    @Override
+    public MeteredMessageQueue create(final scala.Option<ActorRef> owner, scala.Option<ActorSystem> system) {
+        this.queue = new MeteredMessageQueue(this.capacity, this.pushTimeOut);
+        monitorQueueSize(owner, this.queue);
+        return this.queue;
+    }
+
+    /** Registers a queue-size gauge for the owning actor, once per actor path. */
+    private void monitorQueueSize(scala.Option<ActorRef> owner, final MeteredMessageQueue monitoredQueue) {
+        if (owner.isEmpty()) {
+            return; //there's no actor to monitor
+        }
+        String actorName = owner.get().path().toStringWithoutAddress();
+        String metricName = registry.name(actorName, QUEUE_SIZE);
+
+        if (registry.getMetrics().containsKey(metricName)) {
+            return; //already registered
+        }
+
+        Gauge queueSize = getQueueSizeGuage(monitoredQueue);
+        registerQueueSizeMetric(metricName, queueSize);
+    }
+
+
+    /** Bounded deque-based queue; exists so the gauge can read its size. */
+    public static class MeteredMessageQueue extends BoundedDequeBasedMailbox.MessageQueue {
+
+        public MeteredMessageQueue(int capacity, FiniteDuration pushTimeOut) {
+            super(capacity, pushTimeOut);
+        }
+    }
+
+    private Gauge getQueueSizeGuage(final MeteredMessageQueue monitoredQueue ){
+        return new Gauge<Integer>() {
+            @Override
+            public Integer getValue() {
+                return monitoredQueue.size();
+            }
+        };
+    }
+
+    private void registerQueueSizeMetric(String metricName, Gauge metric){
+        try {
+            registry.register(metricName, metric);
+        } catch (IllegalArgumentException e) {
+            // Pass the exception as SLF4J's trailing throwable argument so the
+            // stack trace is logged; previously it was bound to a '{}'
+            // placeholder, which SLF4J does not fill with a Throwable's text.
+            LOG.warn("Unable to register queue size in metrics registry", e);
+        }
+    }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.UntypedActor;
+import akka.japi.Procedure;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+
+/**
+ * Represents behaviour that can be exhibited by actors of type {@link akka.actor.UntypedActor}
+ * <p/>
+ * This behaviour meters actor's default behaviour. It captures 2 metrics:
+ * <ul>
+ * <li>message processing rate of actor's receive block</li>
+ * <li>message processing rate by message type</li>
+ * </ul>
+ *
+ * The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
+ */
+/**
+ * Metering wrapper for {@link akka.actor.UntypedActor} behaviour.
+ * <p/>
+ * Records two metrics around the wrapped actor's receive block: the overall
+ * message-processing rate, and the rate broken down by message type. Both are
+ * reported through {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}.
+ */
+public class MeteringBehavior implements Procedure<Object> {
+
+    private static final String MSG_PROCESSING_RATE = "msg-rate";
+
+    private final UntypedActor meteredActor;
+    private final MetricRegistry metricRegistry =
+        MetricsReporter.getInstance().getMetricsRegistry();
+    private final String actorName;
+    private final Timer msgProcessingTimer;
+
+    /**
+     * @param actor the actor whose behaviour is to be metered; must not be null
+     */
+    public MeteringBehavior(UntypedActor actor){
+        Preconditions.checkArgument(actor != null, "actor must not be null");
+
+        this.meteredActor = actor;
+        this.actorName = actor.getSelf().path().toStringWithoutAddress();
+        this.msgProcessingTimer =
+            metricRegistry.timer(MetricRegistry.name(actorName, MSG_PROCESSING_RATE));
+    }
+
+    /**
+     * Times the wrapped actor's onReceive with two timers: one overall and one
+     * keyed by the message's simple class name. Timers are re-used by the
+     * registry if previously created; the registry keeps a reservoir per timer
+     * and exposes count/percentile/max/mean metrics as JMX beans.
+     *
+     * @see <a href="http://dropwizard.github.io/metrics/manual/core/#timers">
+     *     http://dropwizard.github.io/metrics/manual/core/#timers</a>
+     *
+     * @param message the message being delivered to the actor
+     * @throws Exception whatever the wrapped actor's onReceive throws
+     */
+    @Override
+    public void apply(Object message) throws Exception {
+        final String messageType = message.getClass().getSimpleName();
+        final Timer perTypeTimer = metricRegistry.timer(
+            MetricRegistry.name(actorName, MSG_PROCESSING_RATE, messageType));
+
+        // Start overall first, stop it last, mirroring nested timing scopes.
+        final Timer.Context overall = msgProcessingTimer.time();
+        final Timer.Context perType = perTypeTimer.time();
+
+        meteredActor.onReceive(message);
+
+        perType.stop();
+        overall.stop();
+    }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.remote.rpc.messages;
+package org.opendaylight.controller.cluster.common.actor;
import akka.actor.ActorRef;
private final ActorRef actorRef;
public Monitor(ActorRef actorRef){
-
this.actorRef = actorRef;
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.typesafe.config.Config;
+
+/**
+ * Represents a unified view of configuration.
+ * <p/>
+ * It merges configuration from:
+ * <ul>
+ * <li>Config subsystem</li>
+ * <li>Akka configuration files</li>
+ * </ul>
+ *
+ * Configurations defined in config subsystem takes precedence.
+ */
+public interface UnifiedConfig {
+
+    /**
+     * Returns an immutable instance of unified configuration.
+     *
+     * @return the merged, immutable configuration view
+     */
+    public Config get();
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.cluster.datastore;
+package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
import com.google.common.base.Preconditions;
import com.google.protobuf.GeneratedMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.UnknownFieldSet;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.common.reporting;
+package org.opendaylight.controller.cluster.reporting;
import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.MetricRegistry;
public class MetricsReporter implements AutoCloseable{
private final MetricRegistry METRICS_REGISTRY = new MetricRegistry();
- private final String DOMAIN = "org.opendaylight.controller";
+ private final String DOMAIN = "org.opendaylight.controller.actor.metric";
public final JmxReporter jmxReporter = JmxReporter.forRegistry(METRICS_REGISTRY).inDomain(DOMAIN).build();
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.common.actor;
-
-import akka.actor.ActorPath;
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.dispatch.BoundedDequeBasedMailbox;
-import akka.dispatch.MailboxType;
-import akka.dispatch.ProducesMessageQueue;
-import com.codahale.metrics.Gauge;
-import com.codahale.metrics.MetricRegistry;
-import com.google.common.base.Preconditions;
-import com.typesafe.config.Config;
-import org.opendaylight.controller.common.reporting.MetricsReporter;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.concurrent.TimeUnit;
-
-public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<MeteredBoundedMailbox.MeteredMessageQueue> {
-
- private MeteredMessageQueue queue;
- private Integer capacity;
- private FiniteDuration pushTimeOut;
- private ActorPath actorPath;
- private MetricsReporter reporter;
-
- private final String QUEUE_SIZE = "queue-size";
- private final String CAPACITY = "mailbox-capacity";
- private final String TIMEOUT = "mailbox-push-timeout-time";
- private final Long DEFAULT_TIMEOUT = 10L;
-
- public MeteredBoundedMailbox(ActorSystem.Settings settings, Config config) {
- Preconditions.checkArgument( config.hasPath("mailbox-capacity"), "Missing configuration [mailbox-capacity]" );
- this.capacity = config.getInt(CAPACITY);
- Preconditions.checkArgument( this.capacity > 0, "mailbox-capacity must be > 0");
-
- Long timeout = -1L;
- if ( config.hasPath(TIMEOUT) ){
- timeout = config.getDuration(TIMEOUT, TimeUnit.NANOSECONDS);
- } else {
- timeout = DEFAULT_TIMEOUT;
- }
- Preconditions.checkArgument( timeout > 0, "mailbox-push-timeout-time must be > 0");
- this.pushTimeOut = new FiniteDuration(timeout, TimeUnit.NANOSECONDS);
-
- reporter = MetricsReporter.getInstance();
- }
-
-
- @Override
- public MeteredMessageQueue create(final scala.Option<ActorRef> owner, scala.Option<ActorSystem> system) {
- this.queue = new MeteredMessageQueue(this.capacity, this.pushTimeOut);
- monitorQueueSize(owner, this.queue);
- return this.queue;
- }
-
- private void monitorQueueSize(scala.Option<ActorRef> owner, final MeteredMessageQueue monitoredQueue) {
- if (owner.isEmpty()) {
- return; //there's no actor to monitor
- }
- actorPath = owner.get().path();
- String actorInstanceId = Integer.toString(owner.get().hashCode());
-
- MetricRegistry registry = reporter.getMetricsRegistry();
- String actorName = registry.name(actorPath.toString(), actorInstanceId, QUEUE_SIZE);
-
- if (registry.getMetrics().containsKey(actorName))
- return; //already registered
-
- registry.register(actorName,
- new Gauge<Integer>() {
- @Override
- public Integer getValue() {
- return monitoredQueue.size();
- }
- });
- }
-
-
- public static class MeteredMessageQueue extends BoundedDequeBasedMailbox.MessageQueue {
-
- public MeteredMessageQueue(int capacity, FiniteDuration pushTimeOut) {
- super(capacity, pushTimeOut);
- }
- }
-
-}
-
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: InstallSnapshot.proto
-package org.opendaylight.controller.cluster.raft.protobuff.messages;
+package org.opendaylight.controller.protobuff.messages.cluster.raft;
public final class InstallSnapshotMessages {
private InstallSnapshotMessages() {}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
}
public static com.google.protobuf.Parser<InstallSnapshot> PARSER =
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
- com.google.protobuf.ByteString bs =
+ com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
getLeaderIdBytes() {
java.lang.Object ref = leaderId_;
if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
leaderId_ = b;
return super.writeReplace();
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot prototype) {
+ public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshotOrBuilder {
+ implements org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshotOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
}
- // Construct using org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.newBuilder()
+ // Construct using org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
- return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
+ return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot build() {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot build() {
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot buildPartial() {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot(this);
+ public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot buildPartial() {
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) {
- return mergeFrom((org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot)other);
+ if (other instanceof org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) {
+ return mergeFrom((org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot other) {
- if (other == org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot other) {
+ if (other == org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
if (other.hasTerm()) {
setTerm(other.getTerm());
}
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
+ org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
+ parsedMessage = (org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
getLeaderIdBytes() {
java.lang.Object ref = leaderId_;
if (ref instanceof String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
leaderId_ = b;
"\021lastIncludedIndex\030\003 \001(\003\022\030\n\020lastIncluded" +
"Term\030\004 \001(\003\022\014\n\004data\030\005 \001(\014\022\022\n\nchunkIndex\030\006" +
" \001(\005\022\023\n\013totalChunks\030\007 \001(\005BX\n;org.openday" +
- "light.controller.cluster.raft.protobuff." +
- "messagesB\027InstallSnapshotMessagesH\001"
+ "light.controller.protobuff.messages.clus" +
+ "ter.raftB\027InstallSnapshotMessagesH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
}
public interface CloseTransactionChainOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // optional string transactionChainId = 1;
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ boolean hasTransactionChainId();
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ java.lang.String getTransactionChainId();
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getTransactionChainIdBytes();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CloseTransactionChain}
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
}
break;
}
+ case 10: {
+ bitField0_ |= 0x00000001;
+ transactionChainId_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return PARSER;
}
+ private int bitField0_;
+ // optional string transactionChainId = 1;
+ public static final int TRANSACTIONCHAINID_FIELD_NUMBER = 1;
+ private java.lang.Object transactionChainId_;
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public boolean hasTransactionChainId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public java.lang.String getTransactionChainId() {
+ java.lang.Object ref = transactionChainId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ transactionChainId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionChainIdBytes() {
+ java.lang.Object ref = transactionChainId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionChainId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private void initFields() {
+ transactionChainId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTransactionChainIdBytes());
+ }
getUnknownFields().writeTo(output);
}
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTransactionChainIdBytes());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
public Builder clear() {
super.clear();
+ transactionChainId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain buildPartial() {
org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.transactionChainId_ = transactionChainId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain other) {
if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain.getDefaultInstance()) return this;
+ if (other.hasTransactionChainId()) {
+ bitField0_ |= 0x00000001;
+ transactionChainId_ = other.transactionChainId_;
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
}
return this;
}
+ private int bitField0_;
+
+ // optional string transactionChainId = 1;
+ private java.lang.Object transactionChainId_ = "";
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public boolean hasTransactionChainId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public java.lang.String getTransactionChainId() {
+ java.lang.Object ref = transactionChainId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ transactionChainId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionChainIdBytes() {
+ java.lang.Object ref = transactionChainId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionChainId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public Builder setTransactionChainId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionChainId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public Builder clearTransactionChainId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ transactionChainId_ = getDefaultInstance().getTransactionChainId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string transactionChainId = 1;</code>
+ */
+ public Builder setTransactionChainIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionChainId_ = value;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CloseTransactionChain)
}
static {
java.lang.String[] descriptorData = {
"\n\033ShardTransactionChain.proto\022!org.opend" +
- "aylight.controller.mdsal\"\027\n\025CloseTransac" +
- "tionChain\"\034\n\032CloseTransactionChainReply\"" +
- "\030\n\026CreateTransactionChain\";\n\033CreateTrans" +
- "actionChainReply\022\034\n\024transactionChainPath" +
- "\030\001 \002(\tB[\n:org.opendaylight.controller.pr" +
- "otobuff.messages.transactionB\035ShardTrans" +
- "actionChainMessages"
+ "aylight.controller.mdsal\"3\n\025CloseTransac" +
+ "tionChain\022\032\n\022transactionChainId\030\001 \001(\t\"\034\n" +
+ "\032CloseTransactionChainReply\"\030\n\026CreateTra" +
+ "nsactionChain\";\n\033CreateTransactionChainR" +
+ "eply\022\034\n\024transactionChainPath\030\001 \002(\tB[\n:or" +
+ "g.opendaylight.controller.protobuff.mess" +
+ "ages.transactionB\035ShardTransactionChainM" +
+ "essages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChain_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChain_descriptor,
- new java.lang.String[] { });
+ new java.lang.String[] { "TransactionChainId", });
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChainReply_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChainReply_fieldAccessorTable = new
* <code>required int32 transactionType = 2;</code>
*/
int getTransactionType();
+
+ // optional string transactionChainId = 3;
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ boolean hasTransactionChainId();
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ java.lang.String getTransactionChainId();
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ com.google.protobuf.ByteString
+ getTransactionChainIdBytes();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransaction}
transactionType_ = input.readInt32();
break;
}
+ case 26: {
+ bitField0_ |= 0x00000004;
+ transactionChainId_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return transactionType_;
}
+ // optional string transactionChainId = 3;
+ public static final int TRANSACTIONCHAINID_FIELD_NUMBER = 3;
+ private java.lang.Object transactionChainId_;
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public boolean hasTransactionChainId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public java.lang.String getTransactionChainId() {
+ java.lang.Object ref = transactionChainId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ transactionChainId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionChainIdBytes() {
+ java.lang.Object ref = transactionChainId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionChainId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private void initFields() {
transactionId_ = "";
transactionType_ = 0;
+ transactionChainId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt32(2, transactionType_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getTransactionChainIdBytes());
+ }
getUnknownFields().writeTo(output);
}
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, transactionType_);
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getTransactionChainIdBytes());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
bitField0_ = (bitField0_ & ~0x00000001);
transactionType_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
+ transactionChainId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
to_bitField0_ |= 0x00000002;
}
result.transactionType_ = transactionType_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.transactionChainId_ = transactionChainId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
if (other.hasTransactionType()) {
setTransactionType(other.getTransactionType());
}
+ if (other.hasTransactionChainId()) {
+ bitField0_ |= 0x00000004;
+ transactionChainId_ = other.transactionChainId_;
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
return this;
}
+ // optional string transactionChainId = 3;
+ private java.lang.Object transactionChainId_ = "";
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public boolean hasTransactionChainId() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public java.lang.String getTransactionChainId() {
+ java.lang.Object ref = transactionChainId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ transactionChainId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionChainIdBytes() {
+ java.lang.Object ref = transactionChainId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionChainId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public Builder setTransactionChainId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ transactionChainId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public Builder clearTransactionChainId() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ transactionChainId_ = getDefaultInstance().getTransactionChainId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string transactionChainId = 3;</code>
+ */
+ public Builder setTransactionChainIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ transactionChainId_ = value;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CreateTransaction)
}
java.lang.String[] descriptorData = {
"\n\026ShardTransaction.proto\022!org.opendaylig" +
"ht.controller.mdsal\032\014Common.proto\"\022\n\020Clo" +
- "seTransaction\"\027\n\025CloseTransactionReply\"C" +
+ "seTransaction\"\027\n\025CloseTransactionReply\"_" +
"\n\021CreateTransaction\022\025\n\rtransactionId\030\001 \002" +
- "(\t\022\027\n\017transactionType\030\002 \002(\005\"M\n\026CreateTra" +
- "nsactionReply\022\034\n\024transactionActorPath\030\001 " +
- "\002(\t\022\025\n\rtransactionId\030\002 \002(\t\"\022\n\020ReadyTrans" +
- "action\"*\n\025ReadyTransactionReply\022\021\n\tactor" +
- "Path\030\001 \002(\t\"l\n\nDeleteData\022^\n\037instanceIden" +
- "tifierPathArguments\030\001 \002(\01325.org.opendayl",
- "ight.controller.mdsal.InstanceIdentifier" +
- "\"\021\n\017DeleteDataReply\"j\n\010ReadData\022^\n\037insta" +
- "nceIdentifierPathArguments\030\001 \002(\01325.org.o" +
- "pendaylight.controller.mdsal.InstanceIde" +
- "ntifier\"P\n\rReadDataReply\022?\n\016normalizedNo" +
- "de\030\001 \001(\0132\'.org.opendaylight.controller.m" +
- "dsal.Node\"\254\001\n\tWriteData\022^\n\037instanceIdent" +
- "ifierPathArguments\030\001 \002(\01325.org.opendayli" +
- "ght.controller.mdsal.InstanceIdentifier\022" +
- "?\n\016normalizedNode\030\002 \002(\0132\'.org.opendaylig",
- "ht.controller.mdsal.Node\"\020\n\016WriteDataRep" +
- "ly\"\254\001\n\tMergeData\022^\n\037instanceIdentifierPa" +
- "thArguments\030\001 \002(\01325.org.opendaylight.con" +
- "troller.mdsal.InstanceIdentifier\022?\n\016norm" +
- "alizedNode\030\002 \002(\0132\'.org.opendaylight.cont" +
- "roller.mdsal.Node\"\020\n\016MergeDataReply\"l\n\nD" +
- "ataExists\022^\n\037instanceIdentifierPathArgum" +
- "ents\030\001 \002(\01325.org.opendaylight.controller" +
- ".mdsal.InstanceIdentifier\"!\n\017DataExistsR" +
- "eply\022\016\n\006exists\030\001 \002(\010BV\n:org.opendaylight",
- ".controller.protobuff.messages.transacti" +
- "onB\030ShardTransactionMessages"
+ "(\t\022\027\n\017transactionType\030\002 \002(\005\022\032\n\022transacti" +
+ "onChainId\030\003 \001(\t\"M\n\026CreateTransactionRepl" +
+ "y\022\034\n\024transactionActorPath\030\001 \002(\t\022\025\n\rtrans" +
+ "actionId\030\002 \002(\t\"\022\n\020ReadyTransaction\"*\n\025Re" +
+ "adyTransactionReply\022\021\n\tactorPath\030\001 \002(\t\"l" +
+ "\n\nDeleteData\022^\n\037instanceIdentifierPathAr",
+ "guments\030\001 \002(\01325.org.opendaylight.control" +
+ "ler.mdsal.InstanceIdentifier\"\021\n\017DeleteDa" +
+ "taReply\"j\n\010ReadData\022^\n\037instanceIdentifie" +
+ "rPathArguments\030\001 \002(\01325.org.opendaylight." +
+ "controller.mdsal.InstanceIdentifier\"P\n\rR" +
+ "eadDataReply\022?\n\016normalizedNode\030\001 \001(\0132\'.o" +
+ "rg.opendaylight.controller.mdsal.Node\"\254\001" +
+ "\n\tWriteData\022^\n\037instanceIdentifierPathArg" +
+ "uments\030\001 \002(\01325.org.opendaylight.controll" +
+ "er.mdsal.InstanceIdentifier\022?\n\016normalize",
+ "dNode\030\002 \002(\0132\'.org.opendaylight.controlle" +
+ "r.mdsal.Node\"\020\n\016WriteDataReply\"\254\001\n\tMerge" +
+ "Data\022^\n\037instanceIdentifierPathArguments\030" +
+ "\001 \002(\01325.org.opendaylight.controller.mdsa" +
+ "l.InstanceIdentifier\022?\n\016normalizedNode\030\002" +
+ " \002(\0132\'.org.opendaylight.controller.mdsal" +
+ ".Node\"\020\n\016MergeDataReply\"l\n\nDataExists\022^\n" +
+ "\037instanceIdentifierPathArguments\030\001 \002(\01325" +
+ ".org.opendaylight.controller.mdsal.Insta" +
+ "nceIdentifier\"!\n\017DataExistsReply\022\016\n\006exis",
+ "ts\030\001 \002(\010BV\n:org.opendaylight.controller." +
+ "protobuff.messages.transactionB\030ShardTra" +
+ "nsactionMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_CreateTransaction_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CreateTransaction_descriptor,
- new java.lang.String[] { "TransactionId", "TransactionType", });
+ new java.lang.String[] { "TransactionId", "TransactionType", "TransactionChainId", });
internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_fieldAccessorTable = new
for (Entry<URI, String> e: prefixes.getPrefixes()) {
writer.writeNamespace(e.getValue(), e.getKey().toString());
}
- LOG.debug("Instance identifier with Random prefix is now {}", str);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Instance identifier with Random prefix is now {}", str);
+ }
writer.writeCharacters(str);
}
DataSchemaNode childSchema = null;
if (schema instanceof DataNodeContainer) {
childSchema = SchemaUtils.findFirstSchema(child.getNodeType(), ((DataNodeContainer) schema).getChildNodes()).orNull();
- if (childSchema == null) {
+ if (childSchema == null && LOG.isDebugEnabled()) {
LOG.debug("Probably the data node \"{}\" does not conform to schema", child == null ? "" : child.getNodeType().getLocalName());
}
}
*/
public void writeValue(final @Nonnull XMLStreamWriter writer, final @Nonnull TypeDefinition<?> type, final Object value) throws XMLStreamException {
if (value == null) {
- LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+ if(LOG.isDebugEnabled()){
+ LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+ }
return;
}
writer.writeNamespace(prefix, qname.getNamespace().toString());
writer.writeCharacters(prefix + ':' + qname.getLocalName());
} else {
- LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+ }
writer.writeCharacters(String.valueOf(value));
}
}
private static void write(final @Nonnull XMLStreamWriter writer, final @Nonnull InstanceIdentifierTypeDefinition type, final @Nonnull Object value) throws XMLStreamException {
if (value instanceof YangInstanceIdentifier) {
- LOG.debug("Writing InstanceIdentifier object {}", value);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Writing InstanceIdentifier object {}", value);
+ }
write(writer, (YangInstanceIdentifier)value);
} else {
- LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
- writer.writeCharacters(String.valueOf(value));
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+ }
+ writer.writeCharacters(String.valueOf(value));
}
}
}
* @return xml String
*/
public static String inputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
- LOG.debug("Converting input composite node to xml {}", cNode);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converting input composite node to xml {}", cNode);
+ }
if (cNode == null) {
return BLANK;
}
Set<RpcDefinition> rpcs = schemaContext.getOperations();
for(RpcDefinition rpc : rpcs) {
if(rpc.getQName().equals(cNode.getNodeType())){
- LOG.debug("Found the rpc definition from schema context matching with input composite node {}", rpc.getQName());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Found the rpc definition from schema context matching with input composite node {}", rpc.getQName());
+ }
CompositeNode inputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "input"));
domTree = XmlDocumentUtils.toDocument(inputContainer, rpc.getInput(), XmlDocumentUtils.defaultValueCodecProvider());
-
- LOG.debug("input composite node to document conversion complete, document is {}", domTree);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("input composite node to document conversion complete, document is {}", domTree);
+ }
break;
}
}
* @return xml string
*/
public static String outputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
- LOG.debug("Converting output composite node to xml {}", cNode);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converting output composite node to xml {}", cNode);
+ }
if (cNode == null) {
return BLANK;
}
Set<RpcDefinition> rpcs = schemaContext.getOperations();
for(RpcDefinition rpc : rpcs) {
if(rpc.getQName().equals(cNode.getNodeType())){
- LOG.debug("Found the rpc definition from schema context matching with output composite node {}", rpc.getQName());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Found the rpc definition from schema context matching with output composite node {}", rpc.getQName());
+ }
CompositeNode outputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "output"));
domTree = XmlDocumentUtils.toDocument(outputContainer, rpc.getOutput(), XmlDocumentUtils.defaultValueCodecProvider());
-
- LOG.debug("output composite node to document conversion complete, document is {}", domTree);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("output composite node to document conversion complete, document is {}", domTree);
+ }
break;
}
}
LOG.error("Error during translation of Document to OutputStream", e);
}
- LOG.debug("Document to string conversion complete, xml string is {} ", writer.toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Document to string conversion complete, xml string is {} ", writer.toString());
+ }
return writer.toString();
}
* @return CompositeNode object based on the input, if any of the input parameter is null, a null object is returned
*/
public static CompositeNode inputXmlToCompositeNode(QName rpc, String xml, SchemaContext schemaContext){
- LOG.debug("Converting input xml to composite node {}", xml);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converting input xml to composite node {}", xml);
+ }
if (xml==null || xml.length()==0) {
return null;
}
Set<RpcDefinition> rpcs = schemaContext.getOperations();
for(RpcDefinition rpcDef : rpcs) {
if(rpcDef.getQName().equals(rpc)){
- LOG.debug("found the rpc definition from schema context matching rpc {}", rpc);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("found the rpc definition from schema context matching rpc {}", rpc);
+ }
if(rpcDef.getInput() == null) {
LOG.warn("found rpc definition's input is null");
return null;
List<Node<?>> dataNodes = XmlDocumentUtils.toDomNodes(xmlData,
Optional.of(rpcDef.getInput().getChildNodes()), schemaContext);
-
- LOG.debug("Converted xml input to list of nodes {}", dataNodes);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Converted xml input to list of nodes {}", dataNodes);
+ }
final CompositeNodeBuilder<ImmutableCompositeNode> it = ImmutableCompositeNode.builder();
it.setQName(rpc);
it.add(ImmutableCompositeNode.create(input, dataNodes));
} catch (IOException e) {
LOG.error("Error during building data tree from XML", e);
}
-
- LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+ }
return compositeNode;
}
package org.opendaylight.controller.cluster.raft;
-option java_package = "org.opendaylight.controller.cluster.raft.protobuff.messages";
+option java_package = "org.opendaylight.controller.protobuff.messages.cluster.raft";
option java_outer_classname = "InstallSnapshotMessages";
option optimize_for = SPEED;
message CreateTransaction{
required string transactionId = 1;
required int32 transactionType =2;
+ optional string transactionChainId = 3;
}
message CreateTransactionReply{
option java_outer_classname = "ShardTransactionChainMessages";
message CloseTransactionChain {
-
+ optional string transactionChainId = 1;
}
message CloseTransactionChainReply{
-
-}
-
-message CreateTransactionChain {
-
-}
-
-message CreateTransactionChainReply{
-required string transactionChainPath = 1;
-
}
--- /dev/null
+package org.opendaylight.controller.cluster.common.actor;
+
+import org.junit.Test;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class CommonConfigTest {
+
+ @Test
+ public void testCommonConfigDefaults(){
+ CommonConfig config = new CommonConfig.Builder<>("testsystem").build();
+
+ assertNotNull(config.getActorSystemName());
+ assertNotNull(config.getMailBoxCapacity());
+ assertNotNull(config.getMailBoxName());
+ assertNotNull(config.getMailBoxPushTimeout());
+ assertNotNull(config.isMetricCaptureEnabled());
+ }
+
+ @Test
+ public void testCommonConfigOverride(){
+
+ int expectedCapacity = 123;
+ String timeoutValue = "1000ms";
+ CommonConfig config = new CommonConfig.Builder<>("testsystem")
+ .mailboxCapacity(expectedCapacity)
+ .mailboxPushTimeout(timeoutValue)
+ .metricCaptureEnabled(true)
+ .build();
+
+ assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+
+ FiniteDuration expectedTimeout = FiniteDuration.create(1000, TimeUnit.MILLISECONDS);
+ assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+
+ assertTrue(config.isMetricCaptureEnabled());
+ }
+}
\ No newline at end of file
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.common.actor;
+package org.opendaylight.controller.cluster.common.actor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
public class MeteredBoundedMailboxTest {
private static ActorSystem actorSystem;
+ private static CommonConfig config;
private final ReentrantLock lock = new ReentrantLock();
@Before
public void setUp() throws Exception {
- actorSystem = ActorSystem.create("testsystem");
+ config = new CommonConfig.Builder<>("testsystem").build();
+ actorSystem = ActorSystem.create("testsystem", config.get());
}
@After
}
@Test
- public void test_WhenQueueIsFull_ShouldSendMsgToDeadLetter() throws InterruptedException {
+ public void shouldSendMsgToDeadLetterWhenQueueIsFull() throws InterruptedException {
final JavaTestKit mockReceiver = new JavaTestKit(actorSystem);
actorSystem.eventStream().subscribe(mockReceiver.getRef(), DeadLetter.class);
final FiniteDuration TWENTY_SEC = new FiniteDuration(20, TimeUnit.SECONDS);
- String boundedMailBox = actorSystem.name() + ".bounded-mailbox";
- ActorRef pingPongActor = actorSystem.actorOf(PingPongActor.props(lock).withMailbox(boundedMailBox),
+ ActorRef pingPongActor = actorSystem.actorOf(PingPongActor.props(lock).withMailbox(config.getMailBoxName()),
"pingpongactor");
actorSystem.mailboxes().settings();
testsystem {
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 10
mailbox-push-timeout-time = 100ms
}
-testsystem {
-
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 10ms
}
-}
\ No newline at end of file
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
<name>dom-broker</name>
</dom-broker>
+ <enable-metric-capture xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">true</enable-metric-capture>
+ <actor-system-name xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">odl-cluster-rpc</actor-system-name>
+ <bounded-mailbox-capacity xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">1000</bounded-mailbox-capacity>
</module>
</modules>
odl-cluster-data {
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 100ms
- }
+ }
+
+ metric-capture-enabled = true
+
akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+
actor {
+
provider = "akka.cluster.ClusterActorRefProvider"
serializers {
java = "akka.serialization.JavaSerializer"
odl-cluster-rpc {
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 100ms
}
+
+ metric-capture-enabled = true
+
akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+
actor {
provider = "akka.cluster.ClusterActorRefProvider"
}
cluster {
- seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"]
+ seed-nodes = ["akka.tcp://odl-cluster-rpc@127.0.0.1:2551"]
auto-down-unreachable-after = 10s
}
package org.opendaylight.controller.md.sal.common.api.data;
+import com.google.common.base.Supplier;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-
-import com.google.common.base.Function;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
/**
* A type of TransactionCommitFailedException that indicates a situation that would result in a
* @author Thomas Pantelis
*/
public class TransactionCommitDeadlockException extends TransactionCommitFailedException {
-
private static final long serialVersionUID = 1L;
-
private static final String DEADLOCK_MESSAGE =
"An attempt to block on a ListenableFuture via a get method from a write " +
"transaction submit was detected that would result in deadlock. The commit " +
"result must be obtained asynchronously, e.g. via Futures#addCallback, to avoid deadlock.";
+ private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE);
- public static Function<Void, Exception> DEADLOCK_EXECUTOR_FUNCTION = new Function<Void, Exception>() {
+ public static final Supplier<Exception> DEADLOCK_EXCEPTION_SUPPLIER = new Supplier<Exception>() {
@Override
- public Exception apply(Void notUsed) {
- return new TransactionCommitDeadlockException( DEADLOCK_MESSAGE,
- RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE));
+ public Exception get() {
+ return new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR);
}
};
- public TransactionCommitDeadlockException(String message, final RpcError... errors) {
+ public TransactionCommitDeadlockException(final String message, final RpcError... errors) {
super(message, errors);
}
}
*/
package org.opendaylight.controller.md.sal.common.impl.service;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
-
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.AbstractDataModification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-
public abstract class AbstractDataTransaction<P extends Path<P>, D extends Object> extends
AbstractDataModification<P, D> {
- private final static Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+ private static final ListenableFuture<RpcResult<TransactionStatus>> SUCCESS_FUTURE =
+ Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
private final Object identifier;
private final long allocationTime;
@Override
public Future<RpcResult<TransactionStatus>> commit() {
readyTime = System.nanoTime();
- LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+ }
changeStatus(TransactionStatus.SUBMITED);
-
return this.broker.commit(this);
}
}
@Override
- public boolean equals(Object obj) {
+ public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
public void succeeded() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.COMMITED);
}
public void failed() {
this.completeTime = System.nanoTime();
- LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+ }
changeStatus(TransactionStatus.FAILED);
}
this.onStatusChange(status);
}
- public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(
- CheckedFuture<Void,TransactionCommitFailedException> from ) {
+ public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(final CheckedFuture<Void,TransactionCommitFailedException> from) {
return Futures.transform(from, new AsyncFunction<Void, RpcResult<TransactionStatus>>() {
@Override
- public ListenableFuture<RpcResult<TransactionStatus>> apply(Void input) throws Exception {
- return Futures.immediateFuture(RpcResultBuilder.<TransactionStatus>
- success(TransactionStatus.COMMITED).build());
+ public ListenableFuture<RpcResult<TransactionStatus>> apply(final Void input) {
+ return SUCCESS_FUTURE;
}
- } );
+ });
}
}
import javax.annotation.Nullable;
import org.opendaylight.yangtools.util.concurrent.CountingRejectedExecutionHandler;
import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* MXBean implementation of the ThreadExecutorStatsMXBean interface that retrieves statistics
*/
public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
implements ThreadExecutorStatsMXBean {
-
+ private static final Logger LOG = LoggerFactory.getLogger(ThreadExecutorStatsMXBeanImpl.class);
private final ThreadPoolExecutor executor;
/**
* @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
* @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
*/
- public ThreadExecutorStatsMXBeanImpl(Executor executor, String mBeanName,
- String mBeanType, @Nullable String mBeanCategory) {
+ public ThreadExecutorStatsMXBeanImpl(final ThreadPoolExecutor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
super(mBeanName, mBeanType, mBeanCategory);
+ this.executor = Preconditions.checkNotNull(executor);
+ }
+
+ private static ThreadExecutorStatsMXBeanImpl createInternal(final Executor executor,
+ final String mBeanName, final String mBeanType, final String mBeanCategory) {
+ if (executor instanceof ThreadPoolExecutor) {
+ final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl(
+ (ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+ return ret;
+ }
+
+ LOG.info("Executor {} is not supported", executor);
+ return null;
+ }
+
+ /**
+ * Creates a new bean if the backing executor is a ThreadPoolExecutor and registers it.
+ *
+ * @param executor the backing {@link Executor}
+ * @param mBeanName Used as the <code>name</code> property in the bean's ObjectName.
+ * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
+ * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
+ * @return a registered ThreadExecutorStatsMXBeanImpl instance if the backing executor
+ * is a ThreadPoolExecutor, otherwise null.
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
+ final String mBeanType, @Nullable final String mBeanCategory) {
+ ThreadExecutorStatsMXBeanImpl ret = createInternal(executor, mBeanName, mBeanType, mBeanCategory);
+ if(ret != null) {
+ ret.registerMBean();
+ }
- Preconditions.checkArgument(executor instanceof ThreadPoolExecutor,
- "The ExecutorService of type {} is not an instanceof ThreadPoolExecutor",
- executor.getClass());
- this.executor = (ThreadPoolExecutor)executor;
+ return ret;
+ }
+
+ /**
+ * Creates a new bean if the backing executor is a ThreadPoolExecutor.
+ *
+ * @param executor the backing {@link Executor}
+ * @return a ThreadExecutorStatsMXBeanImpl instance if the backing executor
+ * is a ThreadPoolExecutor, otherwise null.
+ */
+ public static ThreadExecutorStatsMXBeanImpl create(final Executor executor) {
+ return createInternal(executor, "", "", null);
}
@Override
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.osgi.BundleDelegatingClassLoader;
-import com.google.common.base.Preconditions;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import org.osgi.framework.BundleContext;
-
-import java.io.File;
-
-public class ActorSystemFactory {
-
- public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
- public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
- public static final String CONFIGURATION_NAME = "odl-cluster-data";
-
- private static volatile ActorSystem actorSystem = null;
-
- public static final ActorSystem getInstance(){
- return actorSystem;
- }
-
- /**
- * This method should be called only once during initialization
- *
- * @param bundleContext
- */
- public static final ActorSystem createInstance(final BundleContext bundleContext) {
- if(actorSystem == null) {
- // Create an OSGi bundle classloader for actor system
- BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
- Thread.currentThread().getContextClassLoader());
- synchronized (ActorSystemFactory.class) {
- // Double check
-
- if (actorSystem == null) {
- ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
- ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
- system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
- actorSystem = system;
- }
- }
- }
-
- return actorSystem;
- }
-
-
- private static final Config readAkkaConfiguration(){
- File defaultConfigFile = new File(AKKA_CONF_PATH);
- Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
- return ConfigFactory.parseFile(defaultConfigFile);
- }
-}
import java.util.List;
import java.util.Map;
+import java.util.Set;
public interface Configuration {
* @return
*/
List<String> getMembersFromShardName(String shardName);
+
+ /**
+ *
+ * @return
+ */
+ Set<String> getAllShardNames();
}
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
public class ConfigurationImpl implements Configuration {
return Collections.EMPTY_LIST;
}
+ @Override public Set<String> getAllShardNames() {
+ Set<String> shardNames = new LinkedHashSet<>();
+ for(ModuleShard ms : moduleShards){
+ for(Shard s : ms.getShards()) {
+ shardNames.add(s.getName());
+ }
+ }
+ return shardNames;
+ }
+
private void readModules(Config modulesConfig) {
import akka.japi.Creator;
import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.japi.Creator;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistrationReply;
* </p>
*/
public class DataChangeListenerRegistrationProxy implements ListenerRegistration {
- private final ActorSelection listenerRegistrationActor;
+ private volatile ActorSelection listenerRegistrationActor;
private final AsyncDataChangeListener listener;
private final ActorRef dataChangeListenerActor;
+ private boolean closed = false;
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
DataChangeListenerRegistrationProxy(
this.dataChangeListenerActor = dataChangeListenerActor;
}
+ public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
+ DataChangeListenerRegistrationProxy(
+ L listener, ActorRef dataChangeListenerActor) {
+ this(null, listener, dataChangeListenerActor);
+ }
+
@Override
public Object getInstance() {
return listener;
}
+ public void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) {
+ boolean sendCloseMessage = false;
+ synchronized(this) {
+ if(closed) {
+ sendCloseMessage = true;
+ } else {
+ this.listenerRegistrationActor = listenerRegistrationActor;
+ }
+ }
+ if(sendCloseMessage) {
+ listenerRegistrationActor.tell(new
+ CloseDataChangeListenerRegistration().toSerializable(), null);
+ }
+
+ this.listenerRegistrationActor = listenerRegistrationActor;
+ }
+
+ public ActorSelection getListenerRegistrationActor() {
+ return listenerRegistrationActor;
+ }
+
@Override
public void close() {
- listenerRegistrationActor.tell(new CloseDataChangeListenerRegistration().toSerializable(), null);
+
+ boolean sendCloseMessage;
+ synchronized(this) {
+ sendCloseMessage = !closed && listenerRegistrationActor != null;
+ closed = true;
+ }
+ if(sendCloseMessage) {
+ listenerRegistrationActor.tell(new
+ CloseDataChangeListenerRegistration().toSerializable(), null);
+ }
+
dataChangeListenerActor.tell(PoisonPill.getInstance(), null);
}
}
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
-
+import akka.dispatch.OnComplete;
+import akka.util.Timeout;
import com.google.common.base.Preconditions;
-
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
/**
*
public class DistributedDataStore implements DOMStore, SchemaContextListener, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
+ public static final int REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR = 24; // 24 times the usual operation timeout
private final ActorContext actorContext;
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
ListenerRegistration<L> registerChangeListener(
- YangInstanceIdentifier path, L listener,
+ final YangInstanceIdentifier path, L listener,
AsyncDataBroker.DataChangeScope scope) {
Preconditions.checkNotNull(path, "path should not be null");
Preconditions.checkNotNull(listener, "listener should not be null");
-
- LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+ }
ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
DataChangeListener.props(listener ));
String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
+ // Registration is now asynchronous with an extended (24x) timeout.
+ // NOTE(review): raw Future/OnComplete types - presumably Future<Object>;
+ // consider parameterizing to avoid unchecked warnings.
+ Future future = actorContext.executeLocalShardOperationAsync(shardName,
+ new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
+ new Timeout(actorContext.getOperationDuration().$times(
+ REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR)));
+
+ if (future != null) {
+ final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
+ new DataChangeListenerRegistrationProxy(listener, dataChangeListenerActor);
+
+ future.onComplete(new OnComplete(){
+
+ @Override public void onComplete(Throwable failure, Object result)
+ throws Throwable {
+ if(failure != null){
+ // NOTE(review): on failure the proxy is never resolved - it
+ // only logs; closing the returned registration still stops
+ // the listener actor, but no shard-side cleanup is needed.
+ LOG.error("Failed to register listener at path " + path.toString(), failure);
+ return;
+ }
+ RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
+ listenerRegistrationProxy.setListenerRegistrationActor(actorContext
+ .actorSelection(reply.getListenerRegistrationPath()));
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ return listenerRegistrationProxy;
+ }
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "No local shard for shardName {} was found so returning a noop registration",
+ shardName);
}
-
- LOG.debug(
- "No local shard for shardName {} was found so returning a noop registration",
- shardName);
-
return new NoOpDataChangeListenerRegistration(listener);
}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorSystem;
-
+import akka.actor.Props;
+import akka.osgi.BundleDelegatingClassLoader;
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.osgi.framework.BundleContext;
+import java.io.File;
+import java.util.concurrent.atomic.AtomicReference;
+
public class DistributedDataStoreFactory {
+
+ public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+ public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
+ public static final String CONFIGURATION_NAME = "odl-cluster-data";
+ private static AtomicReference<ActorSystem> actorSystem = new AtomicReference<>();
+
public static DistributedDataStore createInstance(String name, SchemaService schemaService,
- DatastoreContext datastoreContext, BundleContext bundleContext) {
+ DatastoreContext datastoreContext, BundleContext bundleContext) {
- ActorSystem actorSystem = ActorSystemFactory.createInstance(bundleContext);
+ ActorSystem actorSystem = getOrCreateInstance(bundleContext);
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
final DistributedDataStore dataStore =
- new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
- config, datastoreContext );
+ new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
+ config, datastoreContext);
+
ShardStrategyFactory.setConfiguration(config);
schemaService.registerSchemaContextListener(dataStore);
return dataStore;
}
+
+ synchronized private static final ActorSystem getOrCreateInstance(final BundleContext bundleContext) {
+
+ if (actorSystem.get() != null){
+ return actorSystem.get();
+ }
+ // Create an OSGi bundle classloader for actor system
+ BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
+ Thread.currentThread().getContextClassLoader());
+
+ ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
+ ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
+ system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+
+ actorSystem.set(system);
+ return system;
+ }
+
+
+ /**
+  * Reads the akka configuration from AKKA_CONF_PATH (resolved relative to
+  * the process working directory); fails fast via checkState if the file
+  * is absent.
+  */
+ private static final Config readAkkaConfiguration() {
+ File defaultConfigFile = new File(AKKA_CONF_PATH);
+ Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
+ return ConfigFactory.parseFile(defaultConfigFile);
+ }
}
import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
+import org.opendaylight.controller.cluster.common.actor.CommonConfig;
+import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
private final DatastoreContext datastoreContext;
-
private SchemaContext schemaContext;
private ActorRef createSnapshotTransaction;
+ private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
+
private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
DatastoreContext datastoreContext, SchemaContext schemaContext) {
super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
shardMBean.setDataStoreExecutor(store.getDomStoreExecutor());
shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
+ if (isMetricsCaptureEnabled()) {
+ getContext().become(new MeteringBehavior(this));
+ }
}
private static Map<String, String> mapPeerAddresses(
}
@Override public void onReceiveRecover(Object message) {
- LOG.debug("onReceiveRecover: Received message {} from {}", message.getClass().toString(),
- getSender());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("onReceiveRecover: Received message {} from {}",
+ message.getClass().toString(),
+ getSender());
+ }
if (message instanceof RecoveryFailure){
LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
}
@Override public void onReceiveCommand(Object message) {
- LOG.debug("onReceiveCommand: Received message {} from {}", message.getClass().toString(),
- getSender());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("onReceiveCommand: Received message {} from {}",
+ message.getClass().toString(),
+ getSender());
+ }
- if (message.getClass()
- .equals(CreateTransactionChain.SERIALIZABLE_CLASS)) {
- if (isLeader()) {
- createTransactionChain();
- } else if (getLeader() != null) {
- getLeader().forward(message, getContext());
- }
- } else if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+ if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
// This must be for install snapshot. Don't want to open this up and trigger
// deSerialization
- self().tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)), self());
+ self()
+ .tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
+ self());
+ createSnapshotTransaction = null;
// Send a PoisonPill instead of sending close transaction because we do not really need
// a response
getSender().tell(PoisonPill.getInstance(), self());
+ } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
+ closeTransactionChain(CloseTransactionChain.fromSerializable(message));
} else if (message instanceof RegisterChangeListener) {
registerChangeListener((RegisterChangeListener) message);
} else if (message instanceof UpdateSchemaContext) {
createTransaction(CreateTransaction.fromSerializable(message));
} else if (getLeader() != null) {
getLeader().forward(message, getContext());
+ } else {
+ getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
+ "Could not find leader so transaction cannot be created")), getSelf());
}
} else if (message instanceof PeerAddressResolved) {
PeerAddressResolved resolved = (PeerAddressResolved) message;
}
}
+ /**
+  * Removes the chain for the given id from the local map and closes it.
+  * Unknown or already-removed chain ids are a no-op.
+  */
+ private void closeTransactionChain(CloseTransactionChain closeTransactionChain) {
+ DOMStoreTransactionChain chain =
+ transactionChains.remove(closeTransactionChain.getTransactionChainId());
+
+ if(chain != null) {
+ chain.close();
+ }
+ }
+
private ActorRef createTypedTransactionActor(
int transactionType,
- ShardTransactionIdentifier transactionId) {
+ ShardTransactionIdentifier transactionId,
+ String transactionChainId ) {
+
+ DOMStoreTransactionFactory factory = store;
+
+ if(!transactionChainId.isEmpty()) {
+ factory = transactionChains.get(transactionChainId);
+ if(factory == null){
+ DOMStoreTransactionChain transactionChain = store.createTransactionChain();
+ transactionChains.put(transactionChainId, transactionChain);
+ factory = transactionChain;
+ }
+ }
if(this.schemaContext == null){
throw new NullPointerException("schemaContext should not be null");
shardMBean.incrementReadOnlyTransactionCount();
return getContext().actorOf(
- ShardTransaction.props(store.newReadOnlyTransaction(), getSelf(),
+ ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
schemaContext,datastoreContext, shardMBean), transactionId.toString());
} else if (transactionType
shardMBean.incrementReadWriteTransactionCount();
return getContext().actorOf(
- ShardTransaction.props(store.newReadWriteTransaction(), getSelf(),
+ ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
schemaContext, datastoreContext, shardMBean), transactionId.toString());
shardMBean.incrementWriteOnlyTransactionCount();
return getContext().actorOf(
- ShardTransaction.props(store.newWriteOnlyTransaction(), getSelf(),
+ ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
schemaContext, datastoreContext, shardMBean), transactionId.toString());
} else {
throw new IllegalArgumentException(
private void createTransaction(CreateTransaction createTransaction) {
createTransaction(createTransaction.getTransactionType(),
- createTransaction.getTransactionId());
+ createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
}
- private ActorRef createTransaction(int transactionType, String remoteTransactionId) {
+ private ActorRef createTransaction(int transactionType, String remoteTransactionId, String transactionChainId) {
ShardTransactionIdentifier transactionId =
ShardTransactionIdentifier.builder()
.remoteTransactionId(remoteTransactionId)
.build();
- LOG.debug("Creating transaction : {} ", transactionId);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Creating transaction : {} ", transactionId);
+ }
ActorRef transactionActor =
- createTypedTransactionActor(transactionType, transactionId);
+ createTypedTransactionActor(transactionType, transactionId, transactionChainId);
getSender()
.tell(new CreateTransactionReply(
DOMStoreThreePhaseCommitCohort cohort =
modificationToCohort.remove(serialized);
if (cohort == null) {
- LOG.debug(
- "Could not find cohort for modification : {}. Writing modification using a new transaction",
- modification);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Could not find cohort for modification : {}. Writing modification using a new transaction",
+ modification);
+ }
+
DOMStoreWriteTransaction transaction =
store.newWriteOnlyTransaction();
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
+ }
+
modification.apply(transaction);
try {
syncCommitTransaction(transaction);
return;
}
- final ListenableFuture<Void> future = cohort.commit();
- final ActorRef self = getSelf();
+
+ if(sender == null){
+ LOG.error("Commit failed. Sender cannot be null");
+ return;
+ }
+
+ ListenableFuture<Void> future = cohort.commit();
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onSuccess(Void v) {
- sender.tell(new CommitTransactionReply().toSerializable(), self);
+ sender.tell(new CommitTransactionReply().toSerializable(), getSelf());
shardMBean.incrementCommittedTransactionCount();
shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
}
public void onFailure(Throwable t) {
LOG.error(t, "An exception happened during commit");
shardMBean.incrementFailedTransactionsCount();
- sender.tell(new akka.actor.Status.Failure(t), self);
+ sender.tell(new akka.actor.Status.Failure(t), getSelf());
}
});
private void registerChangeListener(
RegisterChangeListener registerChangeListener) {
- LOG.debug("registerDataChangeListener for {}", registerChangeListener
- .getPath());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("registerDataChangeListener for {}", registerChangeListener
+ .getPath());
+ }
ActorSelection dataChangeListenerPath = getContext()
getContext().actorOf(
DataChangeListenerRegistration.props(registration));
- LOG.debug(
- "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
- , listenerRegistration.path().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
+ , listenerRegistration.path().toString());
+ }
getSender()
.tell(new RegisterChangeListenerReply(listenerRegistration.path()),
getSelf());
}
- private void createTransactionChain() {
- DOMStoreTransactionChain chain = store.createTransactionChain();
- ActorRef transactionChain = getContext().actorOf(
- ShardTransactionChain.props(chain, schemaContext, datastoreContext, shardMBean));
- getSender().tell(new CreateTransactionChainReply(transactionChain.path()).toSerializable(),
- getSelf());
+ // Reads the metric-capture flag from the actor system's config; note this
+ // constructs a CommonConfig wrapper on every call.
+ private boolean isMetricsCaptureEnabled(){
+ CommonConfig config = new CommonConfig(getContext().system().settings().config());
+ return config.isMetricCaptureEnabled();
}
@Override protected void applyState(ActorRef clientActor, String identifier,
}
} else {
- LOG.error("Unknown state received {}", data);
+ LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader());
}
// Update stats
// so that this actor does not get block building the snapshot
createSnapshotTransaction = createTransaction(
TransactionProxy.TransactionType.READ_ONLY.ordinal(),
- "createSnapshot");
+ "createSnapshot", "");
createSnapshotTransaction.tell(
new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
// Since this will be done only on Recovery or when this actor is a Follower
// we can safely commit everything in here. We not need to worry about event notifications
// as they would have already been disabled on the follower
+
+ LOG.info("Applying snapshot");
try {
DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
syncCommitTransaction(transaction);
} catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
LOG.error(e, "An exception occurred when applying snapshot");
+ } finally {
+ LOG.info("Done applying snapshot");
}
}
.tell(new EnableNotification(isLeader()), getSelf());
}
- if (getLeaderId() != null) {
- shardMBean.setLeader(getLeaderId());
- }
-
shardMBean.setRaftState(getRaftState().name());
shardMBean.setCurrentTerm(getCurrentTerm());
+
+ // If this actor is no longer the leader close all the transaction chains
+ if(!isLeader()){
+ for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
+ entry.getKey(), getId());
+ }
+ entry.getValue().close();
+ }
+
+ transactionChains.clear();
+ }
+ }
+
+ // Mirrors raft leader changes into the shard JMX bean (replaces the
+ // getLeaderId() handling previously done in onStateChanged).
+ @Override protected void onLeaderChanged(String oldLeader, String newLeader) {
+ shardMBean.setLeader(newLeader);
}
@Override public String persistenceId() {
import akka.japi.Creator;
import akka.japi.Function;
import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
* <li> Monitor the cluster members and store their addresses
* <ul>
*/
-public class ShardManager extends AbstractUntypedActor {
+public class ShardManager extends AbstractUntypedActorWithMetering {
// Stores a mapping between a member name and the address of the member
// Member names look like "member-1", "member-2" etc and are as specified
peerAddress);
if(peerAddresses.containsKey(peerId)){
peerAddresses.put(peerId, peerAddress);
-
- LOG.debug(
- "Sending PeerAddressResolved for peer {} with address {} to {}",
- peerId, peerAddress, actor.path());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Sending PeerAddressResolved for peer {} with address {} to {}",
+ peerId, peerAddress, actor.path());
+ }
actor
.tell(new PeerAddressResolved(peerId, peerAddress),
getSelf());
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
getSender().tell(new GetCompositeModificationReply(
new ImmutableCompositeModification(modification)), getSelf());
} else if (message instanceof ReceiveTimeout) {
- LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+ }
closeTransaction(false);
} else {
throw new UnknownMessageException(message);
protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
modification.addModification(
new WriteModification(message.getPath(), message.getData(),schemaContext));
- LOG.debug("writeData at path : " + message.getPath().toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("writeData at path : " + message.getPath().toString());
+ }
try {
transaction.write(message.getPath(), message.getData());
getSender().tell(new WriteDataReply().toSerializable(), getSelf());
protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
modification.addModification(
new MergeModification(message.getPath(), message.getData(), schemaContext));
- LOG.debug("mergeData at path : " + message.getPath().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("mergeData at path : " + message.getPath().toString());
+ }
try {
transaction.merge(message.getPath(), message.getData());
getSender().tell(new MergeDataReply().toSerializable(), getSelf());
}
protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
- LOG.debug("deleteData at path : " + message.getPath().toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("deleteData at path : " + message.getPath().toString());
+ }
modification.addModification(new DeleteModification(message.getPath()));
try {
transaction.delete(message.getPath());
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
@Override public void onReceive(Object message) throws Exception {
if(message instanceof Terminated){
Terminated terminated = (Terminated) message;
- LOG.debug("Actor terminated : {}", terminated.actor());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor terminated : {}", terminated.actor());
+ }
} else if(message instanceof Monitor){
Monitor monitor = (Monitor) message;
getContext().watch(monitor.getActorRef());
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
private void commit(CommitTransaction message) {
// Forward the commit to the shard
- log.debug("Forward commit transaction to Shard {} ", shardActor);
+ if(log.isDebugEnabled()) {
+ log.debug("Forward commit transaction to Shard {} ", shardActor);
+ }
shardActor.forward(new ForwardedCommitTransaction(cohort, modification),
getContext());
@Override
public Void apply(Iterable<ActorPath> paths) {
cohortPaths = Lists.newArrayList(paths);
-
- LOG.debug("Tx {} successfully built cohort path list: {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} successfully built cohort path list: {}",
transactionId, cohortPaths);
+ }
return null;
}
}, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
@Override
public ListenableFuture<Boolean> canCommit() {
- LOG.debug("Tx {} canCommit", transactionId);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} canCommit", transactionId);
+ }
final SettableFuture<Boolean> returnFuture = SettableFuture.create();
// The first phase of canCommit is to gather the list of cohort actor paths that will
@Override
public void onComplete(Throwable failure, Void notUsed) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+ }
returnFuture.setException(failure);
} else {
finishCanCommit(returnFuture);
}
private void finishCanCommit(final SettableFuture<Boolean> returnFuture) {
-
- LOG.debug("Tx {} finishCanCommit", transactionId);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finishCanCommit", transactionId);
+ }
// The last phase of canCommit is to invoke all the cohort actors asynchronously to perform
// their canCommit processing. If any one fails then we'll fail canCommit.
@Override
public void onComplete(Throwable failure, Iterable<Object> responses) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+ }
returnFuture.setException(failure);
return;
}
return;
}
}
-
- LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
+ }
returnFuture.set(Boolean.valueOf(result));
}
}, actorContext.getActorSystem().dispatcher());
private Future<Iterable<Object>> invokeCohorts(Object message) {
List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohortPaths.size());
for(ActorPath actorPath : cohortPaths) {
-
- LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
+ }
ActorSelection cohort = actorContext.actorSelection(actorPath);
futureList.add(actorContext.executeRemoteOperationAsync(cohort, message));
private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
final Class<?> expectedResponseClass, final boolean propagateException) {
- LOG.debug("Tx {} {}", transactionId, operationName);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} {}", transactionId, operationName);
+ }
final SettableFuture<Void> returnFuture = SettableFuture.create();
// The cohort actor list should already be built at this point by the canCommit phase but,
@Override
public void onComplete(Throwable failure, Void notUsed) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
operationName, failure);
-
+ }
if(propagateException) {
returnFuture.setException(failure);
} else {
private void finishVoidOperation(final String operationName, final Object message,
final Class<?> expectedResponseClass, final boolean propagateException,
final SettableFuture<Void> returnFuture) {
-
- LOG.debug("Tx {} finish {}", transactionId, operationName);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finish {}", transactionId, operationName);
+ }
Future<Iterable<Object>> combinedFuture = invokeCohorts(message);
combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
}
if(exceptionToPropagate != null) {
- LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
operationName, exceptionToPropagate);
-
+ }
if(propagateException) {
// We don't log the exception here to avoid redundant logging since we're
// propagating to the caller in MD-SAL core who will log it.
// Since the caller doesn't want us to propagate the exception we'll also
// not log it normally. But it's usually not good to totally silence
// exceptions so we'll log it to debug level.
- LOG.debug(String.format("%s failed", message.getClass().getSimpleName()),
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(String.format("%s failed", message.getClass().getSimpleName()),
exceptionToPropagate);
+ }
returnFuture.set(null);
}
} else {
- LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+ }
returnFuture.set(null);
}
}
package org.opendaylight.controller.cluster.datastore;
+import akka.actor.ActorPath;
+import akka.dispatch.Futures;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+
+import java.util.Collections;
+import java.util.List;
/**
* TransactionChainProxy acts as a proxy for a DOMStoreTransactionChain created on a remote shard
*/
public class TransactionChainProxy implements DOMStoreTransactionChain{
private final ActorContext actorContext;
+ private final String transactionChainId;
+ // Cohort-path futures of the most recently readied transaction on this chain.
+ private volatile List<Future<ActorPath>> cohortPathFutures = Collections.emptyList();
public TransactionChainProxy(ActorContext actorContext) {
this.actorContext = actorContext;
+ // NOTE(review): id is member-name + wall-clock millis; two chains created
+ // by the same member within the same millisecond would collide - consider
+ // adding a monotonically increasing counter. TODO confirm.
+ transactionChainId = actorContext.getCurrentMemberName() + "-" + System.currentTimeMillis();
}
@Override
public DOMStoreReadTransaction newReadOnlyTransaction() {
return new TransactionProxy(actorContext,
- TransactionProxy.TransactionType.READ_ONLY);
+ TransactionProxy.TransactionType.READ_ONLY, this);
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
return new TransactionProxy(actorContext,
- TransactionProxy.TransactionType.READ_WRITE);
+ TransactionProxy.TransactionType.READ_WRITE, this);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
return new TransactionProxy(actorContext,
- TransactionProxy.TransactionType.WRITE_ONLY);
+ TransactionProxy.TransactionType.WRITE_ONLY, this);
}
@Override
public void close() {
- // FIXME : The problem here is don't know which shard the transaction chain is to be created on ???
- throw new UnsupportedOperationException("close - not sure what to do here?");
+ // Send a close transaction chain request to each and every shard
+ actorContext.broadcast(new CloseTransactionChain(transactionChainId));
+ }
+
+ public String getTransactionChainId() {
+ return transactionChainId;
+ }
+
+ // Called when a transaction on this chain becomes ready; stores its cohort
+ // futures so the next transaction can wait for them.
+ public void onTransactionReady(List<Future<ActorPath>> cohortPathFutures){
+ this.cohortPathFutures = cohortPathFutures;
+ }
+
+ // Blocks up to the operation timeout for the previously readied
+ // transaction's cohort paths; wraps any failure/timeout in
+ // IllegalStateException (cause preserved).
+ public void waitTillCurrentTransactionReady(){
+ try {
+ Await.result(Futures
+ .sequence(this.cohortPathFutures, actorContext.getActorSystem().dispatcher()),
+ actorContext.getOperationDuration());
+ } catch (Exception e) {
+ throw new IllegalStateException("Failed when waiting for transaction on a chain to become ready", e);
+ }
+ }
}
* </p>
*/
public class TransactionProxy implements DOMStoreReadWriteTransaction {
+
+ private final TransactionChainProxy transactionChainProxy;
+
+
+
public enum TransactionType {
READ_ONLY,
WRITE_ONLY,
private boolean inReadyState;
public TransactionProxy(ActorContext actorContext, TransactionType transactionType) {
    // Standalone (non-chained) transaction - delegate with a null chain proxy.
    this(actorContext, transactionType, null);
}
+
+ @VisibleForTesting
+ List<Future<Object>> getRecordedOperationFutures() {
+ List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+ for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
+ recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+ }
+
+ return recordedOperationFutures;
+ }
+
+ public TransactionProxy(ActorContext actorContext, TransactionType transactionType, TransactionChainProxy transactionChainProxy) {
this.actorContext = Preconditions.checkNotNull(actorContext,
- "actorContext should not be null");
+ "actorContext should not be null");
this.transactionType = Preconditions.checkNotNull(transactionType,
- "transactionType should not be null");
+ "transactionType should not be null");
this.schemaContext = Preconditions.checkNotNull(actorContext.getSchemaContext(),
- "schemaContext should not be null");
+ "schemaContext should not be null");
+ this.transactionChainProxy = transactionChainProxy;
String memberName = actorContext.getCurrentMemberName();
if(memberName == null){
}
this.identifier = TransactionIdentifier.builder().memberName(memberName).counter(
- counter.getAndIncrement()).build();
+ counter.getAndIncrement()).build();
if(transactionType == TransactionType.READ_ONLY) {
// Read-only Tx's aren't explicitly closed by the client so we create a PhantomReference
remoteTransactionActorsMB = new AtomicBoolean();
TransactionProxyCleanupPhantomReference cleanup =
- new TransactionProxyCleanupPhantomReference(this);
+ new TransactionProxyCleanupPhantomReference(this);
phantomReferenceCache.put(cleanup, cleanup);
}
-
- LOG.debug("Created txn {} of type {}", identifier, transactionType);
- }
-
- @VisibleForTesting
- List<Future<Object>> getRecordedOperationFutures() {
- List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Created txn {} of type {}", identifier, transactionType);
}
-
- return recordedOperationFutures;
}
@Override
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
- LOG.debug("Tx {} read {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} read {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
return transactionContext(path).readData(path);
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Exists operation on write-only transaction is not allowed");
- LOG.debug("Tx {} exists {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} exists {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
return transactionContext(path).dataExists(path);
checkModificationState();
- LOG.debug("Tx {} write {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} write {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
transactionContext(path).writeData(path, data);
checkModificationState();
- LOG.debug("Tx {} merge {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} merge {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
transactionContext(path).mergeData(path, data);
public void delete(YangInstanceIdentifier path) {
checkModificationState();
-
- LOG.debug("Tx {} delete {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} delete {}", identifier, path);
+ }
createTransactionIfMissing(actorContext, path);
transactionContext(path).deleteData(path);
inReadyState = true;
- LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
remoteTransactionPaths.size());
-
+ }
List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- LOG.debug("Tx {} Readying transaction for shard {}", identifier,
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Readying transaction for shard {}", identifier,
transactionContext.getShardName());
-
+ }
cohortPathFutures.add(transactionContext.readyTransaction());
}
+ if(transactionChainProxy != null){
+ transactionChainProxy.onTransactionReady(cohortPathFutures);
+ }
+
return new ThreePhaseCommitCohortProxy(actorContext, cohortPathFutures,
identifier.toString());
}
return ShardStrategyFactory.getStrategy(path).findShard(path);
}
- private void createTransactionIfMissing(ActorContext actorContext, YangInstanceIdentifier path) {
+ private void createTransactionIfMissing(ActorContext actorContext,
+ YangInstanceIdentifier path) {
+
+ if(transactionChainProxy != null){
+ transactionChainProxy.waitTillCurrentTransactionReady();
+ }
+
String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
TransactionContext transactionContext =
remoteTransactionPaths.get(shardName);
- if(transactionContext != null){
+ if (transactionContext != null) {
// A transaction already exists with that shard
return;
}
try {
Object response = actorContext.executeShardOperation(shardName,
- new CreateTransaction(identifier.toString(),this.transactionType.ordinal() ).toSerializable());
+ new CreateTransaction(identifier.toString(), this.transactionType.ordinal(),
+ getTransactionChainId()).toSerializable());
if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
CreateTransactionReply reply =
CreateTransactionReply.fromSerializable(response);
String transactionPath = reply.getTransactionPath();
- LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
+ }
ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
- if(transactionType == TransactionType.READ_ONLY) {
+ if (transactionType == TransactionType.READ_ONLY) {
// Add the actor to the remoteTransactionActors list for access by the
// cleanup PhantonReference.
remoteTransactionActors.add(transactionActor);
}
transactionContext = new TransactionContextImpl(shardName, transactionPath,
- transactionActor, identifier, actorContext, schemaContext);
+ transactionActor, identifier, actorContext, schemaContext);
remoteTransactionPaths.put(shardName, transactionContext);
} else {
throw new IllegalArgumentException(String.format(
- "Invalid reply type {} for CreateTransaction", response.getClass()));
+ "Invalid reply type {} for CreateTransaction", response.getClass()));
}
- } catch(Exception e){
- LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
- remoteTransactionPaths.put(shardName, new NoOpTransactionContext(shardName, e, identifier));
+ } catch (Exception e) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+ }
+ remoteTransactionPaths
+ .put(shardName, new NoOpTransactionContext(shardName, e, identifier));
+ }
+ }
+
+ public String getTransactionChainId() {
+ if(transactionChainProxy == null){
+ return "";
}
+ return transactionChainProxy.getTransactionChainId();
}
+
private interface TransactionContext {
String getShardName();
@Override
public void closeTransaction() {
- LOG.debug("Tx {} closeTransaction called", identifier);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} closeTransaction called", identifier);
+ }
actorContext.sendRemoteOperationAsync(getActor(), new CloseTransaction().toSerializable());
}
@Override
public Future<ActorPath> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
identifier, recordedOperationFutures.size());
-
+ }
// Send the ReadyTransaction message to the Tx actor.
final Future<Object> replyFuture = actorContext.executeRemoteOperationAsync(getActor(),
return combinedFutures.transform(new AbstractFunction1<Iterable<Object>, ActorPath>() {
@Override
public ActorPath apply(Iterable<Object> notUsed) {
-
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
identifier);
-
+ }
// At this point all the Futures succeeded and we need to extract the cohort
// actor path from the ReadyTransactionReply. For the recorded operations, they
// don't return any data so we're only interested that they completed
String resolvedCohortPath = getResolvedCohortPath(
reply.getCohortPath().toString());
- LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
identifier, resolvedCohortPath);
-
+ }
return actorContext.actorFor(resolvedCohortPath);
} else {
// Throwing an exception here will fail the Future.
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ }
recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
new DeleteData(path).toSerializable() ));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ }
recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
new MergeData(path, data, schemaContext).toSerializable()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ }
recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
new WriteData(path, data, schemaContext).toSerializable()));
}
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
final YangInstanceIdentifier path) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
+ }
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishReadData(path, returnFuture);
} else {
- LOG.debug("Tx {} readData: verifying {} previous recorded operations",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
-
+ }
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} readData: a recorded operation failed: {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData: a recorded operation failed: {}",
identifier, failure);
-
+ }
returnFuture.setException(new ReadFailedException(
"The read could not be performed because a previous put, merge,"
+ "or delete operation failed", failure));
private void finishReadData(final YangInstanceIdentifier path,
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
- LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
+ }
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object readResponse) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} read operation failed: {}", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+ }
returnFuture.setException(new ReadFailedException(
"Error reading data for path " + path, failure));
} else {
- LOG.debug("Tx {} read operation succeeded", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} read operation succeeded", identifier, failure);
+ }
if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,
path, readResponse);
public CheckedFuture<Boolean, ReadFailedException> dataExists(
final YangInstanceIdentifier path) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ }
final SettableFuture<Boolean> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishDataExists(path, returnFuture);
} else {
- LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
-
+ }
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
identifier, failure);
-
+ }
returnFuture.setException(new ReadFailedException(
"The data exists could not be performed because a previous "
+ "put, merge, or delete operation failed", failure));
private void finishDataExists(final YangInstanceIdentifier path,
final SettableFuture<Boolean> returnFuture) {
- LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
+ }
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object response) throws Throwable {
if(failure != null) {
- LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+ }
returnFuture.setException(new ReadFailedException(
"Error checking data exists for path " + path, failure));
} else {
- LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+ }
if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
returnFuture.set(Boolean.valueOf(DataExistsReply.
fromSerializable(response).exists()));
@Override
public void closeTransaction() {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+ }
}
@Override
public Future<ActorPath> readyTransaction() {
- LOG.debug("Tx {} readyTransaction called", identifier);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readyTransaction called", identifier);
+ }
return akka.dispatch.Futures.failed(failure);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+ }
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+ }
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
+ }
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
YangInstanceIdentifier path) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
+ }
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error reading data for path " + path, failure));
}
@Override
public CheckedFuture<Boolean, ReadFailedException> dataExists(
YangInstanceIdentifier path) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+ }
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error checking exists for path " + path, failure));
}
}
public void setDataStoreExecutor(ExecutorService dsExecutor) {
    // NOTE(review): the getter null-checks this bean, so create() presumably
    // may return null for unsupported executor types - confirm.
    this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor);
}
public void setNotificationManager(QueuedNotificationManager<?, ?> manager) {
    this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
        "notification-manager", getMBeanType(), getMBeanCategory());
    // Stats bean for the manager's backing executor.
    this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor());
}
@Override
@Override
public ThreadExecutorStats getDataStoreExecutorStats() {
    // The bean may be null if setDataStoreExecutor() was never invoked.
    return dataStoreExecutorStatsBean == null ? null :
        dataStoreExecutorStatsBean.toThreadExecutorStats();
}
@Override
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
-public class CloseTransactionChain implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CloseTransactionChain.class;
- @Override
- public Object toSerializable() {
- return ShardTransactionChainMessages.CloseTransactionChain.newBuilder().build();
- }
+public class CloseTransactionChain implements SerializableMessage {
+ public static final Class SERIALIZABLE_CLASS =
+ ShardTransactionChainMessages.CloseTransactionChain.class;
+ private final String transactionChainId;
+
+ public CloseTransactionChain(String transactionChainId){
+ this.transactionChainId = transactionChainId;
+ }
+
+ @Override
+ public Object toSerializable() {
+ return ShardTransactionChainMessages.CloseTransactionChain.newBuilder()
+ .setTransactionChainId(transactionChainId).build();
+ }
+
+ public static CloseTransactionChain fromSerializable(Object message){
+ ShardTransactionChainMessages.CloseTransactionChain closeTransactionChain
+ = (ShardTransactionChainMessages.CloseTransactionChain) message;
+
+ return new CloseTransactionChain(closeTransactionChain.getTransactionChainId());
+ }
+
+ public String getTransactionChainId() {
+ return transactionChainId;
+ }
}
public class CreateTransaction implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
- private final String transactionId;
- private final int transactionType;
-
- public CreateTransaction(String transactionId, int transactionType){
-
- this.transactionId = transactionId;
- this.transactionType = transactionType;
- }
-
- public String getTransactionId() {
- return transactionId;
- }
-
- public int getTransactionType() { return transactionType;}
-
- @Override
- public Object toSerializable() {
- return ShardTransactionMessages.CreateTransaction.newBuilder().setTransactionId(transactionId).setTransactionType(transactionType).build();
- }
-
- public static CreateTransaction fromSerializable(Object message){
- ShardTransactionMessages.CreateTransaction createTransaction = (ShardTransactionMessages.CreateTransaction)message;
- return new CreateTransaction(createTransaction.getTransactionId(),createTransaction.getTransactionType() );
- }
-
+ public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
+ private final String transactionId;
+ private final int transactionType;
+ private final String transactionChainId;
+
+ public CreateTransaction(String transactionId, int transactionType) {
+ this(transactionId, transactionType, "");
+ }
+
+ public CreateTransaction(String transactionId, int transactionType, String transactionChainId) {
+
+ this.transactionId = transactionId;
+ this.transactionType = transactionType;
+ this.transactionChainId = transactionChainId;
+
+ }
+
+
+ public String getTransactionId() {
+ return transactionId;
+ }
+
+ public int getTransactionType() {
+ return transactionType;
+ }
+
+ @Override
+ public Object toSerializable() {
+ return ShardTransactionMessages.CreateTransaction.newBuilder()
+ .setTransactionId(transactionId)
+ .setTransactionType(transactionType)
+ .setTransactionChainId(transactionChainId).build();
+ }
+
+ public static CreateTransaction fromSerializable(Object message) {
+ ShardTransactionMessages.CreateTransaction createTransaction =
+ (ShardTransactionMessages.CreateTransaction) message;
+ return new CreateTransaction(createTransaction.getTransactionId(),
+ createTransaction.getTransactionType(), createTransaction.getTransactionChainId());
+ }
+
+ public String getTransactionChainId() {
+ return transactionChainId;
+ }
}
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
+import akka.pattern.Patterns;
import akka.util.Timeout;
-
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.Configuration;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
if (result instanceof LocalShardFound) {
LocalShardFound found = (LocalShardFound) result;
- LOG.debug("Local shard found {}", found.getPath());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Local shard found {}", found.getPath());
+ }
return found.getPath();
}
if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
PrimaryFound found = PrimaryFound.fromSerializable(result);
- LOG.debug("Primary found {}", found.getPrimaryPath());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Primary found {}", found.getPrimaryPath());
+ }
return found.getPrimaryPath();
}
throw new PrimaryNotFoundException("Could not find primary for shardName " + shardName);
*/
public Object executeRemoteOperation(ActorSelection actor, Object message) {
- LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
+ actor.toString());
+ }
Future<Object> future = ask(actor, message, operationTimeout);
try {
*/
public Future<Object> executeRemoteOperationAsync(ActorSelection actor, Object message) {
- LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
+ }
return ask(actor, message, operationTimeout);
}
actor.tell(message, ActorRef.noSender());
}
+ public void sendShardOperationAsync(String shardName, Object message) {
+ ActorSelection primary = findPrimary(shardName);
+
+ primary.tell(message, ActorRef.noSender());
+ }
+
+
/**
* Execute an operation on the primary for a given shard
* <p>
}
+ /**
+ * Execute an operation on the the local shard only asynchronously
+ *
+ * <p>
+ * This method first finds the address of the local shard if any. It then
+ * executes the operation on it.
+ * </p>
+ *
+ * @param shardName the name of the shard on which the operation needs to be executed
+ * @param message the message that needs to be sent to the shard
+ * @param timeout the amount of time that this method should wait for a response before timing out
+ * @return null if the shard could not be located else a future on which the caller can wait
+ *
+ */
+ public Future executeLocalShardOperationAsync(String shardName, Object message, Timeout timeout) {
+ ActorRef local = findLocalShard(shardName);
+ if(local == null){
+ return null;
+ }
+ return Patterns.ask(local, message, timeout);
+ }
+
+
+
public void shutdown() {
shardManager.tell(PoisonPill.getInstance(), null);
actorSystem.shutdown();
return clusterWrapper.getCurrentMemberName();
}
+ /**
+ * Send the message to each and every shard
+ *
+ * @param message
+ */
+ public void broadcast(Object message){
+ for(String shardName : configuration.getAllShardNames()){
+ try {
+ sendShardOperationAsync(shardName, message);
+ } catch(Exception e){
+ LOG.warn("broadcast failed to send message " + message.getClass().getSimpleName() + " to shard " + shardName, e);
+ }
+ }
+ }
+
/**
 * Returns the configured timeout used for synchronous shard operations.
 */
public FiniteDuration getOperationDuration() {
    return operationDuration;
}
}
odl-cluster-data {
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 100ms
}
+
+ metric-capture-enabled = true
+
akka {
loggers = ["akka.event.slf4j.Slf4jLogger"]
cluster {
range "1..max";
}
}
-
+
typedef operation-timeout-type {
type uint16 {
range "5..max";
}
}
-
+
grouping data-store-properties {
leaf max-shard-data-change-executor-queue-size {
default 1000;
type non-zero-uint16-type;
description "The maximum queue size for each shard's data store executor.";
}
-
+
leaf shard-transaction-idle-timeout-in-minutes {
default 10;
type non-zero-uint16-type;
description "The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.";
}
-
+
leaf operation-timeout-in-seconds {
default 5;
type operation-timeout-type;
description "The maximum amount of time for akka operations (remote or local) to complete before failing.";
}
+
+ leaf enable-metric-capture {
+ default false;
+ type boolean;
+ description "Enable or disable metric capture.";
+ }
+
+ leaf bounded-mailbox-capacity {
+ default 1000;
+ type non-zero-uint16-type;
+ description "The maximum queue size that an actor's mailbox can reach.";
+ }
}
-
+
// Augments the 'configuration' choice node under modules/module.
augment "/config:modules/config:module/config:configuration" {
case distributed-config-datastore-provider {
System.setProperty("shard.persistent", "false");
system = ActorSystem.create("test");
+
+ deletePersistenceFiles();
}
@AfterClass
public static void tearDownClass() throws IOException {
JavaTestKit.shutdownActorSystem(system);
system = null;
+
+ deletePersistenceFiles();
}
protected static void deletePersistenceFiles() throws IOException {
import akka.actor.Props;
import akka.event.Logging;
import akka.testkit.JavaTestKit;
-
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
assertEquals(true, result);
- // 1. Create a TransactionChain
- shard.tell(new CreateTransactionChain().toSerializable(), getRef());
-
- final ActorSelection transactionChain =
- new ExpectMsg<ActorSelection>(duration("3 seconds"), "CreateTransactionChainReply") {
- @Override
- protected ActorSelection match(Object in) {
- if (in.getClass().equals(CreateTransactionChainReply.SERIALIZABLE_CLASS)) {
- ActorPath transactionChainPath =
- CreateTransactionChainReply.fromSerializable(getSystem(),in)
- .getTransactionChainPath();
- return getSystem()
- .actorSelection(transactionChainPath);
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertNotNull(transactionChain);
-
- System.out.println("Successfully created transaction chain");
-
- // 2. Create a Transaction on the TransactionChain
- transactionChain.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.WRITE_ONLY.ordinal() ).toSerializable(), getRef());
+ // Create a transaction on the shard
+ shard.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.WRITE_ONLY.ordinal() ).toSerializable(), getRef());
final ActorSelection transaction =
new ExpectMsg<ActorSelection>(duration("3 seconds"), "CreateTransactionReply") {
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import java.io.File;
import java.io.File;
import java.util.List;
+import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
File f = new File("./module-shards.conf");
ConfigFactory.parseFile(f);
}
+
+ @Test
+ public void testGetAllShardNames(){
+ Set<String> allShardNames = configuration.getAllShardNames();
+
+ assertEquals(4, allShardNames.size());
+ assertTrue(allShardNames.contains("default"));
+ assertTrue(allShardNames.contains("people-1"));
+ assertTrue(allShardNames.contains("cars-1"));
+ assertTrue(allShardNames.contains("test-1"));
+ }
}
import java.util.List;
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertNotNull;
+import static junit.framework.TestCase.assertTrue;
+
public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{
private ActorRef dataChangeListenerActor = getSystem().actorOf(Props.create(DoNothingActor.class));
Object messages = testContext
.executeLocalOperation(actorRef, "messages");
- Assert.assertNotNull(messages);
+ assertNotNull(messages);
- Assert.assertTrue(messages instanceof List);
+ assertTrue(messages instanceof List);
List<Object> listMessages = (List<Object>) messages;
- Assert.assertEquals(1, listMessages.size());
+ assertEquals(1, listMessages.size());
+
+ assertTrue(listMessages.get(0).getClass()
+ .equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS));
+ }
+
+ @Test
+ public void testCloseWhenRegistrationIsNull() throws Exception {
+ // When the proxy has no listener-registration actor set, close() must be a
+ // no-op on the wire: the collector actor must receive zero messages.
+ final Props props = Props.create(MessageCollectorActor.class);
+ final ActorRef actorRef = getSystem().actorOf(props);
+
+ DataChangeListenerRegistrationProxy proxy =
+ new DataChangeListenerRegistrationProxy(
+ new MockDataChangeListener(), dataChangeListenerActor);
+
+ proxy.close();
+
+ //Check if it was received by the remote actor
+ ActorContext
+ testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration());
+ Object messages = testContext
+ .executeLocalOperation(actorRef, "messages");
+
+ assertNotNull(messages);
+
+ assertTrue(messages instanceof List);
+
+ List<Object> listMessages = (List<Object>) messages;
- Assert.assertTrue(listMessages.get(0).getClass().equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS));
+ // No CloseDataChangeListenerRegistration should have been sent.
+ assertEquals(0, listMessages.size());
}
}
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
}
+ @Test
+ public void transactionChainIntegrationTest() throws Exception {
+ // End-to-end test of the transaction-chain path: create a chain on a real
+ // DistributedDataStore, write + read back a node through one chained
+ // read-write transaction, then drive the full 3-phase commit and close the chain.
+ final Configuration configuration = new ConfigurationImpl("module-shards.conf", "modules.conf");
+ ShardStrategyFactory.setConfiguration(configuration);
+
+
+
+ new JavaTestKit(getSystem()) {
+ {
+
+ new Within(duration("10 seconds")) {
+ @Override
+ protected void run() {
+ try {
+ final DistributedDataStore distributedDataStore =
+ new DistributedDataStore(getSystem(), "config",
+ new MockClusterWrapper(), configuration,
+ new DatastoreContext());
+
+ distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
+
+ // Wait for a specific log message to show up
+ final boolean result =
+ new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from("akka://test/user/shardmanager-config/member-1-shard-test-1-config")
+ .message("Switching from state Candidate to Leader")
+ .occurrences(1).exec();
+
+ assertEquals(true, result);
+
+ DOMStoreTransactionChain transactionChain =
+ distributedDataStore.createTransactionChain();
+
+ DOMStoreReadWriteTransaction transaction =
+ transactionChain.newReadWriteTransaction();
+
+ transaction
+ .write(TestModel.TEST_PATH, ImmutableNodes
+ .containerNode(TestModel.TEST_QNAME));
+
+ // Read back within the same transaction; the write above must be visible.
+ ListenableFuture<Optional<NormalizedNode<?, ?>>>
+ future =
+ transaction.read(TestModel.TEST_PATH);
+
+ Optional<NormalizedNode<?, ?>> optional =
+ future.get();
+
+ Assert.assertTrue("Node not found", optional.isPresent());
+
+ NormalizedNode<?, ?> normalizedNode =
+ optional.get();
+
+ assertEquals(TestModel.TEST_QNAME,
+ normalizedNode.getNodeType());
+
+ // Drive the three-phase commit: canCommit -> preCommit -> commit.
+ DOMStoreThreePhaseCommitCohort ready =
+ transaction.ready();
+
+ ListenableFuture<Boolean> canCommit =
+ ready.canCommit();
+
+ assertTrue(canCommit.get(5, TimeUnit.SECONDS));
+
+ ListenableFuture<Void> preCommit =
+ ready.preCommit();
+
+ preCommit.get(5, TimeUnit.SECONDS);
+
+ ListenableFuture<Void> commit = ready.commit();
+
+ commit.get(5, TimeUnit.SECONDS);
+
+ transactionChain.close();
+ } catch (ExecutionException | TimeoutException | InterruptedException e){
+ fail(e.getMessage());
+ }
+ }
+ };
+ }
+ };
+
+ }
+
+
//FIXME : Disabling test because it's flaky
//@Test
package org.opendaylight.controller.cluster.datastore;
+import akka.actor.ActorPath;
import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.Props;
-
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.Futures;
+import akka.util.Timeout;
+import com.google.common.util.concurrent.MoreExecutors;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertNull;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
public class DistributedDataStoreTest extends AbstractActorTest{
@Test
public void testRegisterChangeListenerWhenShardIsLocal() throws Exception {
+ // Rewritten to use a mocked ActorContext instead of MockActorContext: the
+ // registration path is now asynchronous (executeLocalShardOperationAsync).
+ ActorContext actorContext = mock(ActorContext.class);
+
+ distributedDataStore = new DistributedDataStore(actorContext);
+ distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
- mockActorContext.setExecuteLocalShardOperationResponse(new RegisterChangeListenerReply(doNothingActorRef.path()));
+ // The future is never completed here; only proxy creation is under test.
+ Future future = mock(Future.class);
+ when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
+ when(actorContext.getActorSystem()).thenReturn(getSystem());
+ when(actorContext
+ .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(future);
ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH, new AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>() {
- @Override
- public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- throw new UnsupportedOperationException("onDataChanged");
- }
- }, AsyncDataBroker.DataChangeScope.BASE);
+ distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
+ mock(AsyncDataChangeListener.class),
+ AsyncDataBroker.DataChangeScope.BASE);
+ // A proxy registration is returned even before the shard reply arrives.
- assertTrue(registration instanceof DataChangeListenerRegistrationProxy);
+ assertNotNull(registration);
+
+ assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
+ }
+
+ @Test
+ public void testRegisterChangeListenerWhenSuccessfulReplyReceived() throws Exception {
+ // When the async shard operation completes successfully with a
+ // RegisterChangeListenerReply, the proxy must resolve and hold the
+ // listener-registration actor selection.
+ ActorContext actorContext = mock(ActorContext.class);
+
+ distributedDataStore = new DistributedDataStore(actorContext);
+ distributedDataStore.onGlobalContextUpdated(
+ TestModel.createTestContext());
+
+ // Same-thread executor so the future callback runs before the assertions below.
+ ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
+
+ // Make Future successful
+ Future f = Futures.successful(new RegisterChangeListenerReply(doNothingActorRef.path()));
+
+ // Setup the mocks
+ ActorSystem actorSystem = mock(ActorSystem.class);
+ ActorSelection actorSelection = mock(ActorSelection.class);
+
+ when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
+ when(actorSystem.dispatcher()).thenReturn(executor);
+ when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
+ when(actorContext.getActorSystem()).thenReturn(actorSystem);
+ when(actorContext
+ .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f);
+ when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
+
+ ListenerRegistration registration =
+ distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
+ mock(AsyncDataChangeListener.class),
+ AsyncDataBroker.DataChangeScope.BASE);
assertNotNull(registration);
+
+ assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
+
+ // The successful reply must have populated the registration actor.
+ ActorSelection listenerRegistrationActor =
+ ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
+
+ assertNotNull(listenerRegistrationActor);
+
+ assertEquals(actorSelection, listenerRegistrationActor);
+ }
+
+ @Test
+ public void testRegisterChangeListenerWhenSuccessfulReplyFailed() throws Exception {
+ // When the async shard operation fails, a proxy is still returned but its
+ // listener-registration actor must remain unset (null).
+ ActorContext actorContext = mock(ActorContext.class);
+
+ distributedDataStore = new DistributedDataStore(actorContext);
+ distributedDataStore.onGlobalContextUpdated(
+ TestModel.createTestContext());
+
+ // Same-thread executor so the failure callback runs before the assertions below.
+ ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
+
+ // Make Future fail
+ Future f = Futures.failed(new IllegalArgumentException());
+
+ // Setup the mocks
+ ActorSystem actorSystem = mock(ActorSystem.class);
+ ActorSelection actorSelection = mock(ActorSelection.class);
+
+ when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
+ when(actorSystem.dispatcher()).thenReturn(executor);
+ when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
+ when(actorContext.getActorSystem()).thenReturn(actorSystem);
+ when(actorContext
+ .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f);
+ when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
+
+ ListenerRegistration registration =
+ distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
+ mock(AsyncDataChangeListener.class),
+ AsyncDataBroker.DataChangeScope.BASE);
+
+ assertNotNull(registration);
+
+ assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
+
+ ActorSelection listenerRegistrationActor =
+ ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
+
+ // Registration actor stays null on failure.
+ assertNull(listenerRegistrationActor);
+
}
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
- @Test
- public void testOnReceiveCreateTransactionChain() throws Exception {
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateTransactionChain");
-
-
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(subject.path().toString())
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
-
- Assert.assertEquals(true, result);
-
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new CreateTransactionChain().toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(CreateTransactionChainReply.SERIALIZABLE_CLASS)){
- CreateTransactionChainReply reply =
- CreateTransactionChainReply.fromSerializable(getSystem(),in);
- return reply.getTransactionChainPath()
- .toString();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("Unexpected transaction path " + out,
- "akka://test/user/testCreateTransactionChain/$a",
- out);
-
- expectNoMsg();
- }
-
-
- };
- }};
- }
-
@Test
public void testOnReceiveRegisterListener() throws Exception {
new JavaTestKit(getSystem()) {{
}};
}
+ @Test
+ public void testCreateTransactionOnChain(){
+ // With ShardTransactionChain removed, CreateTransaction carrying a chain id
+ // ("foobar") now goes directly to the Shard; the resulting transaction actor
+ // is a child of the shard itself.
+ new JavaTestKit(getSystem()) {{
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
+ final ActorRef subject =
+ getSystem().actorOf(props, "testCreateTransactionOnChain");
+
+ // Wait for a specific log message to show up
+ final boolean result =
+ new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from(subject.path().toString())
+ .message("Switching from state Candidate to Leader")
+ .occurrences(1).exec();
+
+ Assert.assertEquals(true, result);
+
+ new Within(duration("3 seconds")) {
+ @Override
+ protected void run() {
+
+ subject.tell(
+ new UpdateSchemaContext(TestModel.createTestContext()),
+ getRef());
+
+ // Third argument is the transaction-chain id.
+ subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
+ getRef());
+
+ final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
+ // do not put code outside this method, will run afterwards
+ @Override
+ protected String match(Object in) {
+ if (in instanceof CreateTransactionReply) {
+ CreateTransactionReply reply =
+ (CreateTransactionReply) in;
+ return reply.getTransactionActorPath()
+ .toString();
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ assertTrue("Unexpected transaction path " + out,
+ out.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
+ expectNoMsg();
+ }
+ };
+ }};
+ }
+
+
@Test
public void testPeerAddressResolved(){
new JavaTestKit(getSystem()) {{
subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
getRef());
- waitForLogMessage(Logging.Debug.class, subject, "CaptureSnapshotReply received by actor");
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
+ subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
+ getRef());
+
+ waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
}
};
- Thread.sleep(2000);
deletePersistenceFiles();
}};
}
+++ /dev/null
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.JavaTestKit;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChainReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-import static org.junit.Assert.assertEquals;
-
-public class ShardTransactionChainTest extends AbstractActorTest {
-
- private static ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
-
- private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor,
- MoreExecutors.sameThreadExecutor());
-
- private static final SchemaContext testSchemaContext = TestModel.createTestContext();
-
- private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
-
- private static final String mockShardName = "mockShardName";
-
- private final ShardStats shardStats = new ShardStats(mockShardName, "DataStore");
-
- @BeforeClass
- public static void staticSetup() {
- store.onGlobalContextUpdated(testSchemaContext);
- }
-
- @Test
- public void testOnReceiveCreateTransaction() throws Exception {
- new JavaTestKit(getSystem()) {{
- final Props props = ShardTransactionChain.props(store.createTransactionChain(),
- testSchemaContext, DATA_STORE_CONTEXT, shardStats);
- final ActorRef subject = getSystem().actorOf(props, "testCreateTransaction");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
- return CreateTransactionReply.fromSerializable(in).getTransactionPath();
- }else{
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("Unexpected transaction path " + out,
- "akka://test/user/testCreateTransaction/shard-txn-1",
- out);
-
- // Will wait for the rest of the 3 seconds
- expectNoMsg();
- }
-
-
- };
- }};
- }
-
- @Test
- public void testOnReceiveCloseTransactionChain() throws Exception {
- new JavaTestKit(getSystem()) {{
- final Props props = ShardTransactionChain.props(store.createTransactionChain(),
- testSchemaContext, DATA_STORE_CONTEXT, shardStats );
- final ActorRef subject = getSystem().actorOf(props, "testCloseTransactionChain");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new CloseTransactionChain().toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(CloseTransactionChainReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
- // Will wait for the rest of the 3 seconds
- expectNoMsg();
- }
-
-
- };
- }};
- }
-}
package org.opendaylight.controller.cluster.datastore;
-import static org.mockito.Mockito.doReturn;
-
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
public class TransactionChainProxyTest {
- ActorContext actorContext = Mockito.mock(ActorContext.class);
- SchemaContext schemaContext = Mockito.mock(SchemaContext.class);
+ ActorContext actorContext = mock(ActorContext.class);
+ SchemaContext schemaContext = mock(SchemaContext.class);
@Before
public void setUp() {
}
- @Test(expected=UnsupportedOperationException.class)
+ @Test
public void testClose() throws Exception {
- new TransactionChainProxy(actorContext).close();
+ // close() is now implemented: it must broadcast a (chain-close) message to
+ // all shards exactly once, rather than throwing UnsupportedOperationException.
+ ActorContext context = mock(ActorContext.class);
+
+ new TransactionChainProxy(context).close();
+
+ verify(context, times(1)).broadcast(anyObject());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import com.google.common.collect.Iterables;
+import scala.concurrent.Future;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class InMemorySnapshotStore extends SnapshotStore {
+
+ // Snapshots keyed by persistence id, in insertion (save) order.
+ // NOTE(review): not synchronized — assumes akka dispatches plugin calls
+ // sequentially per store; confirm if used from multiple dispatchers.
+ Map<String, List<Snapshot>> snapshots = new HashMap<>();
+
+ // Loads the most recently saved snapshot for the given persistence id,
+ // ignoring the selection criteria (test-only simplification).
+ @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+ SnapshotSelectionCriteria snapshotSelectionCriteria) {
+ List<Snapshot> snapshotList = snapshots.get(s);
+ if(snapshotList == null){
+ return Futures.successful(Option.<SelectedSnapshot>none());
+ }
+
+ Snapshot snapshot = Iterables.getLast(snapshotList);
+ SelectedSnapshot selectedSnapshot =
+ new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
+ return Futures.successful(Option.some(selectedSnapshot));
+ }
+
+ // Appends the snapshot to the list for its persistence id, creating the list on first save.
+ @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+ if(snapshotList == null){
+ snapshotList = new ArrayList<>();
+ snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
+ }
+ snapshotList.add(new Snapshot(snapshotMetadata, o));
+
+ return Futures.successful(null);
+ }
+
+ @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+ }
+
+ // Removes the single snapshot whose metadata matches exactly; no-op if absent.
+ @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+ List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+ if(snapshotList == null){
+ return;
+ }
+
+ int deleteIndex = -1;
+
+ for(int i=0;i<snapshotList.size(); i++){
+ Snapshot snapshot = snapshotList.get(i);
+ if(snapshotMetadata.equals(snapshot.getMetadata())){
+ deleteIndex = i;
+ break;
+ }
+ }
+
+ if(deleteIndex != -1){
+ snapshotList.remove(deleteIndex);
+ }
+
+ }
+
+ // Bulk delete: drops every snapshot for the persistence id regardless of criteria.
+ @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+ throws Exception {
+ List<Snapshot> snapshotList = snapshots.get(s);
+
+ if(snapshotList == null){
+ return;
+ }
+
+ // TODO : This is a quick and dirty implementation. Do actual match later.
+ snapshotList.clear();
+ snapshots.remove(s);
+ }
+
+ // Immutable pairing of snapshot metadata and its payload.
+ private static class Snapshot {
+ private final SnapshotMetadata metadata;
+ private final Object data;
+
+ private Snapshot(SnapshotMetadata metadata, Object data) {
+ this.metadata = metadata;
+ this.data = data;
+ }
+
+ public SnapshotMetadata getMetadata() {
+ return metadata;
+ }
+
+ public Object getData() {
+ return data;
+ }
+ }
+}
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.Set;
public class MockConfiguration implements Configuration{
@Override public List<String> getMemberShardNames(String memberName) {
return Collections.EMPTY_LIST;
}
+
+ // Mock implementation of the new Configuration method: no shards configured.
+ @Override public Set<String> getAllShardNames() {
+ return Collections.emptySet();
+ }
}
import akka.actor.Props;
import akka.actor.UntypedActor;
import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import akka.actor.Props;
import akka.actor.UntypedActor;
import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
akka {
+ persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
actor {
}
}
}
+
+in-memory-snapshot-store {
+ # Class name of the plugin.
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+ # Dispatcher for the plugin actor.
+ plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 100ms
}
*/
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import com.google.common.collect.ImmutableMap;
/**
*
//we will default to InMemoryDOMDataStore creation
configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
}
- ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
- .<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
- .put(LogicalDatastoreType.CONFIGURATION, configStore).build();
+
+ final Map<LogicalDatastoreType, DOMStore> datastores = new EnumMap<>(LogicalDatastoreType.class);
+ datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
+ datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
/*
* We use a single-threaded executor for commits with a bounded queue capacity. If the
DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
listenableFutureExecutor));
final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE);
commitStatsMXBean.registerMBean();
- final ThreadExecutorStatsMXBeanImpl commitExecutorStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(commitExecutor, "CommitExecutorStats",
+ final AbstractMXBean commitExecutorStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
JMX_BEAN_TYPE, null);
- commitExecutorStatsMXBean.registerMBean();
-
- final ThreadExecutorStatsMXBeanImpl commitFutureStatsMXBean =
- new ThreadExecutorStatsMXBeanImpl(listenableFutureExecutor,
+ final AbstractMXBean commitFutureStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
"CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
- commitFutureStatsMXBean.registerMBean();
newDataBroker.setCloseable(new AutoCloseable() {
@Override
public void close() {
commitStatsMXBean.unregisterMBean();
- commitExecutorStatsMXBean.unregisterMBean();
- commitFutureStatsMXBean.unregisterMBean();
+ if (commitExecutorStatsMXBean != null) {
+ commitExecutorStatsMXBean.unregisterMBean();
+ }
+ if (commitFutureStatsMXBean != null) {
+ commitFutureStatsMXBean.unregisterMBean();
+ }
}
});
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
-
-/**
-*
-*/
-public final class HashMapDataStoreModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModule
-{
-
- public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
- super(identifier, dependencyResolver);
- }
-
- public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, HashMapDataStoreModule oldModule, java.lang.AutoCloseable oldInstance) {
- super(identifier, dependencyResolver, oldModule, oldInstance);
- }
-
- @Override
- public void validate(){
- super.validate();
- // Add custom validation for module attributes here.
- }
-
- @Override
- public java.lang.AutoCloseable createInstance() {
- HashMapDataStore store = new HashMapDataStore();
- return store;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-/**
-*
-*/
-public class HashMapDataStoreModuleFactory extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModuleFactory
-{
-
-
-}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
* Composite DOM Transaction backed by {@link DOMStoreTransaction}.
*
abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTransaction> implements
AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- private final ImmutableMap<K, T> backingTxs;
+ private final Map<K, T> backingTxs;
private final Object identifier;
/**
* @param backingTxs
* Key,value map of backing transactions.
*/
- protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final ImmutableMap<K, T> backingTxs) {
+ protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map<K, T> backingTxs) {
this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null");
}
*/
protected final T getSubtransaction(final K key) {
Preconditions.checkNotNull(key, "key must not be null.");
- Preconditions.checkArgument(backingTxs.containsKey(key), "No subtransaction associated with %s", key);
- return backingTxs.get(key);
+
+ final T ret = backingTxs.get(key);
+ Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+ return ret;
}
/**
* Returns immutable Iterable of all subtransactions.
*
*/
- protected Iterable<T> getSubtransactions() {
+ protected Collection<T> getSubtransactions() {
return backingTxs.values();
}
protected void closeSubtransactions() {
/*
- * We share one exception for all failures, which are added
- * as supressedExceptions to it.
- *
+ * We share one exception for all failures, which are added
+ * as supressedExceptions to it.
*/
IllegalStateException failure = null;
for (T subtransaction : backingTxs.values()) {
subtransaction.close();
} catch (Exception e) {
// If we did not allocated failure we allocate it
- if(failure == null) {
- failure = new IllegalStateException("Uncaught exception occured during closing transaction.", e);
+ if (failure == null) {
+ failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
} else {
- // We update it with addotional exceptions, which occured during error.
+ // We update it with additional exceptions, which occurred during error.
failure.addSuppressed(e);
}
}
}
// If we have failure, we throw it at after all attempts to close.
- if(failure != null) {
+ if (failure != null) {
throw failure;
}
}
-}
\ No newline at end of file
+}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import java.util.EnumMap;
import java.util.Map;
import java.util.Map.Entry;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
/**
*
* Abstract composite transaction factory.
* @param <T>
* Type of {@link DOMStoreTransactionFactory} factory.
*/
-public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
-
- private final ImmutableMap<LogicalDatastoreType, T> storeTxFactories;
-
- private boolean closed;
+abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicIntegerFieldUpdater<AbstractDOMForwardedTransactionFactory> UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed");
+ private final Map<LogicalDatastoreType, T> storeTxFactories;
+ private volatile int closed = 0;
protected AbstractDOMForwardedTransactionFactory(final Map<LogicalDatastoreType, ? extends T> txFactories) {
- this.storeTxFactories = ImmutableMap.copyOf(txFactories);
+ this.storeTxFactories = new EnumMap<>(txFactories);
}
/**
*
* @return New composite read-only transaction.
*/
- public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newReadOnlyTransaction());
}
- return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), builder.build());
+ return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns);
}
-
-
/**
* Creates a new composite write-only transaction
*
* @return New composite write-only transaction associated with this
* factory.
*/
- public DOMDataWriteTransaction newWriteOnlyTransaction() {
+ public final DOMDataWriteTransaction newWriteOnlyTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
+ txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
}
- return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), builder.build(),
- this);
+ return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), txns, this);
}
/**
*
* @return New composite read-write transaction associated with this
* factory.
- *
*/
- public DOMDataReadWriteTransaction newReadWriteTransaction() {
+ public final DOMDataReadWriteTransaction newReadWriteTransaction() {
checkNotClosed();
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadWriteTransaction> builder = ImmutableMap.builder();
+
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
- builder.put(store.getKey(), store.getValue().newReadWriteTransaction());
+ txns.put(store.getKey(), store.getValue().newReadWriteTransaction());
}
- return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), builder.build(), this);
+ return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this);
}
/**
}
/**
- *
* Checks if instance is not closed.
*
* @throws IllegalStateException If instance of this class was closed.
*
*/
- @GuardedBy("this")
- protected synchronized void checkNotClosed() {
- Preconditions.checkState(!closed,"Transaction factory was closed. No further operations allowed.");
+ protected final void checkNotClosed() {
+ Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
}
@Override
- @GuardedBy("this")
- public synchronized void close() {
- closed = true;
+ public void close() {
+ final boolean success = UPDATER.compareAndSet(this, 0, 1);
+ Preconditions.checkState(success, "Transaction factory was already closed");
}
-
}
+
package org.opendaylight.controller.md.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkState;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.EnumMap;
+import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DOMStore> implements DOMDataBroker,
AutoCloseable {
private final AtomicLong chainNum = new AtomicLong();
private volatile AutoCloseable closeable;
- public DOMDataBrokerImpl(final ImmutableMap<LogicalDatastoreType, DOMStore> datastores,
+ public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
final ListeningExecutorService executor) {
super(datastores);
this.coordinator = new DOMDataCommitCoordinatorImpl(executor);
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
@Override
public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
- ImmutableMap.Builder<LogicalDatastoreType, DOMStoreTransactionChain> backingChainsBuilder = ImmutableMap
- .builder();
+ checkNotClosed();
+
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
for (Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
- backingChainsBuilder.put(entry.getKey(), entry.getValue().createTransactionChain());
+ backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
}
- long chainId = chainNum.getAndIncrement();
- ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = backingChainsBuilder.build();
+
+ final long chainId = chainNum.getAndIncrement();
LOG.debug("Transactoin chain {} created with listener {}, backing store chains {}", chainId, listener,
backingChains);
return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, coordinator, listener);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.GuardedBy;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
* NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which is backed
* by several {@link DOMStoreTransactionChain} differentiated by provided
implements DOMTransactionChain, DOMDataCommitErrorListener {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
+ private final AtomicLong txNum = new AtomicLong();
private final DOMDataCommitExecutor coordinator;
private final TransactionChainListener listener;
private final long chainId;
- private final AtomicLong txNum = new AtomicLong();
- @GuardedBy("this")
- private boolean failed = false;
+
+ private volatile boolean failed = false;
/**
*
* If any of arguments is null.
*/
public DOMDataBrokerTransactionChainImpl(final long chainId,
- final ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+ final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
final DOMDataCommitExecutor coordinator, final TransactionChainListener listener) {
super(chains);
this.chainId = chainId;
}
@Override
- public synchronized CheckedFuture<Void,TransactionCommitFailedException> submit(
+ public CheckedFuture<Void,TransactionCommitFailedException> submit(
final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotClosed();
+
return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
}
@Override
- public synchronized void close() {
+ public void close() {
super.close();
+
for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
subChain.close();
}
if (!failed) {
LOG.debug("Transaction chain {}Â successfully finished.", this);
+ // FIXME: this event should be emitted once all operations complete
listener.onTransactionChainSuccessful(this);
}
}
@Override
- public synchronized void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
failed = true;
LOG.debug("Transaction chain {}Â failed.", this, cause);
listener.onTransactionChainFailed(this, tx, cause);
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import java.util.List;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
/**
*
* Implementation of blocking three phase commit coordinator, which which
public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataCommitCoordinatorImpl.class);
-
- /**
- * Runs AND binary operation between all booleans in supplied iteration of booleans.
- *
- * This method will stop evaluating iterables if first found is false.
- */
- private static final Function<Iterable<Boolean>, Boolean> AND_FUNCTION = new Function<Iterable<Boolean>, Boolean>() {
-
- @Override
- public Boolean apply(final Iterable<Boolean> input) {
- for(boolean value : input) {
- if(!value) {
- return Boolean.FALSE;
- }
- }
- return Boolean.TRUE;
- }
- };
-
- private final ListeningExecutorService executor;
-
private final DurationStatsTracker commitStatsTracker = new DurationStatsTracker();
+ private final ListeningExecutorService executor;
/**
*
}
/**
- *
* Implementation of blocking three-phase commit-coordination tasks without
- * support of cancelation.
- *
+ * support of cancellation.
*/
- private static class CommitCoordinationTask implements Callable<Void> {
-
+ private static final class CommitCoordinationTask implements Callable<Void> {
+ private static final AtomicReferenceFieldUpdater<CommitCoordinationTask, CommitPhase> PHASE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase");
private final DOMDataWriteTransaction tx;
private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
private final DurationStatsTracker commitStatTracker;
-
- @GuardedBy("this")
- private CommitPhase currentPhase;
+ private final int cohortSize;
+ private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
final DurationStatsTracker commitStatTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
- this.currentPhase = CommitPhase.SUBMITTED;
this.commitStatTracker = commitStatTracker;
+ this.cohortSize = Iterables.size(cohorts);
}
@Override
public Void call() throws TransactionCommitFailedException {
+ final long startTime = commitStatTracker != null ? System.nanoTime() : 0;
- long startTime = System.nanoTime();
try {
canCommitBlocking();
preCommitBlocking();
commitBlocking();
return null;
} catch (TransactionCommitFailedException e) {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, e);
- abortBlocking(e);
+ final CommitPhase phase = currentPhase;
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e);
+ abortBlocking(e, phase);
throw e;
} finally {
- if(commitStatTracker != null) {
+ if (commitStatTracker != null) {
commitStatTracker.addDuration(System.nanoTime() - startTime);
}
}
*
*/
private void canCommitBlocking() throws TransactionCommitFailedException {
- final Boolean canCommitResult = canCommitAll().checkedGet();
- if (!canCommitResult) {
- throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+ for (ListenableFuture<?> canCommit : canCommitAll()) {
+ try {
+ final Boolean result = (Boolean)canCommit.get();
+ if (result == null || !result) {
+ throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ throw TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
+ }
}
}
/**
*
- * Invokes preCommit on underlying cohorts and blocks till
- * all results are returned.
+ * Invokes canCommit on underlying cohorts and returns composite future
+ * which will contains {@link Boolean#TRUE} only and only if
+ * all cohorts returned true.
*
- * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
- * state is not CAN_COMMIT
- * throws IllegalStateException.
+ * Valid state transition is from SUBMITTED to CAN_COMMIT,
+ * if currentPhase is not SUBMITTED throws IllegalStateException.
*
- * @throws TransactionCommitFailedException
- * If one of cohorts failed preCommit
+ * @return List of all cohorts futures from can commit phase.
*
*/
- private void preCommitBlocking() throws TransactionCommitFailedException {
- preCommitAll().checkedGet();
+ private ListenableFuture<?>[] canCommitAll() {
+ changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
+ for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
+ ops[i++] = cohort.canCommit();
+ }
+ return ops;
}
/**
*
- * Invokes commit on underlying cohorts and blocks till
+ * Invokes preCommit on underlying cohorts and blocks till
* all results are returned.
*
- * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
- * IllegalStateException.
+ * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
+ * state is not CAN_COMMIT
+ * throws IllegalStateException.
*
* @throws TransactionCommitFailedException
* If one of cohorts failed preCommit
*
*/
- private void commitBlocking() throws TransactionCommitFailedException {
- commitAll().checkedGet();
- }
-
- /**
- * Aborts transaction.
- *
- * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
- * cohorts, blocks
- * for all results. If any of the abort failed throws
- * IllegalStateException,
- * which will contains originalCause as suppressed Exception.
- *
- * If aborts we're successful throws supplied exception
- *
- * @param originalCause
- * Exception which should be used to fail transaction for
- * consumers of transaction
- * future and listeners of transaction failure.
- * @throws TransactionCommitFailedException
- * on invocation of this method.
- * originalCa
- * @throws IllegalStateException
- * if abort failed.
- */
- private void abortBlocking(final TransactionCommitFailedException originalCause)
- throws TransactionCommitFailedException {
- LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, originalCause);
- Exception cause = originalCause;
+ private void preCommitBlocking() throws TransactionCommitFailedException {
+ final ListenableFuture<?>[] preCommitFutures = preCommitAll();
try {
- abortAsyncAll().get();
+ for(ListenableFuture<?> future : preCommitFutures) {
+ future.get();
+ }
} catch (InterruptedException | ExecutionException e) {
- LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
- cause = new IllegalStateException("Abort failed.", e);
- cause.addSuppressed(e);
+ throw TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER.apply(e);
}
- Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
}
/**
* state is not CAN_COMMIT
* throws IllegalStateException.
*
- * @return Future which will complete once all cohorts completed
- * preCommit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * @return List of all cohorts futures from can commit phase.
*
*/
- private CheckedFuture<Void, TransactionCommitFailedException> preCommitAll() {
+ private ListenableFuture<?>[] preCommitAll() {
changeStateFrom(CommitPhase.CAN_COMMIT, CommitPhase.PRE_COMMIT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.preCommit());
+ ops[i++] = cohort.preCommit();
+ }
+ return ops;
+ }
+
+ /**
+ *
+ * Invokes commit on underlying cohorts and blocks till
+ * all results are returned.
+ *
+ * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
+ * IllegalStateException.
+ *
+ * @throws TransactionCommitFailedException
+ * If one of cohorts failed preCommit
+ *
+ */
+ private void commitBlocking() throws TransactionCommitFailedException {
+ final ListenableFuture<?>[] commitFutures = commitAll();
+ try {
+ for(ListenableFuture<?> future : commitFutures) {
+ future.get();
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ throw TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e);
}
- /*
- * We are returing all futures as list, not only succeeded ones in
- * order to fail composite future if any of them failed.
- * See Futures.allAsList for this description.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
- return MappingCheckedFuture.create(compositeResult,
- TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER);
}
/**
* Valid state transition is from PRE_COMMIT to COMMIT, if not throws
* IllegalStateException
*
- * @return Future which will complete once all cohorts completed
- * commit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * @return List of all cohorts futures from can commit phase.
*
*/
- private CheckedFuture<Void, TransactionCommitFailedException> commitAll() {
+ private ListenableFuture<?>[] commitAll() {
changeStateFrom(CommitPhase.PRE_COMMIT, CommitPhase.COMMIT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.commit());
+ ops[i++] = cohort.commit();
}
- /*
- * We are returing all futures as list, not only succeeded ones in
- * order to fail composite future if any of them failed.
- * See Futures.allAsList for this description.
- */
- @SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
- return MappingCheckedFuture.create(compositeResult,
- TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
+ return ops;
}
/**
+ * Aborts transaction.
*
- * Invokes canCommit on underlying cohorts and returns composite future
- * which will contains {@link Boolean#TRUE} only and only if
- * all cohorts returned true.
- *
- * Valid state transition is from SUBMITTED to CAN_COMMIT,
- * if currentPhase is not SUBMITTED throws IllegalStateException.
+ * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
+ * cohorts, blocks
+ * for all results. If any of the abort failed throws
+ * IllegalStateException,
+ * which will contains originalCause as suppressed Exception.
*
- * @return Future which will complete once all cohorts completed
- * preCommit.
- * Future throws TransactionCommitFailedException
- * If any of cohorts failed preCommit
+ * If aborts we're successful throws supplied exception
*
+ * @param originalCause
+ * Exception which should be used to fail transaction for
+ * consumers of transaction
+ * future and listeners of transaction failure.
+ * @param phase phase in which the problem ensued
+ * @throws TransactionCommitFailedException
+ * on invocation of this method.
+ * originalCa
+ * @throws IllegalStateException
+ * if abort failed.
*/
- private CheckedFuture<Boolean, TransactionCommitFailedException> canCommitAll() {
- changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
- Builder<ListenableFuture<Boolean>> canCommitOperations = ImmutableList.builder();
- for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- canCommitOperations.add(cohort.canCommit());
+ private void abortBlocking(final TransactionCommitFailedException originalCause, final CommitPhase phase)
+ throws TransactionCommitFailedException {
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, originalCause);
+ Exception cause = originalCause;
+ try {
+ abortAsyncAll(phase).get();
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
+ cause = new IllegalStateException("Abort failed.", e);
+ cause.addSuppressed(e);
}
- ListenableFuture<List<Boolean>> allCanCommits = Futures.allAsList(canCommitOperations.build());
- ListenableFuture<Boolean> allSuccessFuture = Futures.transform(allCanCommits, AND_FUNCTION);
- return MappingCheckedFuture.create(allSuccessFuture,
- TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER);
-
+ Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
}
/**
- *
* Invokes abort on underlying cohorts and returns future which
- * completes
- * once all abort on cohorts are completed.
+ * completes once all abort on cohorts are completed.
*
+ * @param phase phase in which the problem ensued
* @return Future which will complete once all cohorts completed
* abort.
- *
*/
- private ListenableFuture<Void> abortAsyncAll() {
- changeStateFrom(currentPhase, CommitPhase.ABORT);
- Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+ private ListenableFuture<Void> abortAsyncAll(final CommitPhase phase) {
+ changeStateFrom(phase, CommitPhase.ABORT);
+
+ final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+ int i = 0;
for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
- ops.add(cohort.abort());
+ ops[i++] = cohort.abort();
}
+
/*
- * We are returing all futures as list, not only succeeded ones in
+ * We are returning all futures as list, not only succeeded ones in
* order to fail composite future if any of them failed.
* See Futures.allAsList for this description.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
- ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
+ ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops);
return compositeResult;
}
* @throws IllegalStateException
* If currentState of task does not match expected state
*/
- private synchronized void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
- Preconditions.checkState(currentPhase.equals(currentExpected),
- "Invalid state transition: Tx: %s current state: %s new state: %s", tx.getIdentifier(),
- currentPhase, newState);
- LOG.debug("Transaction {}: Phase {} Started ", tx.getIdentifier(), newState);
- currentPhase = newState;
- };
+ private void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
+ final boolean success = PHASE_UPDATER.compareAndSet(this, currentExpected, newState);
+ Preconditions.checkState(success, "Invalid state transition: Tx: %s expected: %s current: %s target: %s",
+ tx.getIdentifier(), currentExpected, currentPhase, newState);
+ LOG.debug("Transaction {}: Phase {} Started", tx.getIdentifier(), newState);
+ };
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
- *
* Read Only Transaction, which is composed of several
* {@link DOMStoreReadTransaction} transactions. Subtransaction is selected by
* {@link LogicalDatastoreType} type parameter in
DOMDataReadOnlyTransaction {
protected DOMForwardedReadOnlyTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
+ final Map<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
super(identifier, backingTxs);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
public void close() {
closeSubtransactions();
}
-
}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
/**
*
* Read-Write Transaction, which is composed of several
* transactions.
*
*/
-
-class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements
- DOMDataReadWriteTransaction {
-
+final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
protected DOMForwardedReadWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
+ final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs, commitImpl);
}
return getSubtransaction(store).read(path);
}
- @Override public CheckedFuture<Boolean, ReadFailedException> exists(
- LogicalDatastoreType store,
- YangInstanceIdentifier path) {
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(
+ final LogicalDatastoreType store,
+ final YangInstanceIdentifier path) {
return getSubtransaction(store).exists(path);
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import static com.google.common.base.Preconditions.checkState;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
- *
- *
* Read-Write Transaction, which is composed of several
- * {@link DOMStoreWriteTransaction} transactions. Subtransaction is selected by
+ * {@link DOMStoreWriteTransaction} transactions. A sub-transaction is selected by
* {@link LogicalDatastoreType} type parameter in:
*
* <ul>
* invocation with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} for underlying
* transactions.
*
- * @param <T>
- * Subtype of {@link DOMStoreWriteTransaction} which is used as
+ * @param <T> Subtype of {@link DOMStoreWriteTransaction} which is used as
* subtransaction.
*/
class DOMForwardedWriteTransaction<T extends DOMStoreWriteTransaction> extends
AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, DOMDataCommitImplementation> IMPL_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, DOMDataCommitImplementation.class, "commitImpl");
+ @SuppressWarnings("rawtypes")
+ private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, Future> FUTURE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class);
+ private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
/**
- * Implementation of real commit.
- *
- * Transaction can not be commited if commitImpl is null,
- * so this seting this property to null is also used to
- * prevent write to
- * already commited / canceled transaction {@link #checkNotCanceled()
- *
- *
+ * Implementation of real commit. It also acts as an indication that
+ * the transaction is running -- which we flip atomically using
+ * {@link #IMPL_UPDATER}.
*/
- @GuardedBy("this")
private volatile DOMDataCommitImplementation commitImpl;
/**
+ * Future task of transaction commit. It starts off as null, but is
+ * set appropriately on {@link #submit()} and {@link #cancel()} via
+ * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
*
- * Future task of transaction commit.
- *
- * This value is initially null, and is once updated if transaction
- * is commited {@link #commit()}.
- * If this future exists, transaction MUST not be commited again
- * and all modifications should fail. See {@link #checkNotCommited()}.
- *
+ * Lazy set is safe for use because it is only referenced in the
+ * {@link #cancel()} slow path, where we will busy-wait for it. The
+ * fast path gets the benefit of a store-store barrier instead of the
+ * usual store-load barrier.
*/
- @GuardedBy("this")
- private volatile CheckedFuture<Void, TransactionCommitFailedException> commitFuture;
+ private volatile Future<?> commitFuture;
protected DOMForwardedWriteTransaction(final Object identifier,
- final ImmutableMap<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
+ final Map<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
super(identifier, backingTxs);
this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
}
@Override
public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).write(path, data);
}
@Override
public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).delete(path);
}
@Override
public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
- checkNotReady();
+ checkRunning(commitImpl);
getSubtransaction(store).merge(path, data);
}
@Override
- public synchronized boolean cancel() {
- // Transaction is already canceled, we are safe to return true
- final boolean cancelationResult;
- if (commitImpl == null && commitFuture != null) {
- // Transaction is submitted, we try to cancel future.
- cancelationResult = commitFuture.cancel(false);
- } else if(commitImpl == null) {
+ public boolean cancel() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ if (impl != null) {
+ LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+ FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
return true;
- } else {
- cancelationResult = true;
- commitImpl = null;
}
- return cancelationResult;
+ // The transaction is in the process of being submitted or cancelled. Busy-wait
+ // for the corresponding future.
+ Future<?> future;
+ do {
+ future = commitFuture;
+ } while (future == null);
+
+ return future.cancel(false);
}
@Override
- public synchronized ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
}
@Override
- public CheckedFuture<Void,TransactionCommitFailedException> submit() {
- checkNotReady();
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+ checkRunning(impl);
- ImmutableList.Builder<DOMStoreThreePhaseCommitCohort> cohortsBuilder = ImmutableList.builder();
- for (DOMStoreWriteTransaction subTx : getSubtransactions()) {
- cohortsBuilder.add(subTx.ready());
- }
- ImmutableList<DOMStoreThreePhaseCommitCohort> cohorts = cohortsBuilder.build();
- commitFuture = commitImpl.submit(this, cohorts);
-
- /*
- *We remove reference to Commit Implementation in order
- *to prevent memory leak
- */
- commitImpl = null;
- return commitFuture;
- }
+ final Collection<T> txns = getSubtransactions();
+ final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
- private void checkNotReady() {
- checkNotCommited();
- checkNotCanceled();
- }
+ // FIXME: deal with errors thrown by the backend (ready() and submit() can fail in theory)
+ for (DOMStoreWriteTransaction txn : txns) {
+ cohorts.add(txn.ready());
+ }
- private void checkNotCanceled() {
- Preconditions.checkState(commitImpl != null, "Transaction was canceled.");
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+ FUTURE_UPDATER.lazySet(this, ret);
+ return ret;
}
- private void checkNotCommited() {
- checkState(commitFuture == null, "Transaction was already submited.");
+ private void checkRunning(final DOMDataCommitImplementation impl) {
+ Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
}
-}
\ No newline at end of file
+}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.sal.core.api.data.DataStore;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class HashMapDataStore implements DataStore, AutoCloseable {
- private static final Logger LOG = LoggerFactory
- .getLogger(HashMapDataStore.class);
-
- private final Map<YangInstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
- private final Map<YangInstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-
- @Override
- public boolean containsConfigurationPath(final YangInstanceIdentifier path) {
- return configuration.containsKey(path);
- }
-
- @Override
- public boolean containsOperationalPath(final YangInstanceIdentifier path) {
- return operational.containsKey(path);
- }
-
- @Override
- public Iterable<YangInstanceIdentifier> getStoredConfigurationPaths() {
- return configuration.keySet();
- }
-
- @Override
- public Iterable<YangInstanceIdentifier> getStoredOperationalPaths() {
- return operational.keySet();
- }
-
- @Override
- public CompositeNode readConfigurationData(final YangInstanceIdentifier path) {
- LOG.trace("Reading configuration path {}", path);
- return configuration.get(path);
- }
-
- @Override
- public CompositeNode readOperationalData(YangInstanceIdentifier path) {
- LOG.trace("Reading operational path {}", path);
- return operational.get(path);
- }
-
- @Override
- public DataCommitHandler.DataCommitTransaction<YangInstanceIdentifier, CompositeNode> requestCommit(
- final DataModification<YangInstanceIdentifier, CompositeNode> modification) {
- return new HashMapDataStoreTransaction(modification, this);
- }
-
- public RpcResult<Void> rollback(HashMapDataStoreTransaction transaction) {
- return RpcResultBuilder.<Void> success().build();
- }
-
- public RpcResult<Void> finish(HashMapDataStoreTransaction transaction) {
- final DataModification<YangInstanceIdentifier, CompositeNode> modification = transaction
- .getModification();
- for (final YangInstanceIdentifier removal : modification
- .getRemovedConfigurationData()) {
- LOG.trace("Removing configuration path {}", removal);
- remove(configuration, removal);
- }
- for (final YangInstanceIdentifier removal : modification
- .getRemovedOperationalData()) {
- LOG.trace("Removing operational path {}", removal);
- remove(operational, removal);
- }
- if (LOG.isTraceEnabled()) {
- for (final YangInstanceIdentifier a : modification
- .getUpdatedConfigurationData().keySet()) {
- LOG.trace("Adding configuration path {}", a);
- }
- for (final YangInstanceIdentifier a : modification
- .getUpdatedOperationalData().keySet()) {
- LOG.trace("Adding operational path {}", a);
- }
- }
- configuration.putAll(modification.getUpdatedConfigurationData());
- operational.putAll(modification.getUpdatedOperationalData());
-
- return RpcResultBuilder.<Void> success().build();
- }
-
- public void remove(final Map<YangInstanceIdentifier, CompositeNode> map,
- final YangInstanceIdentifier identifier) {
- Set<YangInstanceIdentifier> affected = new HashSet<YangInstanceIdentifier>();
- for (final YangInstanceIdentifier path : map.keySet()) {
- if (identifier.contains(path)) {
- affected.add(path);
- }
- }
- for (final YangInstanceIdentifier pathToRemove : affected) {
- LOG.trace("Removed path {}", pathToRemove);
- map.remove(pathToRemove);
- }
- }
-
- @Override
- public void close() {
- // NOOP
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class HashMapDataStoreTransaction implements
- DataCommitTransaction<YangInstanceIdentifier, CompositeNode> {
- private final DataModification<YangInstanceIdentifier, CompositeNode> modification;
- private final HashMapDataStore datastore;
-
- HashMapDataStoreTransaction(
- final DataModification<YangInstanceIdentifier, CompositeNode> modify,
- final HashMapDataStore store) {
- modification = modify;
- datastore = store;
- }
-
- @Override
- public RpcResult<Void> finish() throws IllegalStateException {
- return datastore.finish(this);
- }
-
- @Override
- public DataModification<YangInstanceIdentifier, CompositeNode> getModification() {
- return this.modification;
- }
-
- @Override
- public RpcResult<Void> rollback() throws IllegalStateException {
- return datastore.rollback(this);
- }
-}
\ No newline at end of file
config:provided-service sal:dom-async-data-broker;
}
- identity hash-map-data-store {
- base config:module-type;
- config:provided-service sal:dom-data-store;
- config:java-name-prefix HashMapDataStore;
- }
-
identity schema-service-singleton {
base config:module-type;
config:provided-service sal:schema-service;
}
}
- augment "/config:modules/config:module/config:state" {
- case hash-map-data-store {
- when "/config:modules/config:module/config:type = 'hash-map-data-store'";
- }
- }
-
augment "/config:modules/config:module/config:state" {
case schema-service-singleton {
when "/config:modules/config:module/config:type = 'schema-service-singleton'";
}
}
}
-}
\ No newline at end of file
+}
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
public class DOMBrokerTest {
private SchemaContext schemaContext;
commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
domBroker = new DOMDataBrokerImpl(stores, executor);
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
Futures.addCallback( writeTx.submit(), new FutureCallback<Void>() {
@Override
- public void onSuccess( Void result ) {
+ public void onSuccess( final Void result ) {
commitCompletedLatch.countDown();
}
@Override
- public void onFailure( Throwable t ) {
+ public void onFailure( final Throwable t ) {
caughtCommitEx.set( t );
commitCompletedLatch.countDown();
}
TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
private final CountDownLatch latch = new CountDownLatch( 1 );
@Override
- public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+ public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
this.change = change;
latch.countDown();
}
ExecutorService delegate;
- public CommitExecutorService( ExecutorService delegate ) {
+ public CommitExecutorService( final ExecutorService delegate ) {
this.delegate = delegate;
}
package org.opendaylight.controller.md.sal.dom.broker.impl.jmx;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import org.junit.Test;
import org.opendaylight.yangtools.util.DurationStatsTracker;
commitStatsTracker.addDuration(100);
- String prefix = "100.0 ns";
assertEquals("getTotalCommits", 1L, bean.getTotalCommits());
- assertEquals("getLongestCommitTime starts with \"" + prefix + "\"", true,
- bean.getLongestCommitTime().startsWith("100.0 ns"));
- assertEquals("getShortestCommitTime starts with \"" + prefix + "\"", true,
- bean.getShortestCommitTime().startsWith(prefix));
- assertEquals("getAverageCommitTime starts with \"" + prefix + "\"", true,
- bean.getAverageCommitTime().startsWith(prefix));
+ assertNotNull(bean.getLongestCommitTime());
+ assertNotNull(bean.getShortestCommitTime());
+ assertNotNull(bean.getAverageCommitTime());
}
}
return this.bluePrint;
}
- public List<Object> collectModuleRoots(XSQLBluePrintNode table) {
+ public List<Object> collectModuleRoots(XSQLBluePrintNode table,LogicalDatastoreType type) {
if (table.getParent().isModule()) {
try {
List<Object> result = new LinkedList<Object>();
.toInstance();
DOMDataReadTransaction t = this.domDataBroker
.newReadOnlyTransaction();
- Object node = t.read(LogicalDatastoreType.OPERATIONAL,
+ Object node = t.read(type,
instanceIdentifier).get();
+
node = XSQLODLUtils.get(node, "reference");
if (node == null) {
return result;
XSQLAdapter.log(err);
}
} else {
- return collectModuleRoots(table.getParent());
+ return collectModuleRoots(table.getParent(),type);
}
return null;
}
public void execute(JDBCResultSet rs) {
List<XSQLBluePrintNode> tables = rs.getTables();
- List<Object> roots = collectModuleRoots(tables.get(0));
+ List<Object> roots = collectModuleRoots(tables.get(0),LogicalDatastoreType.OPERATIONAL);
+ roots.addAll(collectModuleRoots(tables.get(0),LogicalDatastoreType.CONFIGURATION));
+ if(roots.isEmpty()){
+ rs.setFinished(true);
+ }
XSQLBluePrintNode main = rs.getMainTable();
List<NETask> tasks = new LinkedList<XSQLAdapter.NETask>();
out.print(prompt);
char c = 0;
byte data[] = new byte[1];
- while (c != '\n') {
+ while (!socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && c != '\n') {
try {
in.read(data);
c = (char) data[0];
inputString.append(c);
} catch (Exception err) {
err.printStackTrace(out);
+ stopped = true;
+ break;
}
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
* to implement {@link DOMStore} contract.
*
*/
-public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
- TransactionReadyPrototype,AutoCloseable {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
private final ListenerTree listenerTree = ListenerTree.create();
private final AtomicLong txCounter = new AtomicLong(0);
- private final ListeningExecutorService listeningExecutor;
private final QueuedNotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> dataChangeListenerNotificationManager;
private final ExecutorService dataChangeListenerExecutor;
-
- private final ExecutorService domStoreExecutor;
+ private final ListeningExecutorService commitExecutor;
private final boolean debugTransactions;
private final String name;
private volatile AutoCloseable closeable;
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor) {
- this(name, domStoreExecutor, dataChangeListenerExecutor,
+ this(name, commitExecutor, dataChangeListenerExecutor,
InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false);
}
- public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
final ExecutorService dataChangeListenerExecutor, final int maxDataChangeListenerQueueSize,
final boolean debugTransactions) {
this.name = Preconditions.checkNotNull(name);
- this.domStoreExecutor = Preconditions.checkNotNull(domStoreExecutor);
- this.listeningExecutor = MoreExecutors.listeningDecorator(this.domStoreExecutor);
+ this.commitExecutor = Preconditions.checkNotNull(commitExecutor);
this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
this.debugTransactions = debugTransactions;
"DataChangeListenerQueueMgr");
}
- public void setCloseable(AutoCloseable closeable) {
+ public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
}
public ExecutorService getDomStoreExecutor() {
- return domStoreExecutor;
+ return commitExecutor;
}
@Override
@Override
public void close() {
- ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+ ExecutorServiceUtil.tryGracefulShutdown(commitExecutor, 30, TimeUnit.SECONDS);
ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
if(closeable != null) {
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction writeTx) {
- LOG.debug("Tx: {} is submitted. Modifications: {}", writeTx.getIdentifier(), writeTx.getMutatedView());
- return new ThreePhaseCommitImpl(writeTx);
+ protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ LOG.debug("Tx: {} is closed.", tx.getIdentifier());
+ }
+
+ @Override
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
+ return new ThreePhaseCommitImpl(tx, tree);
}
private Object nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private class DOMStoreTransactionChainImpl implements DOMStoreTransactionChain, TransactionReadyPrototype {
-
+ private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
+ @GuardedBy("this")
+ private SnapshotBackedWriteTransaction allocatedTransaction;
+ @GuardedBy("this")
+ private DataTreeSnapshot readySnapshot;
@GuardedBy("this")
- private SnapshotBackedWriteTransaction latestOutstandingTx;
-
private boolean chainFailed = false;
+ @GuardedBy("this")
private void checkFailed() {
Preconditions.checkState(!chainFailed, "Transaction chain is failed.");
}
- @Override
- public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
- final DataTreeSnapshot snapshot;
+ @GuardedBy("this")
+ private DataTreeSnapshot getSnapshot() {
checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
+
+ if (allocatedTransaction != null) {
+ Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier());
+ return readySnapshot;
} else {
- snapshot = dataTree.takeSnapshot();
+ return dataTree.takeSnapshot();
}
+ }
+
+ @GuardedBy("this")
+ private <T extends SnapshotBackedWriteTransaction> T recordTransaction(final T transaction) {
+ allocatedTransaction = transaction;
+ readySnapshot = null;
+ return transaction;
+ }
+
+ @Override
+ public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
+ final DataTreeSnapshot snapshot = getSnapshot();
return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot);
}
@Override
public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
- }
- final SnapshotBackedReadWriteTransaction ret = new SnapshotBackedReadWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
}
@Override
public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() {
- final DataTreeSnapshot snapshot;
- checkFailed();
- if (latestOutstandingTx != null) {
- checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
- snapshot = latestOutstandingTx.getMutatedView();
- } else {
- snapshot = dataTree.takeSnapshot();
+ final DataTreeSnapshot snapshot = getSnapshot();
+ return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(),
+ getDebugTransactions(), snapshot, this));
+ }
+
+ @Override
+ protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ if (tx.equals(allocatedTransaction)) {
+ Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot);
+ allocatedTransaction = null;
}
- final SnapshotBackedWriteTransaction ret = new SnapshotBackedWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this);
- latestOutstandingTx = ret;
- return ret;
}
@Override
- public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction tx) {
- DOMStoreThreePhaseCommitCohort storeCohort = InMemoryDOMDataStore.this.ready(tx);
- return new ChainedTransactionCommitImpl(tx, storeCohort, this);
+ protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction);
+ if (readySnapshot != null) {
+ // The snapshot should have been cleared
+ LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree);
+ }
+
+ final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree);
+ readySnapshot = tree;
+ return new ChainedTransactionCommitImpl(tx, cohort, this);
}
@Override
public void close() {
-
// FIXME: this call doesn't look right here - listeningExecutor is shared and owned
// by the outer class.
//listeningExecutor.shutdownNow();
protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
final Throwable t) {
chainFailed = true;
-
}
public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If committed transaction is latestOutstandingTx we clear
- // latestOutstandingTx
- // field in order to base new transactions on Datastore Data Tree
- // directly.
- if (transaction.equals(latestOutstandingTx)) {
- latestOutstandingTx = null;
+ // If the committed transaction was the one we allocated last,
+ // we clear it and the ready snapshot, so the next transaction
+ // allocated refers to the data tree directly.
+ if (transaction.equals(allocatedTransaction)) {
+ if (readySnapshot == null) {
+ LOG.warn("Transaction {} committed while no ready snapshot present", transaction);
+ }
+
+ allocatedTransaction = null;
+ readySnapshot = null;
}
}
-
}
private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DOMStoreThreePhaseCommitCohort delegate;
-
private final DOMStoreTransactionChainImpl txChain;
protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- super();
this.transaction = transaction;
this.delegate = delegate;
this.txChain = txChain;
public void onSuccess(final Void result) {
txChain.onTransactionCommited(transaction);
}
-
});
return commitFuture;
}
-
}
private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
private final SnapshotBackedWriteTransaction transaction;
private final DataTreeModification modification;
private ResolveDataChangeEventsTask listenerResolver;
private DataTreeCandidate candidate;
- public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction) {
+ public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
this.transaction = writeTransaction;
- this.modification = transaction.getMutatedView();
+ this.modification = modification;
}
@Override
public ListenableFuture<Boolean> canCommit() {
- return listeningExecutor.submit(new Callable<Boolean>() {
+ return commitExecutor.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws TransactionCommitFailedException {
try {
@Override
public ListenableFuture<Void> preCommit() {
- return listeningExecutor.submit(new Callable<Void>() {
+ return commitExecutor.submit(new Callable<Void>() {
@Override
public Void call() {
candidate = dataTree.prepare(modification);
* The commit has to occur atomically with regard to listener
* registrations.
*/
- synchronized (this) {
+ synchronized (InMemoryDOMDataStore.this) {
dataTree.commit(candidate);
listenerResolver.resolve(dataChangeListenerNotificationManager);
}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import javax.annotation.Nullable;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
@Nullable final InMemoryDOMDataStoreConfigProperties properties) {
InMemoryDOMDataStoreConfigProperties actualProperties = properties;
- if(actualProperties == null) {
+ if (actualProperties == null) {
actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault();
}
// task execution time to get higher throughput as DataChangeListeners typically provide
// much of the business logic for a data model. If the executor queue size limit is reached,
// subsequent submitted notifications will block the calling thread.
-
int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize();
int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize();
ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
- ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- actualProperties.getMaxDataStoreExecutorQueueSize(), "DOMStore-" + name );
-
- InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
- domStoreExecutor, dataChangeListenerExecutor,
+ final ListeningExecutorService commitExecutor = MoreExecutors.sameThreadExecutor();
+ final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+ commitExecutor, dataChangeListenerExecutor,
actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions);
- if(schemaService != null) {
+ if (schemaService != null) {
schemaService.registerSchemaContextListener(dataStore);
}
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
-
import java.util.Collection;
import java.util.Map.Entry;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
Preconditions.checkArgument(node.getDataAfter().isPresent(),
"Modification at {} has type {} but no after-data", state.getPath(), node.getModificationType());
if (!node.getDataBefore().isPresent()) {
- resolveCreateEvent(state, node.getDataAfter().get());
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ final NormalizedNode<PathArgument, ?> afterNode = (NormalizedNode)node.getDataAfter().get();
+ resolveSameEventRecursivelly(state, afterNode, DOMImmutableDataChangeEvent.getCreateEventFactory());
return true;
}
case DELETE:
Preconditions.checkArgument(node.getDataBefore().isPresent(),
"Modification at {} has type {} but no before-data", state.getPath(), node.getModificationType());
- resolveDeleteEvent(state, node.getDataBefore().get());
+
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ final NormalizedNode<PathArgument, ?> beforeNode = (NormalizedNode)node.getDataBefore().get();
+ resolveSameEventRecursivelly(state, beforeNode, DOMImmutableDataChangeEvent.getRemoveEventFactory());
return true;
case UNMODIFIED:
return false;
return true;
}
- /**
- * Resolves create events deep down the interest listener tree.
- *
- * @param path
- * @param listeners
- * @param afterState
- * @return
- */
- private void resolveCreateEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> afterState) {
- @SuppressWarnings({ "unchecked", "rawtypes" })
- final NormalizedNode<PathArgument, ?> node = (NormalizedNode) afterState;
- resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getCreateEventFactory());
- }
-
- private void resolveDeleteEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> beforeState) {
- @SuppressWarnings({ "unchecked", "rawtypes" })
- final NormalizedNode<PathArgument, ?> node = (NormalizedNode) beforeState;
- resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getRemoveEventFactory());
- }
-
private void resolveSameEventRecursivelly(final ResolveDataChangeState state,
final NormalizedNode<PathArgument, ?> node, final SimpleEventFactory eventFactory) {
if (!state.needsProcessing()) {
Preconditions.checkArgument(modification.getDataBefore().isPresent(), "Subtree change with before-data not present at path %s", state.getPath());
Preconditions.checkArgument(modification.getDataAfter().isPresent(), "Subtree change with after-data not present at path %s", state.getPath());
+ if (!state.needsProcessing()) {
+ LOG.trace("Not processing modified subtree {}", state.getPath());
+ return true;
+ }
+
DataChangeScope scope = null;
for (DataTreeCandidateNode childMod : modification.getChildNodes()) {
final ResolveDataChangeState childState = state.child(childMod.getIdentifier());
/**
* Inherited from immediate parent
*/
- private final Iterable<Builder> inheritedOne;
+ private final Collection<Builder> inheritedOne;
private final YangInstanceIdentifier nodeId;
private final Collection<Node> nodes;
- private final Map<DataChangeListenerRegistration<?>, Builder> subBuilders = new HashMap<>();
- private final Map<DataChangeListenerRegistration<?>, Builder> oneBuilders = new HashMap<>();
- private final Map<DataChangeListenerRegistration<?>, Builder> baseBuilders = new HashMap<>();
+ private final Map<DataChangeListenerRegistration<?>, Builder> subBuilders;
+ private final Map<DataChangeListenerRegistration<?>, Builder> oneBuilders;
+ private final Map<DataChangeListenerRegistration<?>, Builder> baseBuilders;
private ResolveDataChangeState(final YangInstanceIdentifier nodeId,
- final Iterable<Builder> inheritedSub, final Iterable<Builder> inheritedOne,
+ final Iterable<Builder> inheritedSub, final Collection<Builder> inheritedOne,
final Collection<Node> nodes) {
this.nodeId = Preconditions.checkNotNull(nodeId);
this.nodes = Preconditions.checkNotNull(nodes);
/*
* Collect the nodes which need to be propagated from us to the child.
*/
+ final Map<DataChangeListenerRegistration<?>, Builder> sub = new HashMap<>();
+ final Map<DataChangeListenerRegistration<?>, Builder> one = new HashMap<>();
+ final Map<DataChangeListenerRegistration<?>, Builder> base = new HashMap<>();
for (Node n : nodes) {
for (DataChangeListenerRegistration<?> l : n.getListeners()) {
final Builder b = DOMImmutableDataChangeEvent.builder(DataChangeScope.BASE);
switch (l.getScope()) {
case BASE:
- baseBuilders.put(l, b);
+ base.put(l, b);
break;
case ONE:
- oneBuilders.put(l, b);
+ one.put(l, b);
break;
case SUBTREE:
- subBuilders.put(l, b);
+ sub.put(l, b);
break;
}
}
}
+
+ baseBuilders = maybeEmpty(base);
+ oneBuilders = maybeEmpty(one);
+ subBuilders = maybeEmpty(sub);
+ }
+
+ private static <K, V> Map<K, V> maybeEmpty(final Map<K, V> map) {
+ if (map.isEmpty()) {
+ return Collections.emptyMap();
+ }
+ return map;
}
/**
* @return State handle
*/
public ResolveDataChangeState child(final PathArgument childId) {
- return new ResolveDataChangeState(nodeId.node(childId),
- Iterables.concat(inheritedSub, subBuilders.values()),
+ /*
+ * We instantiate a concatenation only when needed:
+ *
+ * 1) If our collection is empty, we reuse the parent's. This is typically the case
+ * for intermediate nodes, which should be the vast majority.
+ * 2) If the parent's iterable is a Collection and it is empty, reuse our collection.
+ * This is the case for the first node which defines a subtree listener in a
+ * particular subtree.
+ * 3) Concatenate the two collections. This happens when we already have some
+ * subtree listeners and we encounter a node which adds a few more.
+ *
+ * This allows us to lower number of objects allocated and also
+ * speeds up Iterables.isEmpty() in needsProcessing().
+ *
+ * Note that the check for Collection in 2) relies on precisely this logic, which
+ * ensures that we simply cannot see an empty concatenation, but rather start off with
+ * an empty collection, then switch to a non-empty collection and finally switch to
+ * a concatenation. This saves us from instantiating iterators, which a trivial
+ * Iterables.isEmpty() would do as soon as we cross case 3).
+ */
+ final Iterable<Builder> sb;
+ if (!subBuilders.isEmpty()) {
+ if (inheritedSub instanceof Collection && ((Collection<?>) inheritedSub).isEmpty()) {
+ sb = subBuilders.values();
+ } else {
+ sb = Iterables.concat(inheritedSub, subBuilders.values());
+ }
+ } else {
+ sb = inheritedSub;
+ }
+
+ return new ResolveDataChangeState(nodeId.node(childId), sb,
oneBuilders.values(), getListenerChildrenWildcarded(nodes, childId));
}
if (!nodes.isEmpty()) {
return true;
}
- // Have SUBTREE listeners
- if (!Iterables.isEmpty(inheritedSub)) {
- return true;
- }
// Have ONE listeners
- if (!Iterables.isEmpty(inheritedOne)) {
+ if (!inheritedOne.isEmpty()) {
return true;
}
- return false;
+ /*
+ * Have SUBTREE listeners
+ *
+ * This is a slightly magical replacement for !Iterables.isEmpty(inheritedSub).
+ * It relies on the logic in child(), which gives us the guarantee that when
+ * inheritedSub is not a Collection, it is guaranteed to be non-empty (which
+ * means we need to process). If it is a collection, we still need to check
+ * it for emptiness.
+ *
+ * Unlike Iterables.isEmpty() this code does not instantiate any temporary
+ * objects and is thus more efficient.
+ */
+ if (inheritedSub instanceof Collection) {
+ return !((Collection<?>) inheritedSub).isEmpty();
+ }
+
+ // Non-Collection => non-empty => have to process
+ return true;
}
/**
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkNotNull;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* and executed according to {@link TransactionReadyPrototype}.
*
*/
-class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
- implements DOMStoreReadWriteTransaction {
-
+final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
/**
LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
checkNotNull(path, "Path must not be null.");
- DataTreeModification dataView = getMutatedView();
- if(dataView == null) {
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
- }
-
+ final Optional<NormalizedNode<?, ?>> result;
try {
- return Futures.immediateCheckedFuture(dataView.readNode(path));
+ result = readSnapshotNode(path);
} catch (Exception e) {
LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
- return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e));
+ }
+
+ if (result == null) {
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
+ } else {
+ return Futures.immediateCheckedFuture(result);
}
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static com.google.common.base.Preconditions.checkState;
-
import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*
*/
class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
-
private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
- private DataTreeModification mutableTree;
- private boolean ready = false;
- private TransactionReadyPrototype readyImpl;
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+ private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
+
+ // non-null when not ready
+ private volatile TransactionReadyPrototype readyImpl;
+ // non-null when not committed/closed
+ private volatile DataTreeModification mutableTree;
/**
* Creates new write-only transaction.
public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
super(identifier, debug);
- mutableTree = snapshot.newModification();
this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
+ mutableTree = snapshot.newModification();
LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot);
}
- @Override
- public void close() {
- LOG.debug("Store transaction: {} : Closed", getIdentifier());
- this.mutableTree = null;
- this.readyImpl = null;
- }
-
@Override
public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
- mutableTree.write(path, data);
+ tree.write(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
+
try {
- LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
- mutableTree.merge(path, data);
+ tree.merge(path, data);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+ LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
@Override
public void delete(final YangInstanceIdentifier path) {
checkNotReady();
+
+ final DataTreeModification tree = mutableTree;
+ LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
+
try {
- LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
- mutableTree.delete(path);
+ tree.delete(path);
// FIXME: Add checked exception
} catch (Exception e) {
- LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, mutableTree, e);
+ LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e);
// Rethrow original ones if they are subclasses of RuntimeException
// or Error
Throwables.propagateIfPossible(e);
}
}
- protected final boolean isReady() {
- return ready;
+ /**
+ * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does
+ * not allow data access after the transaction has been closed or readied.
+ *
+ * @param path Path to read
+ * @return null if the transaction has been closed; the snapshot read result otherwise.
+ */
+ protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+ return readyImpl == null ? null : mutableTree.readNode(path);
}
- protected final void checkNotReady() {
- checkState(!ready, "Transaction %s is ready. No further modifications allowed.", getIdentifier());
+ private final void checkNotReady() {
+ checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.", getIdentifier());
}
@Override
- public synchronized DOMStoreThreePhaseCommitCohort ready() {
- checkState(!ready, "Transaction %s is already ready.", getIdentifier());
- ready = true;
+ public DOMStoreThreePhaseCommitCohort ready() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
+
LOG.debug("Store transaction: {} : Ready", getIdentifier());
- mutableTree.ready();
- return readyImpl.ready(this);
+
+ final DataTreeModification tree = mutableTree;
+ TREE_UPDATER.lazySet(this, null);
+ tree.ready();
+ return wasReady.transactionReady(this, tree);
}
- protected DataTreeModification getMutatedView() {
- return mutableTree;
+ @Override
+ public void close() {
+ final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+ if (wasReady != null) {
+ LOG.debug("Store transaction: {} : Closed", getIdentifier());
+ TREE_UPDATER.lazySet(this, null);
+ wasReady.transactionAborted(this);
+ } else {
+ LOG.debug("Store transaction: {} : Closed after submit", getIdentifier());
+ }
}
@Override
protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
- return toStringHelper.add("ready", isReady());
+ return toStringHelper.add("ready", readyImpl == null);
}
/**
* providing underlying logic for applying implementation.
*
*/
- public static interface TransactionReadyPrototype {
+ abstract static class TransactionReadyPrototype {
+ /**
+ * Called when a transaction is closed without being readied. This is not invoked for
+ * transactions which are ready.
+ *
+ * @param tx Transaction which got aborted.
+ */
+ protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
/**
* Returns a commit coordinator associated with supplied transactions.
*
* @param tx
* Transaction on which ready was invoked.
+ * @param tree
+ * Modified data tree which has been constructed.
* @return DOMStoreThreePhaseCommitCohort associated with transaction
*/
- DOMStoreThreePhaseCommitCohort ready(SnapshotBackedWriteTransaction tx);
+ protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
}
}
\ No newline at end of file
package org.opendaylight.controller.md.sal.dom.store.impl.jmx;
import java.util.concurrent.ExecutorService;
-
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
*/
public class InMemoryDataStoreStats implements AutoCloseable {
- private final ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean;
- private final ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean;
+ private final AbstractMXBean notificationExecutorStatsBean;
+ private final AbstractMXBean dataStoreExecutorStatsBean;
private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
- public InMemoryDataStoreStats(String mBeanType, QueuedNotificationManager<?, ?> manager,
- ExecutorService dataStoreExecutor) {
+ public InMemoryDataStoreStats(final String mBeanType, final QueuedNotificationManager<?, ?> manager,
+ final ExecutorService dataStoreExecutor) {
- this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
+ notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
"notification-manager", mBeanType, null);
notificationManagerStatsBean.registerMBean();
- this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
"notification-executor", mBeanType, null);
- this.notificationExecutorStatsBean.registerMBean();
+ if (notificationExecutorStatsBean != null) {
+ notificationExecutorStatsBean.registerMBean();
+ }
- this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dataStoreExecutor,
+ dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dataStoreExecutor,
"data-store-executor", mBeanType, null);
- this.dataStoreExecutorStatsBean.registerMBean();
+ if (dataStoreExecutorStatsBean != null) {
+ dataStoreExecutorStatsBean.registerMBean();
+ }
}
@Override
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
-
+import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
-
import org.opendaylight.controller.netconf.client.NetconfClientSession;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
return fromStrings(session.getServerCapabilities());
}
+ private static QName cachedQName(final String namespace, final String revision, final String moduleName) {
+ return QName.cachedReference(QName.create(namespace, revision, moduleName));
+ }
+
+ private static QName cachedQName(final String namespace, final String moduleName) {
+ return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
+ }
+
public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
final Set<QName> moduleBasedCaps = new HashSet<>();
final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
String revision = REVISION_PARAM.from(queryParams);
if (revision != null) {
- moduleBasedCaps.add(QName.create(namespace, revision, moduleName));
- nonModuleCaps.remove(capability);
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
continue;
}
* We have seen devices which mis-escape revision, but the revision may not
* even be there. First check if there is a substring that matches revision.
*/
- if (!Iterables.any(queryParams, CONTAINS_REVISION)) {
+ if (Iterables.any(queryParams, CONTAINS_REVISION)) {
+
+ LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
+ revision = BROKEN_REVISON_PARAM.from(queryParams);
+ if (revision == null) {
+ LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
+ } else {
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
+ }
continue;
}
- LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
- revision = BROKEN_REVISON_PARAM.from(queryParams);
- if (revision == null) {
- LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
- }
-
- // FIXME: do we really want to continue here?
- moduleBasedCaps.add(QName.cachedReference(QName.create(namespace, revision, moduleName)));
- nonModuleCaps.remove(capability);
+ // Fallback, no revision provided for module
+ addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
}
return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
}
+
+
+ private static void addModuleQName(final Set<QName> moduleBasedCaps, final Set<String> nonModuleCaps, final String capability, final QName qName) {
+ moduleBasedCaps.add(qName);
+ nonModuleCaps.remove(capability);
+ }
}
assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
}
+ @Test
+ public void testCapabilityNoRevision() throws Exception {
+ final List<String> caps1 = Lists.newArrayList(
+ "namespace:2?module=module2",
+ "namespace:2?module=module2&revision=2012-12-12",
+ "namespace:2?module=module1&RANDOMSTRING;revision=2013-12-12",
+ "namespace:2?module=module2&RANDOMSTRING;revision=2013-12-12" // Should be ignored (same module as the first entry), since its revision is incorrectly formatted
+ );
+
+ final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+ assertCaps(sessionCaps1, 0, 3);
+ }
+
private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-slf4j_${scala.version}</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-persistence-experimental_${scala.version}</artifactId>
+ </dependency>
<!-- SAL Dependencies -->
<dependency>
package org.opendaylight.controller.config.yang.config.remote_rpc_connector;
+import org.opendaylight.controller.cluster.common.actor.DefaultAkkaConfigurationReader;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import org.opendaylight.controller.remote.rpc.RemoteRpcProviderFactory;
import org.opendaylight.controller.sal.core.api.Broker;
import org.osgi.framework.BundleContext;
@Override
public java.lang.AutoCloseable createInstance() {
Broker broker = getDomBrokerDependency();
- return RemoteRpcProviderFactory.createInstance(broker, bundleContext);
+
+ RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder(getActorSystemName())
+ .metricCaptureEnabled(getEnableMetricCapture())
+ .mailboxCapacity(getBoundedMailboxCapacity())
+ .withConfigReader(new DefaultAkkaConfigurationReader())
+ .build();
+
+ return RemoteRpcProviderFactory.createInstance(broker, bundleContext, config);
}
public void setBundleContext(BundleContext bundleContext) {
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
-import org.opendaylight.controller.remote.rpc.messages.Monitor;
-
-public abstract class AbstractUntypedActor extends UntypedActor {
- protected final LoggingAdapter LOG =
- Logging.getLogger(getContext().system(), this);
-
-
- public AbstractUntypedActor(){
- LOG.debug("Actor created {}", getSelf());
- getContext().
- system().
- actorSelection("user/termination-monitor").
- tell(new Monitor(getSelf()), getSelf());
- }
-
- @Override public void onReceive(Object message) throws Exception {
- LOG.debug("Received message {}", message);
- handleReceive(message);
- LOG.debug("Done handling message {}", message);
- }
-
- protected abstract void handleReceive(Object message) throws Exception;
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-
-public class ActorConstants {
- public static final String RPC_BROKER = "rpc-broker";
- public static final String RPC_REGISTRY = "rpc-registry";
- public static final String RPC_MANAGER = "rpc";
-
- public static final String RPC_BROKER_PATH= "/user/rpc/rpc-broker";
- public static final String RPC_REGISTRY_PATH = "/user/rpc/rpc-registry";
- public static final String RPC_MANAGER_PATH = "/user/rpc";
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorSystem;
-import akka.osgi.BundleDelegatingClassLoader;
-import org.opendaylight.controller.remote.rpc.utils.AkkaConfigurationReader;
-import org.osgi.framework.BundleContext;
-
-
-public class ActorSystemFactory {
-
- public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-rpc";
- public static final String CONFIGURATION_NAME = "odl-cluster-rpc";
-
- private static volatile ActorSystem actorSystem = null;
-
- public static final ActorSystem getInstance(){
- return actorSystem;
- }
-
- /**
- * This method should be called only once during initialization
- *
- * @param bundleContext
- */
- public static final void createInstance(final BundleContext bundleContext, AkkaConfigurationReader akkaConfigurationReader) {
- if(actorSystem == null) {
- // Create an OSGi bundle classloader for actor system
- BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
- Thread.currentThread().getContextClassLoader());
- synchronized (ActorSystemFactory.class) {
- // Double check
- if (actorSystem == null) {
- ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
- akkaConfigurationReader.read().getConfig(CONFIGURATION_NAME), classLoader);
- actorSystem = system;
- }
- }
- } else {
- throw new IllegalStateException("Actor system should be created only once. Use getInstance method to access existing actor system");
- }
- }
-
-}
package org.opendaylight.controller.remote.rpc;
-import static akka.pattern.Patterns.ask;
import akka.actor.ActorRef;
import akka.dispatch.OnComplete;
-import akka.util.Timeout;
-
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
-
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
-import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.controller.sal.core.api.RoutedRpcDefaultImplementation;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import scala.concurrent.ExecutionContext;
import java.util.Collections;
import java.util.Set;
+import static akka.pattern.Patterns.ask;
+
public class RemoteRpcImplementation implements RpcImplementation, RoutedRpcDefaultImplementation {
private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcImplementation.class);
private final ActorRef rpcBroker;
private final SchemaContext schemaContext;
+ private final RemoteRpcProviderConfig config;
- public RemoteRpcImplementation(ActorRef rpcBroker, SchemaContext schemaContext) {
+ public RemoteRpcImplementation(ActorRef rpcBroker, SchemaContext schemaContext, RemoteRpcProviderConfig config) {
this.rpcBroker = rpcBroker;
this.schemaContext = schemaContext;
+ this.config = config;
}
@Override
final SettableFuture<RpcResult<CompositeNode>> listenableFuture = SettableFuture.create();
- scala.concurrent.Future<Object> future = ask(rpcBroker, rpcMsg,
- new Timeout(ActorUtil.ASK_DURATION));
+ scala.concurrent.Future<Object> future = ask(rpcBroker, rpcMsg, config.getAskDuration());
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcProvider.class);
- private final ActorSystem actorSystem;
private final RpcProvisionRegistry rpcProvisionRegistry;
+
+ private ActorSystem actorSystem;
private Broker.ProviderSession brokerSession;
private SchemaContext schemaContext;
private ActorRef rpcManager;
+ private RemoteRpcProviderConfig config;
public RemoteRpcProvider(ActorSystem actorSystem, RpcProvisionRegistry rpcProvisionRegistry) {
this.actorSystem = actorSystem;
this.rpcProvisionRegistry = rpcProvisionRegistry;
+ this.config = new RemoteRpcProviderConfig(actorSystem.settings().config());
}
@Override
public void close() throws Exception {
- this.actorSystem.shutdown();
+ if (this.actorSystem != null)
+ this.actorSystem.shutdown();
}
@Override
}
private void start() {
- LOG.info("Starting all rpc listeners and actors.");
- // Create actor to handle and sync routing table in cluster
+ LOG.info("Starting remote rpc service...");
+
SchemaService schemaService = brokerSession.getService(SchemaService.class);
schemaContext = schemaService.getGlobalContext();
- rpcManager = actorSystem.actorOf(RpcManager.props(schemaContext, brokerSession, rpcProvisionRegistry), ActorConstants.RPC_MANAGER);
+ rpcManager = actorSystem.actorOf(RpcManager.props(schemaContext, brokerSession, rpcProvisionRegistry),
+ config.getRpcManagerName());
- LOG.debug("Rpc actors are created.");
+ LOG.debug("rpc manager started");
}
-
@Override
public void onGlobalContextUpdated(SchemaContext schemaContext) {
this.schemaContext = schemaContext;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.util.Timeout;
+import com.typesafe.config.Config;
+import org.opendaylight.controller.cluster.common.actor.CommonConfig;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Typed accessor for the remote-rpc portion of the actor system's Typesafe
+ * {@link Config}. Extends {@link CommonConfig} and exposes the actor names,
+ * actor paths and durations used by the remote RPC actors (RpcManager,
+ * RpcBroker, RpcRegistry, Gossiper). Defaults for every key are supplied by
+ * the nested {@link Builder}; values read from the underlying config override
+ * them via {@code CommonConfig.Builder#merge()}.
+ */
+public class RemoteRpcProviderConfig extends CommonConfig {
+
+    protected static final String TAG_RPC_BROKER_NAME = "rpc-broker-name";
+    // NOTE(review): key is "registry-name", not "rpc-registry-name" — confirm this
+    // asymmetry with the broker/manager keys is intentional.
+    protected static final String TAG_RPC_REGISTRY_NAME = "registry-name";
+    protected static final String TAG_RPC_MGR_NAME = "rpc-manager-name";
+    protected static final String TAG_RPC_BROKER_PATH = "rpc-broker-path";
+    protected static final String TAG_RPC_REGISTRY_PATH = "rpc-registry-path";
+    protected static final String TAG_RPC_MGR_PATH = "rpc-manager-path";
+    protected static final String TAG_ASK_DURATION = "ask-duration";
+    private static final String TAG_GOSSIP_TICK_INTERVAL = "gossip-tick-interval";
+
+    //locally cached values
+    // Lazily populated on first access; not synchronized, so callers are assumed
+    // to use one instance per actor/thread (as the surrounding code does) — a
+    // benign duplicate computation is possible under concurrent first reads.
+    private Timeout cachedAskDuration;
+    private FiniteDuration cachedGossipTickInterval;
+
+    public RemoteRpcProviderConfig(Config config){
+        super(config);
+    }
+
+    /** Actor name of the RPC broker (child of the RPC manager). */
+    public String getRpcBrokerName(){
+        return get().getString(TAG_RPC_BROKER_NAME);
+    }
+
+    /** Actor name of the RPC registry (child of the RPC manager). */
+    public String getRpcRegistryName(){
+        return get().getString(TAG_RPC_REGISTRY_NAME);
+    }
+
+    /** Actor name of the top-level RPC manager. */
+    public String getRpcManagerName(){
+        return get().getString(TAG_RPC_MGR_NAME);
+    }
+
+    /** Absolute actor path of the RPC broker, e.g. "/user/rpc/broker". */
+    public String getRpcBrokerPath(){
+        return get().getString(TAG_RPC_BROKER_PATH);
+    }
+
+    /** Absolute actor path of the RPC registry, e.g. "/user/rpc/registry". */
+    public String getRpcRegistryPath(){
+        return get().getString(TAG_RPC_REGISTRY_PATH);
+
+    }
+
+    /** Absolute actor path of the RPC manager, e.g. "/user/rpc". */
+    public String getRpcManagerPath(){
+        return get().getString(TAG_RPC_MGR_PATH);
+    }
+
+
+    /**
+     * Timeout used for all {@code Patterns.ask} calls made by the remote RPC
+     * actors. Parsed once from the "ask-duration" config key (any HOCON
+     * duration format) and cached for subsequent calls.
+     */
+    public Timeout getAskDuration(){
+        if (cachedAskDuration != null){
+            return cachedAskDuration;
+        }
+
+        // Read the HOCON duration at nanosecond precision so no resolution is lost.
+        cachedAskDuration = new Timeout(new FiniteDuration(
+                get().getDuration(TAG_ASK_DURATION, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS));
+
+        return cachedAskDuration;
+    }
+
+    /**
+     * Interval at which the Gossiper schedules its periodic GossipTick.
+     * Parsed once from the "gossip-tick-interval" config key and cached.
+     */
+    public FiniteDuration getGossipTickInterval(){
+        if (cachedGossipTickInterval != null) {
+            return cachedGossipTickInterval;
+        }
+
+        cachedGossipTickInterval = new FiniteDuration(
+                get().getDuration(TAG_GOSSIP_TICK_INTERVAL, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
+
+        return cachedGossipTickInterval;
+    }
+
+    /**
+     * Builder that seeds the default value for every remote-rpc config key.
+     * Values present in the actor system's config take precedence when the
+     * inherited {@code merge()} combines them in {@link #build()}.
+     */
+    public static class Builder extends CommonConfig.Builder<Builder>{
+
+        public Builder(String actorSystemName){
+            super(actorSystemName);
+
+            //Actor names
+            configHolder.put(TAG_RPC_BROKER_NAME, "broker");
+            configHolder.put(TAG_RPC_REGISTRY_NAME, "registry");
+            configHolder.put(TAG_RPC_MGR_NAME, "rpc");
+
+            //Actor paths
+            configHolder.put(TAG_RPC_BROKER_PATH, "/user/rpc/broker");
+            configHolder.put(TAG_RPC_REGISTRY_PATH, "/user/rpc/registry");
+            configHolder.put(TAG_RPC_MGR_PATH, "/user/rpc");
+
+            //durations
+            configHolder.put(TAG_ASK_DURATION, "15s");
+            configHolder.put(TAG_GOSSIP_TICK_INTERVAL, "500ms");
+
+        }
+
+        /** Merges the defaults above with any externally supplied config. */
+        public RemoteRpcProviderConfig build(){
+            return new RemoteRpcProviderConfig(merge());
+        }
+    }
+
+
+}
package org.opendaylight.controller.remote.rpc;
-
-import org.opendaylight.controller.remote.rpc.utils.DefaultAkkaConfigurationReader;
+import akka.actor.ActorSystem;
+import akka.osgi.BundleDelegatingClassLoader;
+import com.typesafe.config.Config;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class RemoteRpcProviderFactory {
- public static RemoteRpcProvider createInstance(final Broker broker, final BundleContext bundleContext){
+ private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcProviderFactory.class);
+
+ public static RemoteRpcProvider createInstance(
+ final Broker broker, final BundleContext bundleContext, final RemoteRpcProviderConfig config){
- ActorSystemFactory.createInstance(bundleContext, new DefaultAkkaConfigurationReader());
RemoteRpcProvider rpcProvider =
- new RemoteRpcProvider(ActorSystemFactory.getInstance(), (RpcProvisionRegistry) broker);
+ new RemoteRpcProvider(createActorSystem(bundleContext, config), (RpcProvisionRegistry) broker);
+
broker.registerProvider(rpcProvider);
return rpcProvider;
}
+
+ private static ActorSystem createActorSystem(BundleContext bundleContext, RemoteRpcProviderConfig config){
+
+ // Create an OSGi bundle classloader for actor system
+ BundleDelegatingClassLoader classLoader =
+ new BundleDelegatingClassLoader(bundleContext.getBundle(),
+ Thread.currentThread().getContextClassLoader());
+
+ Config actorSystemConfig = config.get();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
+ }
+ if (config.isMetricCaptureEnabled()) {
+ LOG.info("Instrumentation is enabled in actor system {}. Metrics can be viewed in JMX console.",
+ config.getActorSystemName());
+ }
+
+ return ActorSystem.create(config.getActorSystemName(), actorSystemConfig, classLoader);
+ }
}
* @param announcements
*/
private void announce(Set<RpcRouter.RouteIdentifier<?, ?, ?>> announcements) {
- LOG.debug("Announcing [{}]", announcements);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Announcing [{}]", announcements);
+ }
RpcRegistry.Messages.AddOrUpdateRoutes addRpcMsg = new RpcRegistry.Messages.AddOrUpdateRoutes(new ArrayList<>(announcements));
rpcRegistry.tell(addRpcMsg, ActorRef.noSender());
}
* @param removals
*/
private void remove(Set<RpcRouter.RouteIdentifier<?, ?, ?>> removals){
- LOG.debug("Removing [{}]", removals);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Removing [{}]", removals);
+ }
RpcRegistry.Messages.RemoveRoutes removeRpcMsg = new RpcRegistry.Messages.RemoveRoutes(new ArrayList<>(removals));
rpcRegistry.tell(removeRpcMsg, ActorRef.noSender());
}
package org.opendaylight.controller.remote.rpc;
-import static akka.pattern.Patterns.ask;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.japi.Pair;
-import akka.util.Timeout;
-
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
-import org.opendaylight.controller.remote.rpc.utils.LatestEntryRoutingLogic;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
+import org.opendaylight.controller.remote.rpc.utils.LatestEntryRoutingLogic;
import org.opendaylight.controller.remote.rpc.utils.RoutingLogic;
-import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
+import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.JdkFutureAdapters;
-import com.google.common.util.concurrent.ListenableFuture;
-
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Future;
+import static akka.pattern.Patterns.ask;
+
/**
* Actor to initiate execution of remote RPC on other nodes of the cluster.
*/
private final Broker.ProviderSession brokerSession;
private final ActorRef rpcRegistry;
private final SchemaContext schemaContext;
+ private final RemoteRpcProviderConfig config;
private RpcBroker(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
SchemaContext schemaContext) {
this.brokerSession = brokerSession;
this.rpcRegistry = rpcRegistry;
this.schemaContext = schemaContext;
+ config = new RemoteRpcProviderConfig(getContext().system().settings().config());
}
public static Props props(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
}
private void invokeRemoteRpc(final InvokeRpc msg) {
- LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
+ }
RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(
null, msg.getRpc(), msg.getIdentifier());
RpcRegistry.Messages.FindRouters findMsg = new RpcRegistry.Messages.FindRouters(routeId);
- scala.concurrent.Future<Object> future = ask(rpcRegistry, findMsg,
- new Timeout(ActorUtil.LOCAL_ASK_DURATION));
+ scala.concurrent.Future<Object> future = ask(rpcRegistry, findMsg, config.getAskDuration());
final ActorRef sender = getSender();
final ActorRef self = self();
ExecuteRpc executeMsg = new ExecuteRpc(XmlUtils.inputCompositeNodeToXml(msg.getInput(),
schemaContext), msg.getRpc());
- scala.concurrent.Future<Object> future = ask(logic.select(), executeMsg,
- new Timeout(ActorUtil.REMOTE_ASK_DURATION));
+ scala.concurrent.Future<Object> future = ask(logic.select(), executeMsg, config.getAskDuration());
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
}
private void executeRpc(final ExecuteRpc msg) {
- LOG.debug("Executing rpc {}", msg.getRpc());
-
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Executing rpc {}", msg.getRpc());
+ }
Future<RpcResult<CompositeNode>> future = brokerSession.rpc(msg.getRpc(),
XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(),
schemaContext));
@Override
public void onRpcImplementationAdded(QName rpc) {
- LOG.debug("Adding registration for [{}]", rpc);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Adding registration for [{}]", rpc);
+ }
RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
routeIds.add(routeId);
@Override
public void onRpcImplementationRemoved(QName rpc) {
- LOG.debug("Removing registration for [{}]", rpc);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Removing registration for [{}]", rpc);
+ }
RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
routeIds.add(routeId);
import akka.actor.SupervisorStrategy;
import akka.japi.Creator;
import akka.japi.Function;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.remote.rpc.messages.UpdateSchemaContext;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
import org.opendaylight.yangtools.yang.common.QName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.Duration;
+
import java.util.Set;
/**
private ActorRef rpcBroker;
private ActorRef rpcRegistry;
private final Broker.ProviderSession brokerSession;
+ private final RemoteRpcProviderConfig config;
private RpcListener rpcListener;
private RoutedRpcListener routeChangeListener;
private RemoteRpcImplementation rpcImplementation;
private final RpcProvisionRegistry rpcProvisionRegistry;
private RpcManager(SchemaContext schemaContext,
- Broker.ProviderSession brokerSession, RpcProvisionRegistry rpcProvisionRegistry) {
+ Broker.ProviderSession brokerSession,
+ RpcProvisionRegistry rpcProvisionRegistry) {
this.schemaContext = schemaContext;
this.brokerSession = brokerSession;
this.rpcProvisionRegistry = rpcProvisionRegistry;
+ this.config = new RemoteRpcProviderConfig(getContext().system().settings().config());
createRpcActors();
startListeners();
public static Props props(final SchemaContext schemaContext,
- final Broker.ProviderSession brokerSession, final RpcProvisionRegistry rpcProvisionRegistry) {
+ final Broker.ProviderSession brokerSession,
+ final RpcProvisionRegistry rpcProvisionRegistry) {
return Props.create(new Creator<RpcManager>() {
@Override
public RpcManager create() throws Exception {
private void createRpcActors() {
LOG.debug("Create rpc registry and broker actors");
- Config conf = ConfigFactory.load();
-
rpcRegistry =
getContext().actorOf(Props.create(RpcRegistry.class).
- withMailbox(ActorUtil.MAILBOX), ActorConstants.RPC_REGISTRY);
+ withMailbox(config.getMailBoxName()), config.getRpcRegistryName());
rpcBroker =
getContext().actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext).
- withMailbox(ActorUtil.MAILBOX),ActorConstants.RPC_BROKER);
+ withMailbox(config.getMailBoxName()), config.getRpcBrokerName());
RpcRegistry.Messages.SetLocalRouter localRouter = new RpcRegistry.Messages.SetLocalRouter(rpcBroker);
rpcRegistry.tell(localRouter, self());
rpcListener = new RpcListener(rpcRegistry);
routeChangeListener = new RoutedRpcListener(rpcRegistry);
- rpcImplementation = new RemoteRpcImplementation(rpcBroker, schemaContext);
+ rpcImplementation = new RemoteRpcImplementation(rpcBroker, schemaContext, config);
brokerSession.addRpcRegistrationListener(rpcListener);
rpcProvisionRegistry.registerRouteChangeListener(routeChangeListener);
import akka.actor.UntypedActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
-import org.opendaylight.controller.remote.rpc.messages.Monitor;
+import org.opendaylight.controller.cluster.common.actor.Monitor;
public class TerminationMonitor extends UntypedActor{
protected final LoggingAdapter LOG =
@Override public void onReceive(Object message) throws Exception {
if(message instanceof Terminated){
Terminated terminated = (Terminated) message;
- LOG.debug("Actor terminated : {}", terminated.actor());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor terminated : {}", terminated.actor());
+ }
}else if(message instanceof Monitor){
Monitor monitor = (Monitor) message;
getContext().watch(monitor.getActorRef());
import akka.actor.ActorRef;
import akka.actor.Address;
import akka.actor.Props;
-import akka.actor.UntypedActor;
import akka.dispatch.Mapper;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Pair;
import akka.pattern.Patterns;
import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
import scala.concurrent.Future;
import java.util.Map;
import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
-import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
* It uses {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} to maintain this
* cluster wide information.
*/
-public class RpcRegistry extends UntypedActor {
+public class RpcRegistry extends AbstractUntypedActorWithMetering {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
*/
private ActorRef localRouter;
+ private RemoteRpcProviderConfig config;
+
public RpcRegistry() {
bucketStore = getContext().actorOf(Props.create(BucketStore.class), "store");
-
+ this.config = new RemoteRpcProviderConfig(getContext().system().settings().config());
log.info("Bucket store path = {}", bucketStore.path().toString());
}
this.bucketStore = bucketStore;
}
- @Override
- public void onReceive(Object message) throws Exception {
-
- log.debug("Received message: message [{}]", message);
+ @Override
+ protected void handleReceive(Object message) throws Exception {
//TODO: if sender is remote, reject message
if (message instanceof SetLocalRouter)
Preconditions.checkState(localRouter != null, "Router must be set first");
- Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), ActorUtil.ASK_DURATION.toMillis());
+ Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), config.getAskDuration());
futureReply.map(getMapperToAddRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
}
*/
private void receiveRemoveRoutes(RemoveRoutes msg) {
- Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), ActorUtil.ASK_DURATION.toMillis());
+ Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), config.getAskDuration());
futureReply.map(getMapperToRemoveRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
}
private void receiveGetRouter(FindRouters msg) {
final ActorRef sender = getSender();
- Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), ActorUtil.ASK_DURATION.toMillis());
+ Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), config.getAskDuration());
futureReply.map(getMapperToGetRouter(msg.getRouteIdentifier(), sender), getContext().dispatcher());
}
* @param routeId
* @return
*/
- private Messages.FindRoutersReply createReplyWithRouters(Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
+ private Messages.FindRoutersReply createReplyWithRouters(
+ Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
List<Pair<ActorRef, Long>> routers = new ArrayList<>();
Option<Pair<ActorRef, Long>> routerWithUpdateTime = null;
* @param sender client who asked to find the routers.
* @return
*/
- private Mapper<Object, Void> getMapperToGetRouter(final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
+ private Mapper<Object, Void> getMapperToGetRouter(
+ final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
return new Mapper<Object, Void>() {
@Override
public Void apply(Object replyMessage) {
public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
- private Long version = System.currentTimeMillis();;
+ private Long version = System.currentTimeMillis();
private T data;
import akka.actor.ActorRefProvider;
import akka.actor.Address;
import akka.actor.Props;
-import akka.actor.UntypedActor;
import akka.cluster.ClusterActorRefProvider;
import akka.event.Logging;
import akka.event.LoggingAdapter;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import org.opendaylight.controller.utils.ConditionalProbe;
import java.util.HashMap;
* This store uses a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}.
*
*/
-public class BucketStore extends UntypedActor {
+public class BucketStore extends AbstractUntypedActorWithMetering {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
/**
* Bucket owned by the node
*/
- private BucketImpl localBucket = new BucketImpl();;
+ private BucketImpl localBucket = new BucketImpl();
/**
* Buckets ownded by other known nodes in the cluster
private ConditionalProbe probe;
+ private final RemoteRpcProviderConfig config;
+
+ public BucketStore(){
+ config = new RemoteRpcProviderConfig(getContext().system().settings().config());
+ }
+
@Override
public void preStart(){
ActorRefProvider provider = getContext().provider();
selfAddress = provider.getDefaultAddress();
if ( provider instanceof ClusterActorRefProvider)
- getContext().actorOf(Props.create(Gossiper.class).withMailbox(ActorUtil.MAILBOX), "gossiper");
+ getContext().actorOf(Props.create(Gossiper.class).withMailbox(config.getMailBoxName()), "gossiper");
}
- @Override
- public void onReceive(Object message) throws Exception {
-
- log.debug("Received message: node[{}], message[{}]", selfAddress, message);
+ @Override
+ protected void handleReceive(Object message) throws Exception {
if (probe != null) {
probe.tell(message, getSelf());
}
receiveGetLocalBucket();
} else if (message instanceof GetBucketsByMembers) {
receiveGetBucketsByMembers(
- ((GetBucketsByMembers) message).getMembers());
+ ((GetBucketsByMembers) message).getMembers());
} else if (message instanceof GetBucketVersions) {
receiveGetBucketVersions();
} else if (message instanceof UpdateRemoteBuckets) {
receiveUpdateRemoteBuckets(
- ((UpdateRemoteBuckets) message).getBuckets());
+ ((UpdateRemoteBuckets) message).getBuckets());
} else {
- log.debug("Unhandled message [{}]", message);
+ if(log.isDebugEnabled()) {
+ log.debug("Unhandled message [{}]", message);
+ }
unhandled(message);
}
-
}
/**
versions.put(entry.getKey(), remoteVersion);
}
}
-
- log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+ if(log.isDebugEnabled()) {
+ log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+ }
}
///
import akka.actor.ActorSelection;
import akka.actor.Address;
import akka.actor.Cancellable;
-import akka.actor.UntypedActor;
import akka.cluster.Cluster;
import akka.cluster.ClusterActorRefProvider;
import akka.cluster.ClusterEvent;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.pattern.Patterns;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
*
*/
-public class Gossiper extends UntypedActor {
+public class Gossiper extends AbstractUntypedActorWithMetering {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
private Boolean autoStartGossipTicks = true;
- public Gossiper(){}
+ private RemoteRpcProviderConfig config;
+
+ public Gossiper(){
+ config = new RemoteRpcProviderConfig(getContext().system().settings().config());
+ }
/**
* Helpful for testing
if (autoStartGossipTicks) {
gossipTask = getContext().system().scheduler().schedule(
new FiniteDuration(1, TimeUnit.SECONDS), //initial delay
- ActorUtil.GOSSIP_TICK_INTERVAL, //interval
+ config.getGossipTickInterval(), //interval
getSelf(), //target
new Messages.GossiperMessages.GossipTick(), //message
getContext().dispatcher(), //execution context
}
@Override
- public void onReceive(Object message) throws Exception {
-
- log.debug("Received message: node[{}], message[{}]", selfAddress, message);
-
+ protected void handleReceive(Object message) throws Exception {
//Usually sent by self via gossip task defined above. But its not enforced.
//These ticks can be sent by another actor as well which is esp. useful while testing
if (message instanceof GossipTick)
receiveGossipTick();
- //Message from remote gossiper with its bucket versions
+ //Message from remote gossiper with its bucket versions
else if (message instanceof GossipStatus)
receiveGossipStatus((GossipStatus) message);
- //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
- //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
- //message with its local versions
+ //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
+ //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
+ //message with its local versions
else if (message instanceof GossipEnvelope)
receiveGossip((GossipEnvelope) message);
}
clusterMembers.remove(member.address());
- log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+ if(log.isDebugEnabled()) {
+ log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+ }
}
/**
if (!clusterMembers.contains(member.address()))
clusterMembers.add(member.address());
-
- log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+ if(log.isDebugEnabled()) {
+ log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+ }
}
/**
void receiveGossipTick(){
if (clusterMembers.size() == 0) return; //no members to send gossip status to
- Address remoteMemberToGossipTo = null;
+ Address remoteMemberToGossipTo;
if (clusterMembers.size() == 1)
remoteMemberToGossipTo = clusterMembers.get(0);
Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
remoteMemberToGossipTo = clusterMembers.get(randomIndex);
}
-
- log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+ if(log.isDebugEnabled()) {
+ log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+ }
getLocalStatusAndSendTo(remoteMemberToGossipTo);
}
final ActorRef sender = getSender();
Future<Object> futureReply =
- Patterns.ask(getContext().parent(), new GetBucketVersions(), ActorUtil.ASK_DURATION.toMillis());
+ Patterns.ask(getContext().parent(), new GetBucketVersions(), config.getAskDuration());
futureReply.map(getMapperToProcessRemoteStatus(sender, status), getContext().dispatcher());
void receiveGossip(GossipEnvelope envelope){
//TODO: Add more validations
if (!selfAddress.equals(envelope.to())) {
- log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+ if(log.isDebugEnabled()) {
+ log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+ }
return;
}
void sendGossipTo(final ActorRef remote, final Set<Address> addresses){
Future<Object> futureReply =
- Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), ActorUtil.ASK_DURATION.toMillis());
+ Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), config.getAskDuration());
futureReply.map(getMapperToSendGossip(remote), getContext().dispatcher());
}
//Get local status from bucket store and send to remote
Future<Object> futureReply =
- Patterns.ask(getContext().parent(), new GetBucketVersions(), ActorUtil.ASK_DURATION.toMillis());
+ Patterns.ask(getContext().parent(), new GetBucketVersions(), config.getAskDuration());
//Find gossiper on remote system
ActorSelection remoteRef = getContext().system().actorSelection(
remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());
- log.debug("Sending bucket versions to [{}]", remoteRef);
+ if(log.isDebugEnabled()) {
+ log.debug("Sending bucket versions to [{}]", remoteRef);
+ }
futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
localIsOlder.add(address);
else if (localVersions.get(address) > remoteVersions.get(address))
localIsNewer.add(address);
- else
- continue;
}
if (!localIsOlder.isEmpty())
public Void apply(Object msg) {
if (msg instanceof GetBucketsByMembersReply) {
Map<Address, Bucket> buckets = ((GetBucketsByMembersReply) msg).getBuckets();
- log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+ if(log.isDebugEnabled()) {
+ log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+ }
GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets);
sender.tell(envelope, getSelf());
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-package org.opendaylight.controller.remote.rpc.utils;
-
-import scala.concurrent.duration.Duration;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.concurrent.TimeUnit;
-
-public class ActorUtil {
- public static final FiniteDuration LOCAL_ASK_DURATION = Duration.create(2, TimeUnit.SECONDS);
- public static final FiniteDuration REMOTE_ASK_DURATION = Duration.create(15, TimeUnit.SECONDS);
- public static final FiniteDuration ASK_DURATION = Duration.create(17, TimeUnit.SECONDS);
- public static final FiniteDuration GOSSIP_TICK_INTERVAL = Duration.create(500, TimeUnit.MILLISECONDS);
- public static final String MAILBOX = "bounded-mailbox";
-}
odl-cluster-rpc {
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 100ms
}
import config { prefix config; revision-date 2013-04-05; }
import opendaylight-md-sal-dom {prefix dom;}
-
+
description
"This module contains the base YANG definitions for
the remote routed rpc";
-
+
revision "2014-07-07" {
description
"Initial revision";
augment "/config:modules/config:module/config:configuration" {
case remote-rpc-connector {
when "/config:modules/config:module/config:type = 'remote-rpc-connector'";
-
+
container dom-broker {
uses config:service-ref {
refine type {
}
}
}
+
+ leaf enable-metric-capture {
+ default false;
+ type boolean;
+ description "Enable or disable metric capture.";
+ }
+
+ leaf actor-system-name {
+ default odl-cluster-rpc;
+ type string;
+ description "Name by which actor system is identified. It's also used to find the relevant configuration.";
+ }
+
+ leaf bounded-mailbox-capacity {
+ default 1000;
+ type uint16;
+ description "Max queue size that an actor's mailbox can reach";
+ }
}
}
package org.opendaylight.controller.remote.rpc;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.testkit.JavaTestKit;
+import com.google.common.collect.ImmutableList;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.testkit.JavaTestKit;
+import java.io.File;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
-import com.google.common.collect.ImmutableList;
-import com.typesafe.config.ConfigFactory;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
/**
* Base class for RPC tests.
@BeforeClass
public static void setup() throws InterruptedException {
- node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
- node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
+ RemoteRpcProviderConfig config1 = new RemoteRpcProviderConfig.Builder("memberA").build();
+ RemoteRpcProviderConfig config2 = new RemoteRpcProviderConfig.Builder("memberB").build();
+ node1 = ActorSystem.create("opendaylight-rpc", config1.get());
+ node2 = ActorSystem.create("opendaylight-rpc", config2.get());
}
@AfterClass
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-
-import akka.actor.ActorSystem;
-import com.typesafe.config.ConfigFactory;
-import junit.framework.Assert;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.utils.AkkaConfigurationReader;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleContext;
-
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class ActorSystemFactoryTest {
- ActorSystem system = null;
-
- @Test
- public void testActorSystemCreation(){
- BundleContext context = mock(BundleContext.class);
- when(context.getBundle()).thenReturn(mock(Bundle.class));
-
- AkkaConfigurationReader reader = mock(AkkaConfigurationReader.class);
- when(reader.read()).thenReturn(ConfigFactory.load());
-
- ActorSystemFactory.createInstance(context, reader);
- system = ActorSystemFactory.getInstance();
- Assert.assertNotNull(system);
- // Check illegal state exception
-
- try {
- ActorSystemFactory.createInstance(context, reader);
- fail("Illegal State exception should be thrown, while creating actor system second time");
- } catch (IllegalStateException e) {
- }
- }
-
- @After
- public void cleanup() throws InterruptedException {
- if(system != null) {
- system.shutdown();
- }
- }
-}
package org.opendaylight.controller.remote.rpc;
-import static org.junit.Assert.assertEquals;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
+import akka.testkit.JavaTestKit;
+import com.google.common.util.concurrent.ListenableFuture;
import org.junit.Test;
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import akka.testkit.JavaTestKit;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
-import com.google.common.util.concurrent.ListenableFuture;
+import static org.junit.Assert.assertEquals;
/***
* Unit tests for RemoteRpcImplementation.
final AtomicReference<AssertionError> assertError = new AtomicReference<>();
try {
RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
- probeReg1.getRef(), schemaContext);
+ probeReg1.getRef(), schemaContext, getConfig());
final CompositeNode input = makeRPCInput("foo");
final CompositeNode output = makeRPCOutput("bar");
final AtomicReference<AssertionError> assertError = new AtomicReference<>();
try {
RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
- probeReg1.getRef(), schemaContext);
+ probeReg1.getRef(), schemaContext, getConfig());
QName instanceQName = new QName(new URI("ns"), "instance");
YangInstanceIdentifier identifier = YangInstanceIdentifier.of(instanceQName);
final AtomicReference<AssertionError> assertError = new AtomicReference<>();
try {
RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
- probeReg1.getRef(), schemaContext);
+ probeReg1.getRef(), schemaContext, getConfig());
final CompositeNode input = makeRPCInput("foo");
final AtomicReference<AssertionError> assertError = new AtomicReference<>();
try {
RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
- probeReg1.getRef(), schemaContext);
+ probeReg1.getRef(), schemaContext, getConfig());
final CompositeNode input = makeRPCInput("foo");
return invokeRpcMsg;
}
+
+ private RemoteRpcProviderConfig getConfig(){
+ return new RemoteRpcProviderConfig.Builder("unit-test").build();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.actor.UntypedActor;
+import akka.testkit.TestActorRef;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.io.File;
+import java.util.concurrent.TimeUnit;
+
+public class RemoteRpcProviderConfigTest {
+
+ @Test
+ public void testConfigDefaults() {
+
+ Config c = ConfigFactory.parseFile(new File("application.conf"));
+ RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("unit-test").build();
+
+ //Assert on configurations from common config
+ Assert.assertFalse(config.isMetricCaptureEnabled()); //should be disabled by default
+ Assert.assertNotNull(config.getMailBoxCapacity());
+ Assert.assertNotNull(config.getMailBoxName());
+ Assert.assertNotNull(config.getMailBoxPushTimeout());
+
+ //rest of the configurations should be set
+ Assert.assertNotNull(config.getActorSystemName());
+ Assert.assertNotNull(config.getRpcBrokerName());
+ Assert.assertNotNull(config.getRpcBrokerPath());
+ Assert.assertNotNull(config.getRpcManagerName());
+ Assert.assertNotNull(config.getRpcManagerPath());
+ Assert.assertNotNull(config.getRpcRegistryName());
+ Assert.assertNotNull(config.getRpcRegistryPath());
+ Assert.assertNotNull(config.getAskDuration());
+ Assert.assertNotNull(config.getGossipTickInterval());
+
+
+
+ }
+
+ @Test
+ public void testConfigCustomizations() {
+
+ AkkaConfigurationReader reader = new TestConfigReader();
+
+ final int expectedCapacity = 100;
+ String timeOutVal = "10ms";
+ FiniteDuration expectedTimeout = FiniteDuration.create(10, TimeUnit.MILLISECONDS);
+
+ RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("unit-test")
+ .metricCaptureEnabled(true)//enable metric capture
+ .mailboxCapacity(expectedCapacity)
+ .mailboxPushTimeout(timeOutVal)
+ .withConfigReader(reader)
+ .build();
+
+ Assert.assertTrue(config.isMetricCaptureEnabled());
+ Assert.assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+ Assert.assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+
+ //Now check this config inside an actor
+ ActorSystem system = ActorSystem.create("unit-test", config.get());
+ TestActorRef<ConfigTestActor> configTestActorTestActorRef =
+ TestActorRef.create(system, Props.create(ConfigTestActor.class));
+
+ ConfigTestActor actor = configTestActorTestActorRef.underlyingActor();
+ Config actorConfig = actor.getConfig();
+
+ config = new RemoteRpcProviderConfig(actorConfig);
+
+ Assert.assertTrue(config.isMetricCaptureEnabled());
+ Assert.assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+ Assert.assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+ }
+
+ public static class ConfigTestActor extends UntypedActor {
+
+ private Config actorSystemConfig;
+
+ public ConfigTestActor() {
+ this.actorSystemConfig = getContext().system().settings().config();
+ }
+
+ @Override
+ public void onReceive(Object message) throws Exception {
+ }
+
+ /**
+ * Only for testing. NEVER expose actor's internal state like this.
+ *
+ * @return
+ */
+ public Config getConfig() {
+ return actorSystemConfig;
+ }
+ }
+
+ public static class TestConfigReader implements AkkaConfigurationReader {
+
+ @Override
+ public Config read() {
+ return ConfigFactory.parseResources("application.conf");
+
+ }
+ }
+}
\ No newline at end of file
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.JavaTestKit;
-import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.Config;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
public class RemoteRpcProviderTest {
static ActorSystem system;
-
+ static RemoteRpcProviderConfig moduleConfig;
@BeforeClass
public static void setup() throws InterruptedException {
- system = ActorSystem.create("odl-cluster-rpc", ConfigFactory.load().getConfig("odl-cluster-rpc"));
+ moduleConfig = new RemoteRpcProviderConfig.Builder("odl-cluster-rpc").build();
+ Config config = moduleConfig.get();
+ system = ActorSystem.create("odl-cluster-rpc", config);
+
}
@AfterClass
SchemaService schemaService = mock(SchemaService.class);
when(schemaService.getGlobalContext()). thenReturn(mock(SchemaContext.class));
when(session.getService(SchemaService.class)).thenReturn(schemaService);
+
rpcProvider.onSessionInitiated(session);
- ActorRef actorRef = Await.result(system.actorSelection(ActorConstants.RPC_MANAGER_PATH).resolveOne(Duration.create(1, TimeUnit.SECONDS)),
- Duration.create(2, TimeUnit.SECONDS));
- Assert.assertTrue(actorRef.path().toString().contains(ActorConstants.RPC_MANAGER_PATH));
+
+ ActorRef actorRef = Await.result(
+ system.actorSelection(
+ moduleConfig.getRpcManagerPath()).resolveOne(Duration.create(1, TimeUnit.SECONDS)),
+ Duration.create(2, TimeUnit.SECONDS));
+
+ Assert.assertTrue(actorRef.path().toString().contains(moduleConfig.getRpcManagerPath()));
}
}
import akka.actor.Props;
import akka.testkit.JavaTestKit;
import com.google.common.base.Predicate;
-import com.typesafe.config.ConfigFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
import org.opendaylight.controller.remote.rpc.registry.gossip.Messages;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
@BeforeClass
public static void setup() throws InterruptedException {
- node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
- node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
- node3 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberC"));
+ RemoteRpcProviderConfig config1 = new RemoteRpcProviderConfig.Builder("memberA").build();
+ RemoteRpcProviderConfig config2 = new RemoteRpcProviderConfig.Builder("memberB").build();
+ RemoteRpcProviderConfig config3 = new RemoteRpcProviderConfig.Builder("memberC").build();
+ node1 = ActorSystem.create("opendaylight-rpc", config1.get());
+ node2 = ActorSystem.create("opendaylight-rpc", config2.get());
+ node3 = ActorSystem.create("opendaylight-rpc", config3.get());
}
@AfterClass
new ConditionalProbe(probe.getRef(), new Predicate() {
@Override
public boolean apply(@Nullable Object input) {
- return clazz.equals(input.getClass());
+ if (input != null)
+ return clazz.equals(input.getClass());
+ else
+ return false;
}
});
odl-cluster-rpc{
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 10ms
}
}
unit-test{
akka {
- loglevel = "INFO"
+ loglevel = "DEBUG"
#loggers = ["akka.event.slf4j.Slf4jLogger"]
}
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+ #mailbox-capacity is specified in config subsystem
mailbox-capacity = 1000
mailbox-push-timeout-time = 10ms
}
memberA{
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 10ms
}
}
memberB{
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 10ms
}
}
memberC{
bounded-mailbox {
- mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+ mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
mailbox-capacity = 1000
mailbox-push-timeout-time = 10ms
}
@GET
@Path("/modules")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModules(@Context UriInfo uriInfo);
@GET
@Path("/modules/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/modules/module/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/operations")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getOperations(@Context UriInfo uriInfo);
@GET
@Path("/operations/{identifier:.+}")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getOperations(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
@GET
@Path("/streams")
- @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+ @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public StructuredData getAvailableStreams(@Context UriInfo uriInfo);
DataContainerChild<? extends PathArgument, ?> child = iterator.next();
nnWriter.write(child);
nnWriter.flush();
- if(iterator.hasNext()) {
- outputWriter.write(",");
- }
}
}
LOG.debug("In toResponse: {}", exception.getMessage());
- // Default to the content type if there's no Accept header
- MediaType mediaType = headers.getMediaType();
List<MediaType> accepts = headers.getAcceptableMediaTypes();
+ accepts.remove(MediaType.WILDCARD_TYPE);
LOG.debug("Accept headers: {}", accepts);
+ final MediaType mediaType;
if (accepts != null && accepts.size() > 0) {
mediaType = accepts.get(0); // just pick the first one
+ } else {
+ // Default to the content type if there's no Accept header
+ mediaType = MediaType.APPLICATION_JSON_TYPE;
}
LOG.debug("Using MediaType: {}", mediaType);
import com.google.common.collect.Lists;
import java.math.BigInteger;
import java.net.URI;
+import java.net.URISyntaxException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
private static final String SCOPE_PARAM_NAME = "scope";
+ private static final String NETCONF_BASE = "urn:ietf:params:xml:ns:netconf:base:1.0";
+
+ private static final String NETCONF_BASE_PAYLOAD_NAME = "data";
+
+ private static final QName NETCONF_BASE_QNAME;
+
static {
try {
EVENT_SUBSCRIPTION_AUGMENT_REVISION = new SimpleDateFormat("yyyy-MM-dd").parse("2014-07-08");
+ NETCONF_BASE_QNAME = QName.create(QNameModule.create(new URI(NETCONF_BASE), null), NETCONF_BASE_PAYLOAD_NAME );
} catch (ParseException e) {
throw new RestconfDocumentedException(
"It wasn't possible to convert revision date of sal-remote-augment to date", ErrorType.APPLICATION,
ErrorTag.OPERATION_FAILED);
+ } catch (URISyntaxException e) {
+ throw new RestconfDocumentedException(
+ "It wasn't possible to create instance of URI class with "+NETCONF_BASE+" URI", ErrorType.APPLICATION,
+ ErrorTag.OPERATION_FAILED);
}
}
validateInput(iiWithData.getSchemaNode(), payload);
DOMMountPoint mountPoint = iiWithData.getMountPoint();
+ validateTopLevelNodeName(payload, iiWithData.getInstanceIdentifier());
final CompositeNode value = this.normalizeNode(payload, iiWithData.getSchemaNode(), mountPoint);
validateListKeysEqualityInPayloadAndUri(iiWithData, value);
final NormalizedNode<?, ?> datastoreNormalizedNode = compositeNodeToDatastoreNormalizedNode(value,
iiWithData.getSchemaNode());
+
YangInstanceIdentifier normalizedII;
if (mountPoint != null) {
normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(
return Response.status(Status.OK).build();
}
+ private void validateTopLevelNodeName(final Node<?> node,
+ final YangInstanceIdentifier identifier) {
+ final String payloadName = getName(node);
+ final Iterator<PathArgument> pathArguments = identifier.getReversePathArguments().iterator();
+
+ //no arguments
+ if (!pathArguments.hasNext()) {
+ //no "data" payload
+ if (!node.getNodeType().equals(NETCONF_BASE_QNAME)) {
+ throw new RestconfDocumentedException("Instance identifier has to contain at least one path argument",
+ ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
+ }
+ //any arguments
+ } else {
+ final String identifierName = pathArguments.next().getNodeType().getLocalName();
+ if (!payloadName.equals(identifierName)) {
+ throw new RestconfDocumentedException("Payload name (" + payloadName
+ + ") is different from identifier name (" + identifierName + ")", ErrorType.PROTOCOL,
+ ErrorTag.MALFORMED_MESSAGE);
+ }
+ }
+ }
+
/**
* Validates whether keys in {@code payload} are equal to values of keys in {@code iiWithData} for list schema node
*
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-config</artifactId>
+ <packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/20-clustering-test-app.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/modules.conf</file>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <configuration>
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider">
+ prefix:clustering-it-provider
+ </type>
+ <name>clustering-it-provider</name>
+
+ <rpc-registry>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+ <name>binding-rpc-broker</name>
+ </rpc-registry>
+ <data-broker>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+ <name>binding-data-broker</name>
+ </data-broker>
+ <notification-service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
+ binding:binding-notification-service
+ </type>
+ <name>binding-notification-broker</name>
+ </notification-service>
+ </module>
+ </modules>
+ </data>
+
+ </configuration>
+
+ <required-capabilities>
+<capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
+<capability>urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider?module=clustering-it-provider&amp;revision=2014-08-19</capability>
+
+ </required-capabilities>
+
+</snapshot>
+
--- /dev/null
+# This file describes which shards live on which members
+# The format for a module-shards is as follows,
+# {
+#     name = "<friendly_name_of_the_module>"
+#     shards = [
+#         {
+#             name="<any_name_that_is_unique_for_the_module>"
+#             replicas = [
+#                 "<name_of_member_on_which_to_run>"
+#             ]
+#         }
+#     ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than 1
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store then
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+ {
+ name = "default"
+ shards = [
+ {
+ name="default"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "topology"
+ shards = [
+ {
+ name="topology"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "inventory"
+ shards = [
+ {
+ name="inventory"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ },
+ {
+ name = "toaster"
+ shards = [
+ {
+ name="toaster"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+ {
+ name = "car"
+ shards = [
+ {
+ name="car"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+ {
+ name = "people"
+ shards = [
+ {
+ name="people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+ {
+ name = "car-people"
+ shards = [
+ {
+ name="car-people"
+ replicas = [
+ "member-1",
+ "member-2",
+ "member-3"
+ ]
+ }
+ ]
+ }
+
+]
--- /dev/null
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+# name = "<friendly_name_of_module>"
+# namespace = "<the yang namespace of the module>"
+# shard-strategy = "module"
+# }
+#
+# Note that at this time the only shard-strategy we support is module which basically
+# will put all the data of a single module in two shards (one for config and one for
+# operational data)
+
+modules = [
+ {
+ name = "inventory"
+ namespace = "urn:opendaylight:inventory"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "topology"
+ namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+ shard-strategy = "module"
+ },
+
+ {
+ name = "toaster"
+ namespace = "http://netconfcentral.org/ns/toaster"
+ shard-strategy = "module"
+ },
+ {
+ name = "car"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+ shard-strategy = "module"
+ }
+ {
+ name = "people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+ shard-strategy = "module"
+ }
+
+ {
+ name = "car-people"
+ namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+ shard-strategy = "module"
+ }
+]
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-model</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>org.opendaylight.controller.sal-clustering-it-model</Bundle-Name>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/generated-sources/sal</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <type>jar</type>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!--This plugin's configuration is used to store Eclipse
+ m2e settings only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore />
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-binding</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-common</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ <version>${ietf-inet-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ <version>${ietf-yang-types.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ <version>${yang-ext.version}</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+module car-people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
+
+  prefix car-people;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+ import car { prefix "c"; revision-date 2014-08-18; }
+ import people { prefix "people"; revision-date 2014-08-18; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+    "YANG model for car-people for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ container car-people {
+ description
+ "Top-level container for all people car map";
+
+ list car-person {
+ key "car-id person-id";
+ description "A mapping of cars and people.";
+ leaf car-id {
+ type c:car-id;
+ }
+
+ leaf person-id {
+ type people:person-id;
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car-purchase {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
+
+ prefix cp;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+ import car { prefix "car"; revision-date 2014-08-18; }
+ import people { prefix "person"; revision-date 2014-08-18; }
+ import yang-ext {prefix "ext"; revision-date "2013-07-09";}
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car purchase for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ rpc buy-car {
+ description
+ "buy a new car";
+ input {
+ leaf person {
+ ext:context-reference "person:person-context";
+ type person:person-ref;
+ description "A reference to a particular person.";
+ }
+
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+ }
+
+ notification carBought {
+ description
+ "Indicates that a person bought a car.";
+ leaf car-id {
+ type car:car-id;
+ description "identifier of car.";
+ }
+ leaf person-id {
+ type person:person-id;
+ description "identifier of person.";
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module car {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car";
+
+ prefix car;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for car for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ typedef car-id {
+ type inet:uri;
+ description "An identifier for car entry.";
+ }
+
+ grouping car-entry {
+ description "Describes the contents of a car entry -
+ Details of the car manufacturer, model etc";
+ leaf id {
+ type car-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf model {
+ type string;
+ }
+ leaf manufacturer {
+ type string;
+ }
+
+ leaf year {
+ type uint32;
+ }
+
+ leaf category {
+ type string;
+ }
+ }
+
+ container cars {
+ description
+ "Top-level container for all car objects.";
+ list car-entry {
+ key "id";
+ description "A list of cars (as defined by the 'grouping car-entry').";
+ uses car-entry;
+ }
+ }
+
+
+}
\ No newline at end of file
--- /dev/null
+module people {
+
+ yang-version 1;
+
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people";
+
+ prefix people;
+
+ import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+ organization "Netconf Central";
+
+ contact
+ "Harman Singh <harmasin@cisco.com>";
+
+ description
+ "YANG model for person for test application";
+
+ revision "2014-08-18" {
+ description
+ "Clustering sample app";
+ }
+
+ typedef person-id {
+ type inet:uri;
+ description "An identifier for person.";
+ }
+
+ typedef person-ref {
+ type instance-identifier;
+    description "A reference that points to a people:people/person in the data tree.";
+ }
+ identity person-context {
+ description "A person-context is a classifier for person elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
+ }
+
+ grouping person {
+ description "Describes the details of the person";
+
+ leaf id {
+ type person-id;
+ description "identifier of single list of entries.";
+ }
+
+ leaf gender {
+ type string;
+ }
+
+ leaf age {
+ type uint32;
+ }
+
+ leaf address {
+ type string;
+ }
+
+ leaf contactNo {
+ type string;
+ }
+ }
+
+ container people {
+ description
+ "Top-level container for all people";
+
+ list person {
+ key "id";
+ description "A list of people (as defined by the 'grouping person').";
+ uses person;
+ }
+ }
+
+ rpc add-person {
+ description
+ "Add a person entry into database";
+ input {
+ uses person;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>sal-samples</artifactId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it</artifactId>
+ <packaging>pom</packaging>
+ <modules>
+ <module>configuration</module>
+ <module>model</module>
+ <module>provider</module>
+ </modules>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>clustering-it</artifactId>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <version>1.1-SNAPSHOT</version>
+ </parent>
+ <artifactId>clustering-it-provider</artifactId>
+ <packaging>bundle</packaging>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>org.opendaylight.controller.config.yang.config.clustering_it_provider</Export-Package>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ <executions>
+ <execution>
+ <id>config</id>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+ <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>maven-sal-api-gen-plugin</artifactId>
+ <version>${yangtools.version}</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+      <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-config</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-util</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>equinoxSDK381</groupId>
+ <artifactId>org.eclipse.osgi</artifactId>
+ <version>3.8.1.v20120830-144521</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.listener;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class PeopleCarListener implements CarPurchaseListener {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleCarListener.class);
+
+ private DataBroker dataProvider;
+
+
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+ @Override
+ public void onCarBought(CarBought notification) {
+ log.info("onCarBought notification : Adding car person entry");
+
+ final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
+ carPersonBuilder.setCarId(notification.getCarId());
+ carPersonBuilder.setPersonId(notification.getPersonId());
+ CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
+ carPersonBuilder.setKey(key);
+ final CarPerson carPerson = carPersonBuilder.build();
+
+ InstanceIdentifier<CarPerson> carPersonIId =
+ InstanceIdentifier.<CarPeople>builder(CarPeople.class).child(CarPerson.class, carPerson.getKey()).build();
+
+
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
+
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("Car bought, entry added to map of people and car [{}]", carPerson);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("Car bought, Failed entry addition to map of people and car [{}]", carPerson);
+ }
+ });
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+public class PeopleProvider implements PeopleService, AutoCloseable {
+
+ private static final Logger log = LoggerFactory.getLogger(PeopleProvider.class);
+
+ private DataBroker dataProvider;
+
+ private BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration;
+
+ public void setDataProvider(final DataBroker salDataProvider) {
+ this.dataProvider = salDataProvider;
+ }
+
+
+ public void setRpcRegistration(BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration) {
+ this.rpcRegistration = rpcRegistration;
+ }
+
+ @Override
+ public Future<RpcResult<Void>> addPerson(AddPersonInput input) {
+ log.info("RPC addPerson : adding person [{}]", input);
+
+ PersonBuilder builder = new PersonBuilder(input);
+ final Person person = builder.build();
+ final SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+
+ // Each entry will be identifiable by a unique key, we have to create that identifier
+ final InstanceIdentifier.InstanceIdentifierBuilder<Person> personIdBuilder =
+ InstanceIdentifier.<People>builder(People.class)
+ .child(Person.class, person.getKey());
+ final InstanceIdentifier personId = personIdBuilder.build();
+ // Place entry in data store tree
+ WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+ tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
+
+ Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ log.info("RPC addPerson : person added successfully [{}]", person);
+ rpcRegistration.registerPath(PersonContext.class, personId);
+ log.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
+ futureResult.set(RpcResultBuilder.<Void>success().build());
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ log.info("RPC addPerson : person addition failed [{}]", person);
+ futureResult.set(RpcResultBuilder.<Void>failed()
+ .withError(RpcError.ErrorType.APPLICATION, t.getMessage()).build());
+ }
+ });
+ return futureResult;
+ }
+
+ @Override
+ public void close() throws Exception {
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+
+public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable{
+
+    private static final Logger log = LoggerFactory.getLogger(PurchaseCarProvider.class);
+
+    // Service used to publish the carBought notification; injected by the module.
+    private NotificationProviderService notificationProvider;
+
+    public void setNotificationProvider(final NotificationProviderService salService) {
+        this.notificationProvider = salService;
+    }
+
+    /**
+     * Handles the routed buy-car RPC by publishing a carBought notification
+     * carrying the car and person identifiers taken from the request.
+     */
+    @Override
+    public Future<RpcResult<Void>> buyCar(BuyCarInput input) {
+        log.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+        final SettableFuture<RpcResult<Void>> rpcFuture = SettableFuture.create();
+        final CarBoughtBuilder boughtEvent = new CarBoughtBuilder();
+        boughtEvent.setCarId(input.getCarId());
+        boughtEvent.setPersonId(input.getPersonId());
+        notificationProvider.publish(boughtEvent.build());
+        rpcFuture.set(RpcResultBuilder.<Void>success().build());
+        return rpcFuture;
+    }
+
+    @Override
+    public void close() throws Exception {
+        // No resources to release.
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+
+
+import org.opendaylight.controller.clustering.it.listener.PeopleCarListener;
+import org.opendaylight.controller.clustering.it.provider.PeopleProvider;
+import org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+public class ClusteringItProviderModule extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModule {
+    public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+        super(identifier, dependencyResolver);
+    }
+
+    public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.clustering_it_provider.ClusteringItProviderModule oldModule, java.lang.AutoCloseable oldInstance) {
+        super(identifier, dependencyResolver, oldModule, oldInstance);
+    }
+
+    @Override
+    public void customValidation() {
+        // add custom validation for module attributes here.
+    }
+
+    /**
+     * Wires up the clustering test application: the routed car-purchase RPC,
+     * the people RPC/data provider, and the car-bought notification listener.
+     *
+     * @return an AutoCloseable that tears down every registration on close()
+     */
+    @Override
+    public java.lang.AutoCloseable createInstance() {
+        DataBroker dataBrokerService = getDataBrokerDependency();
+        NotificationProviderService notificationProvider = getNotificationServiceDependency();
+
+        // Add routed RPC registration for car purchase
+        final PurchaseCarProvider purchaseCar = new PurchaseCarProvider();
+        purchaseCar.setNotificationProvider(notificationProvider);
+
+        final BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> purchaseCarRpc = getRpcRegistryDependency()
+            .addRoutedRpcImplementation(CarPurchaseService.class, purchaseCar);
+
+        // Add people provider registration
+        final PeopleProvider people = new PeopleProvider();
+        people.setDataProvider(dataBrokerService);
+        people.setRpcRegistration(purchaseCarRpc);
+
+        final BindingAwareBroker.RpcRegistration<PeopleService> peopleRpcReg = getRpcRegistryDependency()
+            .addRpcImplementation(PeopleService.class, people);
+
+        // Listen for carBought notifications and persist the car-person mapping.
+        final PeopleCarListener peopleCarListener = new PeopleCarListener();
+        peopleCarListener.setDataProvider(dataBrokerService);
+
+        // Reuse the notification service resolved above instead of resolving it twice.
+        final ListenerRegistration<NotificationListener> listenerReg =
+            notificationProvider.registerNotificationListener( peopleCarListener );
+
+        // Wrap everything as a single AutoCloseable so that all md-sal
+        // registrations are released when the module instance is closed.
+        final class AutoCloseableClusteringItApp implements AutoCloseable {
+
+            @Override
+            public void close() throws Exception {
+                peopleRpcReg.close();
+                purchaseCarRpc.close();
+                people.close();
+                purchaseCar.close();
+                listenerReg.close();
+            }
+        }
+
+        return new AutoCloseableClusteringItApp();
+    }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: clustering-it-provider yang module local name: clustering-it-provider
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Aug 19 14:44:46 PDT 2014
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+/**
+ * Factory that creates {@link ClusteringItProviderModule} instances for the
+ * config subsystem. Generated by the JMX generator (see the file header) and
+ * committed under src/main so it may be customized; currently a plain stub.
+ */
+public class ClusteringItProviderModuleFactory extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModuleFactory {
+
+}
--- /dev/null
+module clustering-it-provider {
+
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider";
+ prefix "clustering-it-provider";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; }
+
+ description
+ "This module contains the base YANG definitions for
+ clustering-it-provider implementation.";
+
+ revision "2014-08-19" {
+ description
+ "Initial revision.";
+ }
+
+ // This is the definition of the service implementation as a module identity.
+ identity clustering-it-provider {
+ base config:module-type;
+
+ // Specifies the prefix for generated java classes.
+ config:java-name-prefix ClusteringItProvider;
+ }
+
+ // Augments the 'configuration' choice node under modules/module.
+ augment "/config:modules/config:module/config:configuration" {
+ case clustering-it-provider {
+ when "/config:modules/config:module/config:type = 'clustering-it-provider'";
+
+ container rpc-registry {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-rpc-registry;
+ }
+ }
+ }
+
+ container notification-service {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity mdsal:binding-notification-service;
+ }
+ }
+ }
+
+ container data-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity mdsal:binding-async-data-broker;
+ }
+ }
+ }
+ }
+ }
+}
<module>toaster-provider</module>
<module>toaster-config</module>
<module>l2switch</module>
+ <module>clustering-test-app</module>
</modules>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<artifactId>org.osgi.core</artifactId>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
-
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
- private final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
private final InstanceIdentifier<Topology> topology;
private final OperationProcessor processor;
- FlowCapableTopologyExporter(final OperationProcessor processor, final InstanceIdentifier<Topology> topology) {
+ FlowCapableTopologyExporter(final OperationProcessor processor,
+ final InstanceIdentifier<Topology> topology) {
this.processor = Preconditions.checkNotNull(processor);
this.topology = Preconditions.checkNotNull(topology);
}
processor.enqueueOperation(new TopologyOperation() {
@Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- removeAffectedLinks(nodeId);
+ public void applyOperation(ReadWriteTransaction transaction) {
+ removeAffectedLinks(nodeId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
}
- });
- processor.enqueueOperation(new TopologyOperation() {
@Override
- public void applyOperation(ReadWriteTransaction transaction) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+ public String toString() {
+ return "onNodeRemoved";
}
});
}
final InstanceIdentifier<Node> path = getNodePath(toTopologyNodeId(notification.getId()));
transaction.merge(LogicalDatastoreType.OPERATIONAL, path, node, true);
}
+
+ @Override
+ public String toString() {
+ return "onNodeUpdated";
+ }
});
}
}
@Override
public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
- final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(notification
- .getNodeConnectorRef());
+ final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(
+ notification.getNodeConnectorRef());
- processor.enqueueOperation(new TopologyOperation() {
- @Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- final TpId tpId = toTerminationPointId(getNodeConnectorKey(notification.getNodeConnectorRef()).getId());
- removeAffectedLinks(tpId);
- }
- });
+ final TpId tpId = toTerminationPointId(getNodeConnectorKey(
+ notification.getNodeConnectorRef()).getId());
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(ReadWriteTransaction transaction) {
+ removeAffectedLinks(tpId, transaction);
transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
}
+
+ @Override
+ public String toString() {
+ return "onNodeConnectorRemoved";
+ }
});
}
@Override
public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
- final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(FlowCapableNodeConnectorUpdated.class);
+ final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(
+ FlowCapableNodeConnectorUpdated.class);
if (fcncu != null) {
processor.enqueueOperation(new TopologyOperation() {
@Override
transaction.merge(LogicalDatastoreType.OPERATIONAL, path, point, true);
if ((fcncu.getState() != null && fcncu.getState().isLinkDown())
|| (fcncu.getConfiguration() != null && fcncu.getConfiguration().isPORTDOWN())) {
- removeAffectedLinks(point.getTpId());
+ removeAffectedLinks(point.getTpId(), transaction);
}
}
+
+ @Override
+ public String toString() {
+ return "onNodeConnectorUpdated";
+ }
});
}
}
final InstanceIdentifier<Link> path = linkPath(link);
transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
}
+
+ @Override
+ public String toString() {
+ return "onLinkDiscovered";
+ }
});
}
public void applyOperation(final ReadWriteTransaction transaction) {
transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
}
+
+ @Override
+ public String toString() {
+ return "onLinkRemoved";
+ }
});
}
return tpPath(toTopologyNodeId(invNodeKey.getId()), toTerminationPointId(invNodeConnectorKey.getId()));
}
- private void removeAffectedLinks(final NodeId id) {
- processor.enqueueOperation(new TopologyOperation() {
+ private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
+ CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+ transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+ Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
@Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- if (topologyOptional.isPresent()) {
- List<Link> linkList = topologyOptional.get().getLink() != null
- ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
- for (Link link : linkList) {
- if (id.equals(link.getSource().getSourceNode()) || id.equals(link.getDestination().getDestNode())) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
- }
- }
- }
- }
+ public void onSuccess(Optional<Topology> topologyOptional) {
+ removeAffectedLinks(id, topologyOptional);
+ }
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
- }
- });
+ @Override
+ public void onFailure(Throwable throwable) {
+ LOG.error("Error reading topology data for topology {}", topology, throwable);
}
});
}
- private void removeAffectedLinks(final TpId id) {
- processor.enqueueOperation(new TopologyOperation() {
- @Override
- public void applyOperation(final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- if (topologyOptional.isPresent()) {
- List<Link> linkList = topologyOptional.get().getLink() != null
- ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
- for (Link link : linkList) {
- if (id.equals(link.getSource().getSourceTp()) || id.equals(link.getDestination().getDestTp())) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
- }
- }
- }
- }
+ private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional) {
+ if (!topologyOptional.isPresent()) {
+ return;
+ }
+
+ List<Link> linkList = topologyOptional.get().getLink() != null ?
+ topologyOptional.get().getLink() : Collections.<Link> emptyList();
+ final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+ for (Link link : linkList) {
+ if (id.equals(link.getSource().getSourceNode()) ||
+ id.equals(link.getDestination().getDestNode())) {
+ linkIDsToDelete.add(linkPath(link));
+ }
+ }
+
+ enqueueLinkDeletes(linkIDsToDelete);
+ }
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
+ private void enqueueLinkDeletes(final Collection<InstanceIdentifier<Link>> linkIDsToDelete) {
+ if(!linkIDsToDelete.isEmpty()) {
+ processor.enqueueOperation(new TopologyOperation() {
+ @Override
+ public void applyOperation(ReadWriteTransaction transaction) {
+ for(InstanceIdentifier<Link> linkID: linkIDsToDelete) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID);
}
- });
+ }
+
+ @Override
+ public String toString() {
+ return "Delete Links " + linkIDsToDelete.size();
+ }
+ });
+ }
+ }
+
+ private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
+ CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+ transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+ Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
+ @Override
+ public void onSuccess(Optional<Topology> topologyOptional) {
+ removeAffectedLinks(id, topologyOptional);
+ }
+
+ @Override
+ public void onFailure(Throwable throwable) {
+ LOG.error("Error reading topology data for topology {}", topology, throwable);
}
});
}
+ private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional) {
+ if (!topologyOptional.isPresent()) {
+ return;
+ }
+
+ List<Link> linkList = topologyOptional.get().getLink() != null
+ ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
+ final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+ for (Link link : linkList) {
+ if (id.equals(link.getSource().getSourceTp()) ||
+ id.equals(link.getDestination().getDestTp())) {
+ linkIDsToDelete.add(linkPath(link));
+ }
+ }
+
+ enqueueLinkDeletes(linkIDsToDelete);
+ }
+
private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
return topology.child(Node.class, new NodeKey(nodeId));
}
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
+
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
+
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
for (; ; ) {
TopologyOperation op = queue.take();
- LOG.debug("New operations available, starting transaction");
- final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
+ LOG.debug("New {} operation available, starting transaction", op);
+ final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
int ops = 0;
do {
} else {
op = null;
}
+
+ LOG.debug("Next operation {}", op);
} while (op != null);
LOG.debug("Processed {} operations, submitting transaction", ops);
- final CheckedFuture txResultFuture = tx.submit();
- Futures.addCallback(txResultFuture, new FutureCallback() {
+ CheckedFuture<Void, TransactionCommitFailedException> txResultFuture = tx.submit();
+ Futures.addCallback(txResultFuture, new FutureCallback<Void>() {
@Override
- public void onSuccess(Object o) {
+ public void onSuccess(Void notUsed) {
LOG.debug("Topology export successful for tx :{}", tx.getIdentifier());
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.md.controller.topology.manager;
+
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.StateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+public class FlowCapableTopologyExporterTest {
+
+ @Mock
+ private DataBroker mockDataBroker;
+
+ @Mock
+ private BindingTransactionChain mockTxChain;
+
+ private OperationProcessor processor;
+
+ private FlowCapableTopologyExporter exporter;
+
+ private InstanceIdentifier<Topology> topologyIID;
+
+ private final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.initMocks(this);
+
+ doReturn(mockTxChain).when(mockDataBroker)
+ .createTransactionChain(any(TransactionChainListener.class));
+
+ processor = new OperationProcessor(mockDataBroker);
+
+ topologyIID = InstanceIdentifier.create(NetworkTopology.class)
+ .child(Topology.class, new TopologyKey(new TopologyId("test")));
+ exporter = new FlowCapableTopologyExporter(processor, topologyIID);
+
+ executor.execute(processor);
+ }
+
+ @After
+ public void tearDown() {
+ executor.shutdownNow();
+ }
+
+ @SuppressWarnings({ "rawtypes" })
+ @Test
+ public void testOnNodeRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ List<Link> linkList = Arrays.asList(
+ newLink("link1", newSourceNode("node1"), newDestNode("dest")),
+ newLink("link2", newSourceNode("source"), newDestNode("node1")),
+ newLink("link3", newSourceNode("source2"), newDestNode("dest2")));
+ final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Link.class, linkList.get(0).getKey()),
+ topologyIID.child(Link.class, linkList.get(1).getKey()),
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ };
+
+ SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+ doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+ CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+ int expDeleteCalls = expDeletedIIDs.length;
+ CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+ ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+ setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+ CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+ doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+ waitForSubmit(submitLatch1);
+
+ setReadFutureAsync(topology, readFuture);
+
+ waitForDeletes(expDeleteCalls, deleteLatch);
+
+ waitForSubmit(submitLatch2);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+ verifyMockTx(mockTx1);
+ verifyMockTx(mockTx2);
+ }
+
+ @SuppressWarnings({ "rawtypes" })
+ @Test
+ public void testOnNodeRemovedWithNoTopology() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ };
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+ waitForSubmit(submitLatch);
+
+ waitForDeletes(1, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(
+ newLink("link1", newSourceTp("tp1"), newDestTp("dest")),
+ newLink("link2", newSourceTp("source"), newDestTp("tp1")),
+ newLink("link3", newSourceTp("source2"), newDestTp("dest2")));
+ final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Link.class, linkList.get(0).getKey()),
+ topologyIID.child(Link.class, linkList.get(1).getKey()),
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+ };
+
+ final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+ doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+ CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+ int expDeleteCalls = expDeletedIIDs.length;
+ CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+ ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+ setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+ CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+ doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).build());
+
+ waitForSubmit(submitLatch1);
+
+ setReadFutureAsync(topology, readFuture);
+
+ waitForDeletes(expDeleteCalls, deleteLatch);
+
+ waitForSubmit(submitLatch2);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+ verifyMockTx(mockTx1);
+ verifyMockTx(mockTx2);
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorRemovedWithNoTopology() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ InstanceIdentifier[] expDeletedIIDs = {
+ topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+ .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+ };
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ waitForDeletes(1, deleteLatch);
+
+ assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+ }
+
+ @Test
+ public void testOnNodeUpdated() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+ InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeUpdated(new NodeUpdatedBuilder().setNodeRef(new NodeRef(invNodeID))
+ .setId(nodeKey.getId()).addAugmentation(FlowCapableNodeUpdated.class,
+ new FlowCapableNodeUpdatedBuilder().build()).build());
+
+ waitForSubmit(submitLatch);
+
+ ArgumentCaptor<Node> mergedNode = ArgumentCaptor.forClass(Node.class);
+ NodeId expNodeId = new NodeId("node1");
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(Node.class,
+ new NodeKey(expNodeId))), mergedNode.capture(), eq(true));
+ assertEquals("getNodeId", expNodeId, mergedNode.getValue().getNodeId());
+ InventoryNode augmentation = mergedNode.getValue().getAugmentation(InventoryNode.class);
+ assertNotNull("Missing augmentation", augmentation);
+ assertEquals("getInventoryNodeRef", new NodeRef(invNodeID), augmentation.getInventoryNodeRef());
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdated() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().build()).build());
+
+ waitForSubmit(submitLatch);
+
+ ArgumentCaptor<TerminationPoint> mergedNode = ArgumentCaptor.forClass(TerminationPoint.class);
+ NodeId expNodeId = new NodeId("node1");
+ TpId expTpId = new TpId("tp1");
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(expNodeId)).child(TerminationPoint.class,
+ new TerminationPointKey(expTpId));
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ mergedNode.capture(), eq(true));
+ assertEquals("getTpId", expTpId, mergedNode.getValue().getTpId());
+ InventoryNodeConnector augmentation = mergedNode.getValue().getAugmentation(
+ InventoryNodeConnector.class);
+ assertNotNull("Missing augmentation", augmentation);
+ assertEquals("getInventoryNodeConnectorRef", new NodeConnectorRef(invNodeConnID),
+ augmentation.getInventoryNodeConnectorRef());
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdatedWithLinkStateDown() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+ Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().setState(
+ new StateBuilder().setLinkDown(true).build()).build()).build());
+
+ waitForDeletes(1, deleteLatch);
+
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+ new TerminationPointKey(new TpId("tp1")));
+
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ any(TerminationPoint.class), eq(true));
+
+ assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+ linkList.get(0).getKey())}, deletedLinkIDs);
+ }
+
+
+ @SuppressWarnings("rawtypes")
+ @Test
+ public void testOnNodeConnectorUpdatedWithPortDown() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ nodeKey = newInvNodeKey("node1");
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+ newInvNodeConnKey("tp1");
+
+ InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+ List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+ Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ setupStubbedSubmit(mockTx);
+
+ CountDownLatch deleteLatch = new CountDownLatch(1);
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+ ArgumentCaptor.forClass(InstanceIdentifier.class);
+ setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+ new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+ FlowCapableNodeConnectorUpdated.class,
+ new FlowCapableNodeConnectorUpdatedBuilder().setConfiguration(
+ new PortConfig(true, true, true, true)).build()).build());
+
+ waitForDeletes(1, deleteLatch);
+
+ InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+ Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+ new TerminationPointKey(new TpId("tp1")));
+
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+ any(TerminationPoint.class), eq(true));
+
+ assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+ linkList.get(0).getKey())}, deletedLinkIDs);
+ }
+
+ @Test
+ public void testOnLinkDiscovered() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onLinkDiscovered(new LinkDiscoveredBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ ArgumentCaptor<Link> mergedNode = ArgumentCaptor.forClass(Link.class);
+ verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+ mergedNode.capture(), eq(true));
+ assertEquals("Source node ID", "sourceNode",
+ mergedNode.getValue().getSource().getSourceNode().getValue());
+ assertEquals("Source TP ID", "sourceTP",
+ mergedNode.getValue().getSource().getSourceTp().getValue());
+ assertEquals("Dest node ID", "destNode",
+ mergedNode.getValue().getDestination().getDestNode().getValue());
+ assertEquals("Dest TP ID", "destTP",
+ mergedNode.getValue().getDestination().getDestTp().getValue());
+ }
+
+ @Test
+ public void testOnLinkRemoved() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+ exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ verify(mockTx).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+ }
+
+ private void verifyMockTx(ReadWriteTransaction mockTx) {
+ InOrder inOrder = inOrder(mockTx);
+ inOrder.verify(mockTx, atLeast(0)).submit();
+ inOrder.verify(mockTx, never()).delete(eq(LogicalDatastoreType.OPERATIONAL),
+ any(InstanceIdentifier.class));
+ }
+
+ @SuppressWarnings("rawtypes")
+ private void assertDeletedIDs(InstanceIdentifier[] expDeletedIIDs,
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs) {
+ Set<InstanceIdentifier> actualIIDs = new HashSet<>(deletedLinkIDs.getAllValues());
+ for(InstanceIdentifier id: expDeletedIIDs) {
+ assertTrue("Missing expected deleted IID " + id, actualIIDs.contains(id));
+ }
+ }
+
+ private void setReadFutureAsync(final Topology topology,
+ final SettableFuture<Optional<Topology>> readFuture) {
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ readFuture.set(Optional.of(topology));
+ }
+
+ }.start();
+ }
+
+ private void waitForSubmit(CountDownLatch latch) {
+ assertEquals("Transaction submitted", true,
+ Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
+ }
+
+ private void waitForDeletes(int expDeleteCalls, final CountDownLatch latch) {
+ boolean done = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS);
+ if(!done) {
+ fail("Expected " + expDeleteCalls + " delete calls. Actual: " +
+ (expDeleteCalls - latch.getCount()));
+ }
+ }
+
+ private CountDownLatch setupStubbedSubmit(ReadWriteTransaction mockTx) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ doAnswer(new Answer<CheckedFuture<Void, TransactionCommitFailedException>>() {
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> answer(
+ InvocationOnMock invocation) {
+ latch.countDown();
+ return Futures.immediateCheckedFuture(null);
+ }
+ }).when(mockTx).submit();
+
+ return latch;
+ }
+
+ @SuppressWarnings("rawtypes")
+ private void setupStubbedDeletes(ReadWriteTransaction mockTx,
+ ArgumentCaptor<InstanceIdentifier> deletedLinkIDs, final CountDownLatch latch) {
+ doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) {
+ latch.countDown();
+ return null;
+ }
+ }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL), deletedLinkIDs.capture());
+ }
+
+ private org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ newInvNodeKey(String id) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey =
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey(
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+ rev130819.NodeId(id));
+ return nodeKey;
+ }
+
+ private NodeConnectorKey newInvNodeConnKey(String id) {
+ return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey(
+ new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.
+ NodeConnectorId(id));
+ }
+
+ private KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> newNodeConnID(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey) {
+ return InstanceIdentifier.create(Nodes.class).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+ nodeKey).child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+ rev130819.node.NodeConnector.class, ncKey);
+ }
+
+ private Link newLink(String id, Source source, Destination dest) {
+ return new LinkBuilder().setLinkId(new LinkId(id))
+ .setSource(source).setDestination(dest).build();
+ }
+
+ private Destination newDestTp(String id) {
+ return new DestinationBuilder().setDestTp(new TpId(id)).build();
+ }
+
+ private Source newSourceTp(String id) {
+ return new SourceBuilder().setSourceTp(new TpId(id)).build();
+ }
+
+ private Destination newDestNode(String id) {
+ return new DestinationBuilder().setDestNode(new NodeId(id)).build();
+ }
+
+ private Source newSourceNode(String id) {
+ return new SourceBuilder().setSourceNode(new NodeId(id)).build();
+ }
+}
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import java.util.List;
import java.util.Map;
public static String checkPrefixAndExtractServiceName(XmlElement typeElement, Map.Entry<String, String> prefixNamespace) throws NetconfDocumentedException {
String serviceName = typeElement.getTextContent();
// FIXME: comparing Entry with String:
- Preconditions.checkState(!prefixNamespace.equals(""), "Service %s value not prefixed with namespace",
+ Preconditions.checkState(!Strings.isNullOrEmpty(prefixNamespace.getKey()), "Service %s value not prefixed with namespace",
XmlNetconfConstants.TYPE_KEY);
String prefix = prefixNamespace.getKey() + PREFIX_SEPARATOR;
Preconditions.checkState(serviceName.startsWith(prefix),
Date revision = null;
Map<Date, EditConfig.IdentityMapping> revisions = identityMap.get(namespace);
if(revisions.keySet().size() > 1) {
- for (Date date : revisions.keySet()) {
- if(revisions.get(date).containsIdName(localName)) {
+ for (Map.Entry<Date, EditConfig.IdentityMapping> revisionToIdentityEntry : revisions.entrySet()) {
+ if(revisionToIdentityEntry.getValue().containsIdName(localName)) {
Preconditions.checkState(revision == null, "Duplicate identity %s, in namespace %s, with revisions: %s, %s detected. Cannot map attribute",
- localName, namespace, revision, date);
- revision = date;
+ localName, namespace, revision, revisionToIdentityEntry.getKey());
+ revision = revisionToIdentityEntry.getKey();
}
}
} else {
Map<String, Map<String, Collection<ObjectName>>> retVal = Maps.newLinkedHashMap();
- for (String namespace : configs.keySet()) {
+ for (Entry<String, Map<String, ModuleConfig>> namespaceToModuleToConfigEntry : configs.entrySet()) {
Map<String, Collection<ObjectName>> innerRetVal = Maps.newHashMap();
- for (Entry<String, ModuleConfig> mbeEntry : configs.get(namespace).entrySet()) {
+ for (Entry<String, ModuleConfig> mbeEntry : namespaceToModuleToConfigEntry.getValue().entrySet()) {
String moduleName = mbeEntry.getKey();
Collection<ObjectName> instances = moduleToInstances.get(moduleName);
}
- retVal.put(namespace, innerRetVal);
+ retVal.put(namespaceToModuleToConfigEntry.getKey(), innerRetVal);
}
return retVal;
}
Element modulesElement = XmlUtil.createElement(document, XmlNetconfConstants.MODULES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
dataElement.appendChild(modulesElement);
- for (String moduleNamespace : moduleToInstances.keySet()) {
- for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstances.get(moduleNamespace)
+ for (Entry<String, Map<String, Collection<ObjectName>>> moduleToInstanceEntry : moduleToInstances.entrySet()) {
+ for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstanceEntry.getValue()
.entrySet()) {
- ModuleConfig mapping = moduleConfigs.get(moduleNamespace).get(moduleMappingEntry.getKey());
+ ModuleConfig mapping = moduleConfigs.get(moduleToInstanceEntry.getKey()).get(moduleMappingEntry.getKey());
if (moduleMappingEntry.getValue().isEmpty()) {
continue;
}
for (ObjectName objectName : moduleMappingEntry.getValue()) {
- modulesElement.appendChild(mapping.toXml(objectName, document, moduleNamespace));
+ modulesElement.appendChild(mapping.toXml(objectName, document, moduleToInstanceEntry.getKey()));
}
}
this.configServiceRefRegistry = configServiceRefRegistry;
}
-
public ObjectName getByServiceAndRefName(String namespace, String serviceName, String refName) {
Map<String, Map<String, String>> serviceNameToRefNameToInstance = getMappedServices().get(namespace);
Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
Map<String, Map<String, ObjectName>> serviceMapping = configServiceRefRegistry.getServiceMapping();
- for (String serviceQName : serviceMapping.keySet()){
- for (String refName : serviceMapping.get(serviceQName).keySet()) {
+ for (Map.Entry<String, Map<String, ObjectName>> qNameToRefNameEntry : serviceMapping.entrySet()){
+ for (String refName : qNameToRefNameEntry.getValue().keySet()) {
- ObjectName on = serviceMapping.get(serviceQName).get(refName);
+ ObjectName on = qNameToRefNameEntry.getValue().get(refName);
Services.ServiceInstance si = Services.ServiceInstance.fromObjectName(on);
- QName qname = QName.create(serviceQName);
+ QName qname = QName.create(qNameToRefNameEntry.getKey());
String namespace = qname.getNamespace().toString();
Map<String, Map<String, String>> serviceToRefs = retVal.get(namespace);
if(serviceToRefs==null) {
Element root = XmlUtil.createElement(document, XmlNetconfConstants.SERVICES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
Map<String, Map<String, Map<String, String>>> mappedServices = serviceRegistryWrapper.getMappedServices();
- for (String namespace : mappedServices.keySet()) {
+ for (Entry<String, Map<String, Map<String, String>>> namespaceToRefEntry : mappedServices.entrySet()) {
- for (Entry<String, Map<String, String>> serviceEntry : mappedServices.get(namespace).entrySet()) {
+ for (Entry<String, Map<String, String>> serviceEntry : namespaceToRefEntry.getValue().entrySet()) {
// service belongs to config.yang namespace
Element serviceElement = XmlUtil.createElement(document, SERVICE_KEY, Optional.<String>absent());
root.appendChild(serviceElement);
// type belongs to config.yang namespace
String serviceType = serviceEntry.getKey();
Element typeElement = XmlUtil.createTextElementWithNamespacedContent(document, XmlNetconfConstants.TYPE_KEY,
- XmlNetconfConstants.PREFIX, namespace, serviceType);
+ XmlNetconfConstants.PREFIX, namespaceToRefEntry.getKey(), serviceType);
serviceElement.appendChild(typeElement);
Map<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceNameToRefNameToInstance = services
.getNamespaceToServiceNameToRefNameToInstance();
- for (String serviceNamespace : namespaceToServiceNameToRefNameToInstance.keySet()) {
- for (String serviceName : namespaceToServiceNameToRefNameToInstance.get(serviceNamespace).keySet()) {
+ for (Map.Entry<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceToRefEntry : namespaceToServiceNameToRefNameToInstance.entrySet()) {
+ for (Map.Entry<String, Map<String, Services.ServiceInstance>> serviceToRefEntry : namespaceToServiceToRefEntry.getValue().entrySet()) {
- String qnameOfService = getQname(ta, serviceNamespace, serviceName);
- Map<String, Services.ServiceInstance> refNameToInstance = namespaceToServiceNameToRefNameToInstance
- .get(serviceNamespace).get(serviceName);
+ String qnameOfService = getQname(ta, namespaceToServiceToRefEntry.getKey(), serviceToRefEntry.getKey());
+ Map<String, Services.ServiceInstance> refNameToInstance = serviceToRefEntry.getValue();
- for (String refName : refNameToInstance.keySet()) {
- ObjectName on = refNameToInstance.get(refName).getObjectName(ta.getTransactionName());
+ for (Map.Entry<String, Services.ServiceInstance> refNameToServiceEntry : refNameToInstance.entrySet()) {
+ ObjectName on = refNameToServiceEntry.getValue().getObjectName(ta.getTransactionName());
try {
- ObjectName saved = ta.saveServiceReference(qnameOfService, refName, on);
+ ObjectName saved = ta.saveServiceReference(qnameOfService, refNameToServiceEntry.getKey(), on);
logger.debug("Saving service {} with on {} under name {} with service on {}", qnameOfService,
- on, refName, saved);
+ on, refNameToServiceEntry.getKey(), saved);
} catch (InstanceNotFoundException e) {
- throw new NetconfDocumentedException(String.format("Unable to save ref name " + refName + " for instance " + on, e),
+ throw new NetconfDocumentedException(String.format("Unable to save ref name %s for instance %s", refNameToServiceEntry.getKey(), on),
ErrorType.application,
ErrorTag.operation_failed,
ErrorSeverity.error);
Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
- for (String namespace : mBeanEntries.keySet()) {
- for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : mBeanEntries.get(namespace).entrySet()) {
+ for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleToMbe : mBeanEntries.entrySet()) {
+ for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : namespaceToModuleToMbe.getValue().entrySet()) {
String moduleName = moduleNameToMbe.getKey();
ModuleMXBeanEntry moduleMXBeanEntry = moduleNameToMbe.getValue();
ModuleConfig moduleConfig = new ModuleConfig(moduleName,
new InstanceConfig(configRegistryClient,moduleMXBeanEntry.getAttributes(), moduleMXBeanEntry.getNullableDummyContainerName()));
- Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespace);
+ Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespaceToModuleToMbe.getKey());
if(moduleNameToModuleConfig == null) {
moduleNameToModuleConfig = Maps.newHashMap();
- namespaceToModuleNameToModuleConfig.put(namespace, moduleNameToModuleConfig);
+ namespaceToModuleNameToModuleConfig.put(namespaceToModuleToMbe.getKey(), moduleNameToModuleConfig);
}
moduleNameToModuleConfig.put(moduleName, moduleConfig);
Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleRuntime>> retVal = Maps.newHashMap();
- for (String namespace : mBeanEntries.keySet()) {
+ for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
Map<String, ModuleRuntime> innerMap = Maps.newHashMap();
- Map<String, ModuleMXBeanEntry> entriesFromNamespace = mBeanEntries.get(namespace);
- for (String module : entriesFromNamespace.keySet()) {
+ Map<String, ModuleMXBeanEntry> entriesFromNamespace = namespaceToModuleEntry.getValue();
+ for (Map.Entry<String, ModuleMXBeanEntry> moduleToMXEntry : entriesFromNamespace.entrySet()) {
- ModuleMXBeanEntry mbe = entriesFromNamespace.get(module);
+ ModuleMXBeanEntry mbe = moduleToMXEntry.getValue();
Map<RuntimeBeanEntry, InstanceConfig> cache = Maps.newHashMap();
RuntimeBeanEntry root = null;
InstanceRuntime rootInstanceRuntime = createInstanceRuntime(root, cache);
ModuleRuntime moduleRuntime = new ModuleRuntime(rootInstanceRuntime);
- innerMap.put(module, moduleRuntime);
+ innerMap.put(moduleToMXEntry.getKey(), moduleRuntime);
}
- retVal.put(namespace, innerMap);
+ retVal.put(namespaceToModuleEntry.getKey(), innerMap);
}
return retVal;
}
final String[] signature = new String[attributes.size()];
int i = 0;
- for (final String attrName : attributes.keySet()) {
- final AttributeConfigElement attribute = attributes.get(attrName);
+ for (final AttributeConfigElement attribute : attributes.values()) {
final Optional<?> resolvedValueOpt = attribute.getResolvedValue();
params[i] = resolvedValueOpt.isPresent() ? resolvedValueOpt.get() : attribute.getResolvedDefaultValue();
final Map<String, Map<String, ModuleRpcs>> map = Maps.newHashMap();
- for (final String namespace : mBeanEntries.keySet()) {
+ for (final Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
- Map<String, ModuleRpcs> namespaceToModules = map.get(namespace);
+ Map<String, ModuleRpcs> namespaceToModules = map.get(namespaceToModuleEntry.getKey());
if (namespaceToModules == null) {
namespaceToModules = Maps.newHashMap();
- map.put(namespace, namespaceToModules);
+ map.put(namespaceToModuleEntry.getKey(), namespaceToModules);
}
- for (final String moduleName : mBeanEntries.get(namespace).keySet()) {
+ for (final Map.Entry<String, ModuleMXBeanEntry> moduleEntry : namespaceToModuleEntry.getValue().entrySet()) {
- ModuleRpcs rpcMapping = namespaceToModules.get(moduleName);
+ ModuleRpcs rpcMapping = namespaceToModules.get(moduleEntry.getKey());
if (rpcMapping == null) {
rpcMapping = new ModuleRpcs();
- namespaceToModules.put(moduleName, rpcMapping);
+ namespaceToModules.put(moduleEntry.getKey(), rpcMapping);
}
- final ModuleMXBeanEntry entry = mBeanEntries.get(namespace).get(moduleName);
+ final ModuleMXBeanEntry entry = moduleEntry.getValue();
for (final RuntimeBeanEntry runtimeEntry : entry.getRuntimeBeans()) {
rpcMapping.addNameMapping(runtimeEntry);
allOpenedTransactions.clear();
}
- public Optional<ObjectName> getTransaction() {
+ public synchronized Optional<ObjectName> getTransaction() {
if (transaction == null){
return Optional.absent();
private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterNotificationHandler.class);
private final MBeanServerConnection mBeanServerConnection;
- private final ConfigPersisterNotificationListener listener;
+ private final NotificationListener listener;
- public ConfigPersisterNotificationHandler(MBeanServerConnection mBeanServerConnection,
- Persister persisterAggregator) {
+ public ConfigPersisterNotificationHandler(final MBeanServerConnection mBeanServerConnection, final Persister persisterAggregator) {
+ this(mBeanServerConnection, new ConfigPersisterNotificationListener(persisterAggregator));
+ }
+
+ public ConfigPersisterNotificationHandler(final MBeanServerConnection mBeanServerConnection, final NotificationListener notificationListener) {
this.mBeanServerConnection = mBeanServerConnection;
- listener = new ConfigPersisterNotificationListener(persisterAggregator);
+ this.listener = notificationListener;
registerAsJMXListener(mBeanServerConnection, listener);
-
}
- private static void registerAsJMXListener(MBeanServerConnection mBeanServerConnection, ConfigPersisterNotificationListener listener) {
+ private static void registerAsJMXListener(final MBeanServerConnection mBeanServerConnection, final NotificationListener listener) {
logger.trace("Called registerAsJMXListener");
try {
mBeanServerConnection.addNotificationListener(DefaultCommitOperationMXBean.OBJECT_NAME, listener, null, null);
@Override
public synchronized void close() {
// unregister from JMX
- ObjectName on = DefaultCommitOperationMXBean.OBJECT_NAME;
+ final ObjectName on = DefaultCommitOperationMXBean.OBJECT_NAME;
try {
if (mBeanServerConnection.isRegistered(on)) {
mBeanServerConnection.removeNotificationListener(on, listener);
}
- } catch (Exception e) {
+ } catch (final Exception e) {
logger.warn("Unable to unregister {} as listener for {}", listener, on, e);
}
}
private final Persister persisterAggregator;
- ConfigPersisterNotificationListener(Persister persisterAggregator) {
+ ConfigPersisterNotificationListener(final Persister persisterAggregator) {
this.persisterAggregator = persisterAggregator;
}
@Override
- public void handleNotification(Notification notification, Object handback) {
+ public void handleNotification(final Notification notification, final Object handback) {
if (!(notification instanceof NetconfJMXNotification))
return;
if (notification instanceof CommitJMXNotification) {
try {
handleAfterCommitNotification((CommitJMXNotification) notification);
- } catch (Exception e) {
+ } catch (final Exception e) {
// log exceptions from notification Handler here since
// notificationBroadcastSupport logs only DEBUG level
logger.warn("Failed to handle notification {}", notification, e);
persisterAggregator.persistConfig(new CapabilityStrippingConfigSnapshotHolder(notification.getConfigSnapshot(),
notification.getCapabilities()));
logger.trace("Configuration persisted successfully");
- } catch (IOException e) {
+ } catch (final IOException e) {
throw new RuntimeException("Unable to persist configuration snapshot", e);
}
}
import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
-
+import javax.annotation.Nonnull;
import javax.annotation.concurrent.Immutable;
import javax.management.MBeanServerConnection;
-
import org.opendaylight.controller.config.api.ConflictingVersionException;
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
-
@Immutable
public class ConfigPusherImpl implements ConfigPusher {
private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationService serviceCandidate) {
Collection<String> actual = Collections2.transform(serviceCandidate.getCapabilities(), new Function<Capability, String>() {
@Override
- public String apply(Capability input) {
+ public String apply(@Nonnull final Capability input) {
return input.getCapabilityUri();
}
});
public void persistConfig(ConfigSnapshotHolder holder) throws IOException {
for (PersisterWithConfiguration persisterWithConfiguration: persisterWithConfigurations){
if (!persisterWithConfiguration.readOnly){
- logger.debug("Calling {}.persistConfig",persisterWithConfiguration.storage);
- persisterWithConfiguration.storage.persistConfig(holder);
+ logger.debug("Calling {}.persistConfig", persisterWithConfiguration.getStorage());
+ persisterWithConfiguration.getStorage().persistConfig(holder);
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.persist.impl;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import javax.management.MBeanServerConnection;
+
+import javax.management.NotificationFilter;
+import javax.management.NotificationListener;
+import javax.management.ObjectName;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.config.persist.api.Persister;
+
+public class ConfigPersisterNotificationHandlerTest {
+
+ @Mock
+ private MBeanServerConnection mBeanServer;
+ @Mock
+ private Persister notificationListener;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ doNothing().when(mBeanServer).addNotificationListener(any(ObjectName.class), any(NotificationListener.class),
+ any(NotificationFilter.class), anyObject());
+ }
+
+ @Test
+ public void testNotificationHandler() throws Exception {
+ doReturn(true).when(mBeanServer).isRegistered(any(ObjectName.class));
+ doThrow(Exception.class).when(mBeanServer).removeNotificationListener(any(ObjectName.class), any(NotificationListener.class));
+
+ final ConfigPersisterNotificationHandler testedHandler = new ConfigPersisterNotificationHandler(mBeanServer, notificationListener);
+ verify(mBeanServer).addNotificationListener(any(ObjectName.class), any(NotificationListener.class),
+ any(NotificationFilter.class), anyObject());
+
+ testedHandler.close();
+ verify(mBeanServer).removeNotificationListener(any(ObjectName.class), any(NotificationListener.class));
+ }
+
+ @Test
+ public void testNotificationHandlerCloseNotRegistered() throws Exception {
+ doReturn(false).when(mBeanServer).isRegistered(any(ObjectName.class));
+
+ final ConfigPersisterNotificationHandler testedHandler = new ConfigPersisterNotificationHandler(mBeanServer, notificationListener);
+
+ testedHandler.close();
+ verify(mBeanServer, times(0)).removeNotificationListener(any(ObjectName.class), any(NotificationListener.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.persist.impl;
+
+import java.util.Collections;
+
+import javax.management.Notification;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.api.Persister;
+import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
+import org.opendaylight.controller.netconf.api.jmx.NetconfJMXNotification;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+
+import com.google.common.collect.Lists;
+
+public class ConfigPersisterNotificationListenerTest {
+
+ @Mock
+ private Persister mockPersister;
+ private PersisterAggregator persisterAggregator;
+
+ @Mock
+ private NetconfJMXNotification unknownNetconfNotif;
+ @Mock
+ private CommitJMXNotification commitNetconfNotif;
+ @Mock
+ private Notification unknownNotif;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+
+ Mockito.doNothing().when(mockPersister).persistConfig(Matchers.any(ConfigSnapshotHolder.class));
+ Mockito.doReturn("persister").when(mockPersister).toString();
+ final PersisterAggregator.PersisterWithConfiguration withCfg = new PersisterAggregator.PersisterWithConfiguration(mockPersister, false);
+ persisterAggregator = new PersisterAggregator(Lists.newArrayList(withCfg));
+
+ Mockito.doReturn("netconfUnknownNotification").when(unknownNetconfNotif).toString();
+ Mockito.doReturn("netconfCommitNotification").when(commitNetconfNotif).toString();
+
+ Mockito.doReturn(XmlUtil.readXmlToElement("<config-snapshot/>")).when(commitNetconfNotif).getConfigSnapshot();
+ Mockito.doReturn(Collections.emptySet()).when(commitNetconfNotif).getCapabilities();
+
+ }
+
+ @Test
+ public void testNotificationListenerUnknownNotification() throws Exception {
+ final ConfigPersisterNotificationListener testedListener = new ConfigPersisterNotificationListener(persisterAggregator);
+ testedListener.handleNotification(unknownNotif, null);
+ Mockito.verifyZeroInteractions(mockPersister);
+ }
+
+ @Test
+ public void testNotificationListenerUnknownNetconfNotification() throws Exception {
+ final ConfigPersisterNotificationListener testedListener = new ConfigPersisterNotificationListener(persisterAggregator);
+ try {
+ testedListener.handleNotification(unknownNetconfNotif, null);
+ Assert.fail("Unknown netconf notification should fail");
+ } catch (final IllegalStateException e) {
+ Mockito.verifyZeroInteractions(mockPersister);
+ }
+ }
+
+ @Test
+ public void testNotificationListenerCommitNetconfNotification() throws Exception {
+ final ConfigPersisterNotificationListener testedListener = new ConfigPersisterNotificationListener(persisterAggregator);
+ testedListener.handleNotification(commitNetconfNotif, null);
+ Mockito.verify(mockPersister).persistConfig(Matchers.any(ConfigSnapshotHolder.class));
+ }
+}
package org.opendaylight.controller.netconf.persist.impl;
+import com.google.common.collect.Lists;
+
import org.junit.Test;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
assertEquals(1, DummyAdapter.props);
}
+ @Test
+ public void testNoopAdapter() throws Exception {
+ final NoOpStorageAdapter noOpStorageAdapter = new NoOpStorageAdapter();
+ final PersisterAggregator persisterAggregator =
+ new PersisterAggregator(Lists.newArrayList(new PersisterWithConfiguration(noOpStorageAdapter, false)));
+
+ noOpStorageAdapter.instantiate(null);
+
+ persisterAggregator.persistConfig(null);
+ persisterAggregator.loadLastConfigs();
+ persisterAggregator.persistConfig(null);
+ persisterAggregator.loadLastConfigs();
+
+ noOpStorageAdapter.close();
+ }
+
@Test
public void testLoadFromPropertyFile() throws Exception {
PersisterAggregator persisterAggregator = PersisterAggregator.createFromProperties(loadFile("test2.properties"));
<groupId>${project.groupId}</groupId>
<artifactId>netconf-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
package org.opendaylight.controller.netconf.client;
+import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import io.netty.channel.Channel;
logger.debug("Netconf session {} should use exi.", session);
NetconfStartExiMessage startExiMessage = (NetconfStartExiMessage) sessionPreferences.getStartExiMessage();
tryToInitiateExi(session, startExiMessage);
- // Exi is not supported, release session immediately
} else {
+ // Exi is not supported, release session immediately
logger.debug("Netconf session {} isn't capable of using exi.", session);
negotiationSuccessful(session);
}
private long extractSessionId(final Document doc) {
final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+ Preconditions.checkState(sessionIdNode != null, "");
String textContent = sessionIdNode.getTextContent();
if (textContent == null || textContent.equals("")) {
throw new IllegalStateException("Session id not received from server");
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+
+import java.net.InetSocketAddress;
+
+public class NetconfClientConfigurationTest {
+ @Test
+ public void testNetconfClientConfiguration() throws Exception {
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategy strategy = Mockito.mock(ReconnectStrategy.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ NetconfClientConfiguration cfg = NetconfClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(strategy).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withAuthHandler(handler).build();
+
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getReconnectStrategy());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+
+public class NetconfClientDispatcherImplTest {
+ @Test
+ public void testNetconfClientDispatcherImpl() throws Exception {
+ EventLoopGroup bossGroup = Mockito.mock(EventLoopGroup.class);
+ EventLoopGroup workerGroup = Mockito.mock(EventLoopGroup.class);
+ Timer timer = new HashedWheelTimer();
+
+ ChannelFuture chf = Mockito.mock(ChannelFuture.class);
+ Channel ch = Mockito.mock(Channel.class);
+ doReturn(ch).when(chf).channel();
+ Throwable thr = Mockito.mock(Throwable.class);
+ doReturn(chf).when(workerGroup).register(any(Channel.class));
+
+ ChannelPromise promise = Mockito.mock(ChannelPromise.class);
+ doReturn(promise).when(chf).addListener(any(GenericFutureListener.class));
+ doReturn(thr).when(chf).cause();
+
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory reconnectStrategyFactory = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ doReturn(5).when(reconnect).getConnectTimeout();
+ doReturn("").when(reconnect).toString();
+ doReturn("").when(handler).toString();
+ doReturn("").when(reconnectStrategyFactory).toString();
+ doReturn(reconnect).when(reconnectStrategyFactory).createReconnectStrategy();
+
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfReconnectingClientConfiguration cfg2 = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.TCP).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(reconnectStrategyFactory).
+ withAuthHandler(handler).build();
+
+ NetconfClientDispatcherImpl dispatcher = new NetconfClientDispatcherImpl(bossGroup, workerGroup, timer);
+ Future<NetconfClientSession> sshSession = dispatcher.createClient(cfg);
+ Future<NetconfClientSession> tcpSession = dispatcher.createClient(cfg2);
+
+ Future<Void> sshReconn = dispatcher.createReconnectingClient(cfg);
+ Future<Void> tcpReconn = dispatcher.createReconnectingClient(cfg2);
+
+ assertNotNull(sshSession);
+ assertNotNull(tcpSession);
+ assertNotNull(sshReconn);
+ assertNotNull(tcpReconn);
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.Promise;
+import org.apache.sshd.common.SessionListener;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionNegotiatorFactoryTest {
+ @Test
+ public void testGetSessionNegotiator() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class);
+ doReturn(sessionListener).when(listenerFactory).getSessionListener();
+
+ Channel channel = mock(Channel.class);
+ Promise promise = mock(Promise.class);
+ NetconfClientSessionNegotiatorFactory negotiatorFactory = new NetconfClientSessionNegotiatorFactory(timer,
+ Optional.<NetconfHelloMessageAdditionalHeader>absent(), 200L);
+
+ SessionNegotiator sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
+ assertNotNull(sessionNegotiator);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.*;
+import io.netty.handler.ssl.SslHandler;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.Promise;
+import org.apache.mina.handler.demux.ExceptionHandler;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.collections.Sets;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import io.netty.util.Timer;
+import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.w3c.dom.Document;
+import java.util.Set;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class NetconfClientSessionNegotiatorTest {
+
+ private NetconfHelloMessage helloMessage;
+ private ChannelPipeline pipeline;
+ private ChannelFuture future;
+ private Channel channel;
+ private ChannelInboundHandlerAdapter channelInboundHandlerAdapter;
+
+ @Before
+ public void setUp() throws Exception {
+ helloMessage = NetconfHelloMessage.createClientHello(Sets.newSet("exi:1.0"), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+ pipeline = mockChannelPipeline();
+ future = mockChannelFuture();
+ channel = mockChannel();
+ System.out.println("setup done");
+ }
+
+ private ChannelHandler mockChannelHandler() {
+ ChannelHandler handler = mock(ChannelHandler.class);
+ return handler;
+ }
+
+ private Channel mockChannel() {
+ Channel channel = mock(Channel.class);
+ ChannelHandler channelHandler = mockChannelHandler();
+ doReturn("").when(channel).toString();
+ doReturn(future).when(channel).close();
+ doReturn(future).when(channel).writeAndFlush(anyObject());
+ doReturn(true).when(channel).isOpen();
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(pipeline).toString();
+ doReturn(pipeline).when(pipeline).remove(any(ChannelHandler.class));
+ doReturn(channelHandler).when(pipeline).remove(anyString());
+ return channel;
+ }
+
+ private ChannelFuture mockChannelFuture() {
+ ChannelFuture future = mock(ChannelFuture.class);
+ doReturn(future).when(future).addListener(any(GenericFutureListener.class));
+ return future;
+ }
+
+ private ChannelPipeline mockChannelPipeline() {
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ ChannelHandler handler = mock(ChannelHandler.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ doReturn(null).when(pipeline).get(SslHandler.class);
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+ doReturn(handler).when(pipeline).replace(anyString(), anyString(), any(ChunkedFramingMechanismEncoder.class));
+
+ NetconfXMLToHelloMessageDecoder messageDecoder = new NetconfXMLToHelloMessageDecoder();
+ doReturn(messageDecoder).when(pipeline).replace(anyString(), anyString(), any(NetconfXMLToMessageDecoder.class));
+ doReturn(pipeline).when(pipeline).replace(any(ChannelHandler.class), anyString(), any(NetconfClientSession.class));
+ return pipeline;
+ }
+
+ private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise,
+ NetconfMessage startExi) {
+ ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class);
+ NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi);
+ doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class));
+
+ long timeout = 10L;
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ Timer timer = new HashedWheelTimer();
+ return new NetconfClientSessionNegotiator(preferences, promise, channel, timer, sessionListener, timeout);
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiator() throws Exception {
+ Promise promise = mock(Promise.class);
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("a", "b");
+ NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ negotiator.handleMessage(helloServerMessage);
+ verify(promise).setSuccess(anyObject());
+ }
+
+ @Test
+ public void testNetconfClientSessionNegotiatorWithEXI() throws Exception {
+ Promise promise = mock(Promise.class);
+ EXIOptions exiOptions = new EXIOptions();
+ NetconfStartExiMessage exiMessage = NetconfStartExiMessage.create(exiOptions, "msg-id");
+ doReturn(promise).when(promise).setSuccess(anyObject());
+ NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage);
+
+ negotiator.channelActive(null);
+ Set caps = Sets.newSet("exi:1.0");
+ NetconfHelloMessage helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+ channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]);
+ return null;
+ }
+ }).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+
+ ChannelHandlerContext handlerContext = mock(ChannelHandlerContext.class);
+ doReturn(pipeline).when(handlerContext).pipeline();
+ negotiator.handleMessage(helloMessage);
+ Document expectedResult = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ channelInboundHandlerAdapter.channelRead(handlerContext, new NetconfMessage(expectedResult));
+
+ verify(promise).setSuccess(anyObject());
+
+ // two calls for exiMessage, 2 for hello message
+ verify(pipeline, times(4)).replace(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.openexi.proc.common.EXIOptions;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionTest {
+
+ @Mock
+ ChannelHandler channelHandler;
+
+ @Mock
+ Channel channel;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @Test
+ public void testNetconfClientSession() throws Exception {
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+ long sessId = 20L;
+ Collection<String> caps = Lists.newArrayList("cap1", "cap2");
+
+ NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+
+ Mockito.doReturn(pipeline).when(channel).pipeline();
+ Mockito.doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+ Mockito.doReturn("").when(channelHandler).toString();
+
+ NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps);
+ session.addExiHandlers(codec);
+ session.stopExiCommunication();
+
+ assertEquals(caps, session.getServerCapabilities());
+ assertEquals(session, session.thisInstance());
+
+ Mockito.verify(pipeline, Mockito.times(4)).replace(anyString(), anyString(), Mockito.any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.config.yang.protocol.framework.NeverReconnectStrategyFactoryModule;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+
+public class NetconfReconnectingClientConfigurationTest {
+ @Test
+ public void testNetconfReconnectingClientConfiguration() throws Exception {
+ Long timeout = 200L;
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+ NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+ InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+ ReconnectStrategyFactory strategy = Mockito.mock(ReconnectStrategyFactory.class);
+ AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+ ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+ NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+ withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+ withAddress(address).
+ withConnectionTimeoutMillis(timeout).
+ withReconnectStrategy(reconnect).
+ withAdditionalHeader(header).
+ withSessionListener(listener).
+ withConnectStrategyFactory(strategy).
+ withAuthHandler(handler).build();
+
+ Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+ Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+ Assert.assertEquals(listener, cfg.getSessionListener());
+ Assert.assertEquals(handler, cfg.getAuthHandler());
+ Assert.assertEquals(strategy, cfg.getConnectStrategyFactory());
+ Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+ Assert.assertEquals(address, cfg.getAddress());
+ Assert.assertEquals(reconnect, cfg.getReconnectStrategy());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+import io.netty.channel.*;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+
+import java.util.Set;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.*;
+
+public class SimpleNetconfClientSessionListenerTest {
+
+ private Channel channel;
+ private ChannelFuture channelFuture;
+ Set caps;
+ private NetconfHelloMessage helloMessage;
+ private NetconfMessage message;
+ private NetconfClientSessionListener sessionListener;
+ private NetconfClientSession clientSession;
+
+ @Before
+ public void setUp() throws Exception {
+ channel = mock(Channel.class);
+ channelFuture = mock(ChannelFuture.class);
+ doReturn(channelFuture).when(channel).writeAndFlush(anyObject());
+ caps = Sets.newSet("a", "b");
+ helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+ message = new NetconfMessage(helloMessage.getDocument());
+ sessionListener = mock(NetconfClientSessionListener.class);
+ clientSession = new NetconfClientSession(sessionListener, channel, 20L, caps);
+ }
+
+ @Test
+ public void testSessionDown() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ simpleListener.onSessionDown(clientSession, new Exception());
+ assertFalse(promise.isSuccess());
+ }
+
+ @Test
+ public void testSendRequest() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ simpleListener.sendRequest(message);
+ assertFalse(promise.isSuccess());
+ }
+
+ @Test
+ public void testOnMessage() throws Exception {
+ SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+ Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+ simpleListener.onSessionUp(clientSession);
+ verify(channel, times(1)).writeAndFlush(anyObject());
+
+ simpleListener.onMessage(clientSession, message);
+ assertTrue(promise.isSuccess());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class SshClientChannelInitializerTest {
+ @Test
+ public void test() throws Exception {
+
+ AuthenticationHandler authenticationHandler = mock(AuthenticationHandler.class);
+ NetconfClientSessionNegotiatorFactory negotiatorFactory = mock(NetconfClientSessionNegotiatorFactory.class);
+ NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+
+ SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ doReturn("").when(sessionNegotiator).toString();
+ doReturn(sessionNegotiator).when(negotiatorFactory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ Channel channel = mock(Channel.class);
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(channel).toString();
+ doReturn(pipeline).when(pipeline).addFirst(any(ChannelHandler.class));
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+
+ Promise<NetconfClientSession> promise = mock(Promise.class);
+ doReturn("").when(promise).toString();
+
+ SshClientChannelInitializer initializer = new SshClientChannelInitializer(authenticationHandler, negotiatorFactory,
+ sessionListener);
+ initializer.initialize(channel, promise);
+ verify(pipeline, times(1)).addFirst(any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Mockito.*;
+
+public class TcpClientChannelInitializerTest {
+ @Test
+ public void testInitializeSessionNegotiator() throws Exception {
+ NetconfClientSessionNegotiatorFactory factory = mock(NetconfClientSessionNegotiatorFactory.class);
+ SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ doReturn("").when(sessionNegotiator).toString();
+ doReturn(sessionNegotiator).when(factory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+ NetconfClientSessionListener listener = mock(NetconfClientSessionListener.class);
+ TcpClientChannelInitializer initializer = new TcpClientChannelInitializer(factory, listener);
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ Channel channel = mock(Channel.class);
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn("").when(channel).toString();
+
+ Promise<NetconfClientSession> promise = mock(Promise.class);
+ doReturn("").when(promise).toString();
+
+ initializer.initializeSessionNegotiator(channel, promise);
+ verify(pipeline, times(1)).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.netconf.client.test;
+package org.opendaylight.controller.netconf.client;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
-import org.opendaylight.controller.netconf.client.NetconfClientSession;
-import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
-import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration.NetconfClientProtocol;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import io.netty.util.internal.ConcurrentSet;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
import org.opendaylight.controller.netconf.mapping.api.Capability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, SessionMonitoringService {
private static final Logger logger = LoggerFactory.getLogger(NetconfMonitoringServiceImpl.class);
private List<Session> transformSessions(Set<NetconfManagementSession> sessions) {
return Lists.newArrayList(Collections2.transform(sessions, new Function<NetconfManagementSession, Session>() {
- @Nullable
@Override
- public Session apply(@Nullable NetconfManagementSession input) {
+ public Session apply(@Nonnull NetconfManagementSession input) {
return input.toManagementSession();
}
}));
this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
}
- private void initNetconfOperations(Set<NetconfOperation> allOperations) {
+ private synchronized void initNetconfOperations(Set<NetconfOperation> allOperations) {
allNetconfOperations = allOperations;
}
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import org.opendaylight.controller.netconf.mapping.api.Capability;
<artifactId>config-util</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-api</artifactId>
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import io.netty.channel.local.LocalAddress;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.auth.AuthProvider;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.xml.sax.SAXException;
public class NetconfITSecureTest extends AbstractNetconfConfigTest {
@Test
public void testSecure() throws Exception {
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
- try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+ try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(new SimpleNetconfClientSessionListener()))) {
NetconfMessage response = netconfClient.sendMessage(getGetConfig());
assertFalse("Unexpected error message " + XmlUtil.toString(response.getDocument()),
NetconfMessageUtil.isErrorMessage(response));
/**
* Test all requests are handled properly and no mismatch occurs in listener
*/
- @Test(timeout = 3*60*1000)
+ @Test(timeout = 5*60*1000)
public void testSecureStress() throws Exception {
+ final int requests = 10000;
+
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
- try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+ final NetconfDeviceCommunicator sessionListener = getSessionListener();
+ try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(sessionListener))) {
final AtomicInteger responseCounter = new AtomicInteger(0);
- final List<Future<?>> futures = Lists.newArrayList();
+ final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
- final int requests = 1000;
for (int i = 0; i < requests; i++) {
- final Future<NetconfMessage> netconfMessageFuture = netconfClient.sendRequest(getGetConfig());
+ NetconfMessage getConfig = getGetConfig();
+ getConfig = changeMessageId(getConfig, i);
+ final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture = sessionListener.sendRequest(getConfig, QName.create("namespace", "2012-12-12", "get"));
futures.add(netconfMessageFuture);
- netconfMessageFuture.addListener(new GenericFutureListener<Future<? super NetconfMessage>>() {
+ Futures.addCallback(netconfMessageFuture, new FutureCallback<RpcResult<NetconfMessage>>() {
@Override
- public void operationComplete(final Future<? super NetconfMessage> future) throws Exception {
- assertTrue("Request unsuccessful " + future.cause(), future.isSuccess());
+ public void onSuccess(final RpcResult<NetconfMessage> result) {
responseCounter.incrementAndGet();
}
+
+ @Override
+ public void onFailure(final Throwable t) {
+ throw new RuntimeException(t);
+ }
});
}
- for (final Future<?> future : futures) {
- future.await();
+ // Wait for every future
+ for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+ try {
+ future.get(3, TimeUnit.MINUTES);
+ } catch (final TimeoutException e) {
+ fail("Request " + futures.indexOf(future) + " is not responding");
+ }
}
// Give future listeners some time to finish counter incrementation
}
}
- public NetconfClientConfiguration getClientConfiguration() throws IOException {
+ // Returns a copy of the given get-config request with its message-id rewritten to i,
+ // so every request sent by the stress test carries a unique id.
+ // NOTE(review): this replaces EVERY occurrence of the substring "101" in the serialized
+ // XML, not only the message-id attribute — assumes "101" appears nowhere else in the
+ // get-config template; confirm against the fixture message.
+ private NetconfMessage changeMessageId(final NetconfMessage getConfig, final int i) throws IOException, SAXException {
+ String s = XmlUtil.toString(getConfig.getDocument(), false);
+ s = s.replace("101", Integer.toString(i));
+ return new NetconfMessage(XmlUtil.readXmlToDocument(s));
+ }
+
+ public NetconfClientConfiguration getClientConfiguration(final NetconfClientSessionListener sessionListener) throws IOException {
final NetconfClientConfigurationBuilder b = NetconfClientConfigurationBuilder.create();
b.withAddress(TLS_ADDRESS);
- b.withSessionListener(new SimpleNetconfClientSessionListener());
+ // Using session listener from sal-netconf-connector since stress test cannot be performed with simple listener
+ b.withSessionListener(sessionListener);
b.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
b.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
b.withConnectionTimeoutMillis(5000);
return b.build();
}
+ // Mocked remote device handed to the NetconfDeviceCommunicator; the session up/down
+ // callbacks are stubbed to no-ops because this test only exercises request/response
+ // correlation, not device state handling.
+ @Mock
+ private RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice;
+
+ // Builds the sal-netconf-connector session listener used by the stress test.
+ // Mockito mocks are initialized lazily here rather than in a @Before, since only
+ // this code path needs them.
+ private NetconfDeviceCommunicator getSessionListener() {
+ MockitoAnnotations.initMocks(this);
+ doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+ doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
+ return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
+ }
+
public AuthProvider getAuthProvider() throws Exception {
final AuthProvider mockAuth = mock(AuthProvider.class);
doReturn("mockedAuth").when(mockAuth).toString();
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
</appender>
<logger name="org.opendaylight.controller.netconf" level="TRACE"/>
+ <logger name="org.opendaylight.controller.sal.connect.netconf" level="TRACE"/>
<root level="error">
<appender-ref ref="STDOUT" />
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Collections2;
+import javax.annotation.Nonnull;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.schemas.Schema;
return Collections2.transform(schema.getLocation(), new Function<Schema.Location, String>() {
@Nullable
@Override
- public String apply(@Nullable Schema.Location input) {
+ public String apply(@Nonnull Schema.Location input) {
return input.getEnumeration().toString();
}
});
public class JaxBSerializerTest {
@Test
- public void testName() throws Exception {
+ public void testSerialization() throws Exception {
final NetconfMonitoringService service = new NetconfMonitoringService() {
}
};
final NetconfState model = new NetconfState(service);
- final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model));
+ final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)).replaceAll("\\s", "");
assertThat(xml, CoreMatchers.containsString(
- "<schema>\n" +
- "<format>yang</format>\n" +
- "<identifier>id</identifier>\n" +
- "<location>NETCONF</location>\n" +
- "<namespace>localhost</namespace>\n" +
- "<version>v1</version>\n" +
- "</schema>\n"));
+ "<schema>" +
+ "<format>yang</format>" +
+ "<identifier>id</identifier>" +
+ "<location>NETCONF</location>" +
+ "<namespace>localhost</namespace>" +
+ "<version>v1</version>" +
+ "</schema>"));
assertThat(xml, CoreMatchers.containsString(
- "<session>\n" +
- "<session-id>1</session-id>\n" +
- "<in-bad-rpcs>0</in-bad-rpcs>\n" +
- "<in-rpcs>0</in-rpcs>\n" +
- "<login-time>loginTime</login-time>\n" +
- "<out-notifications>0</out-notifications>\n" +
- "<out-rpc-errors>0</out-rpc-errors>\n" +
- "<ncme:session-identifier>client</ncme:session-identifier>\n" +
- "<source-host>address/port</source-host>\n" +
- "<transport>ncme:netconf-tcp</transport>\n" +
- "<username>username</username>\n" +
+ "<session>" +
+ "<session-id>1</session-id>" +
+ "<in-bad-rpcs>0</in-bad-rpcs>" +
+ "<in-rpcs>0</in-rpcs>" +
+ "<login-time>loginTime</login-time>" +
+ "<out-notifications>0</out-notifications>" +
+ "<out-rpc-errors>0</out-rpc-errors>" +
+ "<ncme:session-identifier>client</ncme:session-identifier>" +
+ "<source-host>address/port</source-host>" +
+ "<transport>ncme:netconf-tcp</transport>" +
+ "<username>username</username>" +
"</session>"));
}
*/
package org.opendaylight.controller.netconf.nettyutil.handler.exi;
-import com.google.common.base.Preconditions;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.openexi.proc.common.AlignmentType;
import org.openexi.proc.common.EXIOptions;
import org.openexi.proc.common.EXIOptionsException;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+import com.google.common.base.Preconditions;
public final class EXIParameters {
private static final String EXI_PARAMETER_ALIGNMENT = "alignment";
- private static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned";
- private static final String EXI_PARAMETER_BIT_PACKED = "bit-packed";
- private static final String EXI_PARAMETER_COMPRESSED = "compressed";
- private static final String EXI_PARAMETER_PRE_COMPRESSION = "pre-compression";
+ static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned";
+ static final String EXI_PARAMETER_BIT_PACKED = "bit-packed";
+ static final String EXI_PARAMETER_COMPRESSED = "compressed";
+ static final String EXI_PARAMETER_PRE_COMPRESSION = "pre-compression";
private static final String EXI_PARAMETER_FIDELITY = "fidelity";
private static final String EXI_FIDELITY_DTD = "dtd";
final EXIOptions options = new EXIOptions();
options.setAlignmentType(AlignmentType.bitPacked);
- if (root.getElementsByTagName(EXI_PARAMETER_ALIGNMENT).getLength() > 0) {
- if (root.getElementsByTagName(EXI_PARAMETER_BIT_PACKED).getLength() > 0) {
- options.setAlignmentType(AlignmentType.bitPacked);
- } else if (root.getElementsByTagName(EXI_PARAMETER_BYTE_ALIGNED).getLength() > 0) {
- options.setAlignmentType(AlignmentType.byteAligned);
- } else if (root.getElementsByTagName(EXI_PARAMETER_COMPRESSED).getLength() > 0) {
- options.setAlignmentType(AlignmentType.compress);
- } else if (root.getElementsByTagName(EXI_PARAMETER_PRE_COMPRESSION).getLength() > 0) {
- options.setAlignmentType(AlignmentType.preCompress);
+
+ final NodeList alignmentElements = root.getElementsByTagName(EXI_PARAMETER_ALIGNMENT);
+ if (alignmentElements.getLength() > 0) {
+ final Element alignmentElement = (Element) alignmentElements.item(0);
+ final String alignmentTextContent = alignmentElement.getTextContent().trim();
+
+ switch (alignmentTextContent) {
+ case EXI_PARAMETER_BIT_PACKED:
+ options.setAlignmentType(AlignmentType.bitPacked);
+ break;
+ case EXI_PARAMETER_BYTE_ALIGNED:
+ options.setAlignmentType(AlignmentType.byteAligned);
+ break;
+ case EXI_PARAMETER_COMPRESSED:
+ options.setAlignmentType(AlignmentType.compress);
+ break;
+ case EXI_PARAMETER_PRE_COMPRESSION:
+ options.setAlignmentType(AlignmentType.preCompress);
+ break;
}
}
package org.opendaylight.controller.netconf.nettyutil.handler.exi;
+import com.google.common.collect.Lists;
import java.util.List;
-
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
-import com.google.common.collect.Lists;
-
/**
* Start-exi netconf message.
*/
public static final String PIS_KEY = "pis";
public static final String PREFIXES_KEY = "prefixes";
- private NetconfStartExiMessage(Document doc) {
+ private NetconfStartExiMessage(final Document doc) {
super(doc);
}
- public static NetconfStartExiMessage create(EXIOptions exiOptions, String messageId) {
- Document doc = XmlUtil.newDocument();
- Element rpcElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ public static NetconfStartExiMessage create(final EXIOptions exiOptions, final String messageId) {
+ final Document doc = XmlUtil.newDocument();
+ final Element rpcElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
XmlNetconfConstants.RPC_KEY);
rpcElement.setAttributeNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
XmlNetconfConstants.MESSAGE_ID, messageId);
// TODO draft http://tools.ietf.org/html/draft-varga-netconf-exi-capability-02#section-3.5.1 has no namespace for start-exi element in xml
- Element startExiElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
+ final Element startExiElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
START_EXI);
addAlignment(exiOptions, doc, startExiElement);
return new NetconfStartExiMessage(doc);
}
- private static void addFidelity(EXIOptions exiOptions, Document doc, Element startExiElement) {
- List<Element> fidelityElements = Lists.newArrayList();
+ private static void addFidelity(final EXIOptions exiOptions, final Document doc, final Element startExiElement) {
+ final List<Element> fidelityElements = Lists.newArrayList();
createFidelityElement(doc, fidelityElements, exiOptions.getPreserveComments(), COMMENTS_KEY);
createFidelityElement(doc, fidelityElements, exiOptions.getPreserveDTD(), DTD_KEY);
createFidelityElement(doc, fidelityElements, exiOptions.getPreserveLexicalValues(), LEXICAL_VALUES_KEY);
createFidelityElement(doc, fidelityElements, exiOptions.getPreserveNS(), PREFIXES_KEY);
if (fidelityElements.isEmpty() == false) {
- Element fidelityElement = doc.createElementNS(
+ final Element fidelityElement = doc.createElementNS(
XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, FIDELITY_KEY);
- for (Element element : fidelityElements) {
+ for (final Element element : fidelityElements) {
fidelityElement.appendChild(element);
}
startExiElement.appendChild(fidelityElement);
}
}
- private static void addAlignment(EXIOptions exiOptions, Document doc, Element startExiElement) {
- Element alignmentElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
+ private static void addAlignment(final EXIOptions exiOptions, final Document doc, final Element startExiElement) {
+ final Element alignmentElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
ALIGNMENT_KEY);
- alignmentElement.setTextContent(exiOptions.getAlignmentType().toString());
+
+ String alignmentString = EXIParameters.EXI_PARAMETER_BIT_PACKED;
+ switch (exiOptions.getAlignmentType()) {
+ case byteAligned: {
+ alignmentString = EXIParameters.EXI_PARAMETER_BYTE_ALIGNED;
+ break;
+ }
+ case bitPacked: {
+ alignmentString = EXIParameters.EXI_PARAMETER_BIT_PACKED;
+ break;
+ }
+ case compress: {
+ alignmentString = EXIParameters.EXI_PARAMETER_COMPRESSED;
+ break;
+ }
+ case preCompress: {
+ alignmentString = EXIParameters.EXI_PARAMETER_PRE_COMPRESSION;
+ break;
+ }
+ }
+
+ alignmentElement.setTextContent(alignmentString);
startExiElement.appendChild(alignmentElement);
}
- private static void createFidelityElement(Document doc, List<Element> fidelityElements, boolean fidelity, String fidelityName) {
+ private static void createFidelityElement(final Document doc, final List<Element> fidelityElements, final boolean fidelity, final String fidelityName) {
if (fidelity) {
fidelityElements.add(doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
connectPromise = null;
sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut());
- sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
-
- ctx.fireChannelActive();
+ // if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null
+ if(channel != null) {
+ sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
+ ctx.fireChannelActive();
+ }
}
private synchronized void handleSshSetupFailure(final ChannelHandlerContext ctx, final Throwable e) {
@Override
public synchronized void operationComplete(final IoReadFuture future) {
if(future.getException() != null) {
-
if(asyncOut.isClosed() || asyncOut.isClosing()) {
-
// Ssh dropped
logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
- invokeDisconnect();
- return;
} else {
logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
- invokeDisconnect();
}
+ invokeDisconnect();
+ return;
}
if (future.getRead() > 0) {
// Check limit for pending writes
pendingWriteCounter++;
if(pendingWriteCounter > MAX_PENDING_WRITES) {
+ promise.setFailure(e);
handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() +
", remote window is not getting read or is too small"));
}
+ // We need to reset buffer read index, since we've already read it when we tried to write it the first time
+ ((ByteBuf) msg).resetReaderIndex();
logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
// In case of pending, re-invoke write after pending is finished
+ Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e);
lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
@Override
public void operationComplete(final IoWriteFuture future) {
+ // FIXME possible minor race condition, we cannot guarantee that this callback when pending is finished will be executed first
+ // External thread could trigger write on this instance while we are on this line
+ // Verify
if (future.isWritten()) {
synchronized (SshWriteAsyncHandler.this) {
// Pending done, decrease counter
pendingWriteCounter--;
+ write(ctx, msg, promise);
}
- write(ctx, msg, promise);
} else {
// Cannot reschedule pending, fail
handlePendingFailed(ctx, e);
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+
+/**
+ * Unit test for {@link AbstractChannelInitializer}: verifies that initializing a channel
+ * installs the expected number of named handlers into the channel pipeline.
+ */
+public class AbstractChannelInitializerTest {
+
+ @Mock
+ private Channel channel;
+ @Mock
+ private ChannelPipeline pipeline;
+ @Mock
+ private Promise<NetconfSession> sessionPromise;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ // The pipeline mock returns itself from addLast so chained handler registration works
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+ }
+
+ @Test
+ public void testInit() throws Exception {
+ final TestingInitializer testingInitializer = new TestingInitializer();
+ testingInitializer.initialize(channel, sessionPromise);
+ // The base initializer is expected to add exactly 4 named handlers;
+ // TestingInitializer itself adds none (its negotiator hook is a no-op).
+ verify(pipeline, times(4)).addLast(anyString(), any(ChannelHandler.class));
+ }
+
+ // Minimal concrete subclass: skips session-negotiator installation so only the
+ // handlers added by the abstract base class are counted.
+ private static final class TestingInitializer extends AbstractChannelInitializer<NetconfSession> {
+
+ @Override
+ protected void initializeSessionNegotiator(final Channel ch, final Promise<NetconfSession> promise) {
+ }
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import java.util.Collections;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.api.NetconfSessionListener;
+import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.openexi.proc.common.EXIOptions;
+
+/**
+ * Unit test for {@link AbstractNetconfSession}: message dispatch to the listener,
+ * session lifecycle (up / close / end-of-input), encoder/decoder replacement in the
+ * channel pipeline and EXI negotiation start.
+ */
+public class AbstractNetconfSessionTest {
+
+ @Mock
+ private NetconfSessionListener<NetconfSession> listener;
+ @Mock
+ private Channel channel;
+ @Mock
+ private ChannelPipeline pipeline;
+ // Shared client <hello> fixture built once in setUp() and reused by the tests below
+ private NetconfHelloMessage clientHello;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ // Listener callbacks are stubbed to no-ops; the tests only verify they were invoked
+ doNothing().when(listener).onMessage(any(NetconfSession.class), any(NetconfMessage.class));
+ doNothing().when(listener).onSessionUp(any(NetconfSession.class));
+ doNothing().when(listener).onSessionDown(any(NetconfSession.class), any(Exception.class));
+ doNothing().when(listener).onSessionTerminated(any(NetconfSession.class), any(NetconfTerminationReason.class));
+
+ doReturn(mock(ChannelFuture.class)).when(channel).writeAndFlush(any(NetconfMessage.class));
+ doReturn(pipeline).when(channel).pipeline();
+ doReturn(mock(ChannelFuture.class)).when(channel).close();
+
+ doReturn(null).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+
+ clientHello = NetconfHelloMessage.createClientHello(Collections.<String>emptySet(), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+ }
+
+ @Test
+ public void testHandleMessage() throws Exception {
+ final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ testingNetconfSession.handleMessage(clientHello);
+ verify(listener).onMessage(testingNetconfSession, clientHello);
+ }
+
+ @Test
+ public void testSessionUp() throws Exception {
+ final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ testingNetconfSession.sessionUp();
+ verify(listener).onSessionUp(testingNetconfSession);
+ assertEquals(1L, testingNetconfSession.getSessionId());
+ }
+
+ @Test
+ public void testClose() throws Exception {
+ final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ testingNetconfSession.sessionUp();
+ testingNetconfSession.close();
+ verify(channel).close();
+ verify(listener).onSessionTerminated(any(NetconfSession.class), any(NetconfTerminationReason.class));
+ }
+
+ @Test
+ public void testReplaceHandlers() throws Exception {
+ final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ final ChannelHandler mock = mock(ChannelHandler.class);
+ doReturn("handler").when(mock).toString();
+
+ testingNetconfSession.replaceMessageDecoder(mock);
+ verify(pipeline).replace(AbstractChannelInitializer.NETCONF_MESSAGE_DECODER, AbstractChannelInitializer.NETCONF_MESSAGE_DECODER, mock);
+ testingNetconfSession.replaceMessageEncoder(mock);
+ verify(pipeline).replace(AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, mock);
+ // Deferred replacement must not touch the pipeline until the next message is sent
+ testingNetconfSession.replaceMessageEncoderAfterNextMessage(mock);
+ verifyNoMoreInteractions(pipeline);
+
+ testingNetconfSession.sendMessage(clientHello);
+ verify(pipeline, times(2)).replace(AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, mock);
+ }
+
+ @Test
+ public void testStartExi() throws Exception {
+ TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ testingNetconfSession = spy(testingNetconfSession);
+
+ testingNetconfSession.startExiCommunication(NetconfStartExiMessage.create(new EXIOptions(), "4"));
+ verify(testingNetconfSession).addExiHandlers(any(NetconfEXICodec.class));
+ }
+
+ @Test
+ public void testEndOfInput() throws Exception {
+ final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ // Before the session is up, end-of-input must not notify the listener
+ testingNetconfSession.endOfInput();
+ verifyZeroInteractions(listener);
+ testingNetconfSession.sessionUp();
+ testingNetconfSession.endOfInput();
+ verify(listener).onSessionDown(any(NetconfSession.class), any(Exception.class));
+ }
+
+ @Test
+ public void testSendMessage() throws Exception {
+ final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+ // Reuse the hello fixture built in setUp() instead of constructing a duplicate
+ // local copy that shadows the field.
+ testingNetconfSession.sendMessage(clientHello);
+ verify(channel).writeAndFlush(clientHello);
+ }
+
+ // Minimal concrete session: EXI handler hooks are no-ops, the base-class behavior
+ // under test is untouched.
+ private static class TestingNetconfSession extends AbstractNetconfSession<NetconfSession, NetconfSessionListener<NetconfSession>> {
+
+ protected TestingNetconfSession(final NetconfSessionListener<NetconfSession> sessionListener, final Channel channel, final long sessionId) {
+ super(sessionListener, channel, sessionId);
+ }
+
+ @Override
+ protected NetconfSession thisInstance() {
+ return this;
+ }
+
+ @Override
+ protected void addExiHandlers(final NetconfEXICodec exiCodec) {}
+
+ @Override
+ public void stopExiCommunication() {}
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler;
+
+import static org.junit.Assert.*;
+
+import com.google.common.collect.Lists;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.openexi.proc.common.EXIOptionsException;
+import org.openexi.sax.Transmogrifier;
+import org.openexi.sax.TransmogrifierException;
+import org.xml.sax.InputSource;
+
+/**
+ * Round-trip test for the netconf EXI encoder/decoder pair: a message encoded
+ * by NetconfMessageToEXIEncoder must decode back to an equivalent document.
+ */
+public class NetconfEXIHandlersTest {
+
+ private final String msgAsString = "<netconf-message/>";
+ private NetconfMessageToEXIEncoder netconfMessageToEXIEncoder;
+ private NetconfEXIToMessageDecoder netconfEXIToMessageDecoder;
+ private NetconfMessage msg;
+ private byte[] msgAsExi;
+
+ @Before
+ public void setUp() throws Exception {
+ final NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+ netconfMessageToEXIEncoder = new NetconfMessageToEXIEncoder(codec);
+ netconfEXIToMessageDecoder = new NetconfEXIToMessageDecoder(codec);
+
+ msg = new NetconfMessage(XmlUtil.readXmlToDocument(msgAsString));
+ this.msgAsExi = msgToExi(msgAsString, codec);
+ }
+
+ // Encodes the XML string to EXI with the codec's own transmogrifier; the result
+ // is the reference byte sequence the encoder under test must reproduce.
+ private byte[] msgToExi(final String msgAsString, final NetconfEXICodec codec) throws EXIOptionsException, TransmogrifierException, IOException {
+ final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ final Transmogrifier transmogrifier = codec.getTransmogrifier();
+ transmogrifier.setOutputStream(byteArrayOutputStream);
+ // Explicit charset: bare getBytes() depends on the platform default encoding
+ transmogrifier.encode(new InputSource(new ByteArrayInputStream(msgAsString.getBytes(StandardCharsets.UTF_8))));
+ return byteArrayOutputStream.toByteArray();
+ }
+
+ @Test
+ public void testEncodeDecode() throws Exception {
+ final ByteBuf buffer = Unpooled.buffer();
+ netconfMessageToEXIEncoder.encode(null, msg, buffer);
+ final int exiLength = msgAsExi.length;
+ // array from buffer is circa 256 bytes long, compare only the leading subarray
+ assertArrayEquals(msgAsExi, Arrays.copyOfRange(buffer.array(), 0, exiLength));
+
+ // assert all other bytes in buffer be 0
+ for (int i = exiLength; i < buffer.array().length; i++) {
+ assertEquals((byte)0, buffer.array()[i]);
+ }
+
+ final List<Object> out = Lists.newArrayList();
+ netconfEXIToMessageDecoder.decode(null, buffer, out);
+
+ // Fixed: the Diff was previously computed and discarded, so the round-trip
+ // comparison could never fail. Assert the decoded document explicitly.
+ final Diff diff = XMLUnit.compareXML(msg.getDocument(), ((NetconfMessage) out.get(0)).getDocument());
+ assertTrue(diff.toString(), diff.similar());
+ }
+}
\ No newline at end of file
assertThat(out.get(0), CoreMatchers.instanceOf(NetconfHelloMessage.class));
final NetconfHelloMessage hello = (NetconfHelloMessage) out.get(0);
assertTrue(hello.getAdditionalHeader().isPresent());
- assertEquals("[tomas;10.0.0.0:10000;tcp;client;]\n", hello.getAdditionalHeader().get().toFormattedString());
+ assertEquals("[tomas;10.0.0.0:10000;tcp;client;]" + System.lineSeparator(), hello.getAdditionalHeader().get().toFormattedString());
assertThat(XmlUtil.toString(hello.getDocument()), CoreMatchers.containsString("<hello xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\""));
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.exi;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.AlignmentType;
+import org.openexi.proc.common.EXIOptions;
+
+@RunWith(Parameterized.class)
+public class EXIParametersTest {
+
+ // Two fixtures: default options (bit-packed, nothing preserved) and a
+ // byte-aligned configuration with every fidelity option enabled.
+ @Parameterized.Parameters
+ public static Iterable<Object[]> data() throws Exception {
+ final String noChangeXml =
+ "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+ "<alignment>bit-packed</alignment>\n" +
+ "</start-exi>\n";
+
+
+ final String fullOptionsXml =
+ "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+ "<alignment>byte-aligned</alignment>\n" +
+ "<fidelity>\n" +
+ "<comments/>\n" +
+ "<dtd/>\n" +
+ "<lexical-values/>\n" +
+ "<pis/>\n" +
+ "<prefixes/>\n" +
+ "</fidelity>\n" +
+ "</start-exi>\n";
+
+ final EXIOptions fullOptions = new EXIOptions();
+ fullOptions.setAlignmentType(AlignmentType.byteAligned);
+ fullOptions.setPreserveLexicalValues(true);
+ fullOptions.setPreserveDTD(true);
+ fullOptions.setPreserveComments(true);
+ fullOptions.setPreserveNS(true);
+ fullOptions.setPreservePIs(true);
+
+ return Arrays.asList(new Object[][]{
+ {noChangeXml, new EXIOptions()},
+ {fullOptionsXml, fullOptions},
+ });
+ }
+
+ private final String sourceXml;
+ private final EXIOptions exiOptions;
+
+ public EXIParametersTest(final String sourceXml, final EXIOptions exiOptions) {
+ this.sourceXml = sourceXml;
+ this.exiOptions = exiOptions;
+ }
+
+ // Parses the <start-exi> payload and checks every option against the reference fixture.
+ @Test
+ public void testFromXmlElement() throws Exception {
+ final EXIParameters opts =
+ EXIParameters.fromXmlElement(
+ XmlElement.fromDomElement(
+ XmlUtil.readXmlToElement(sourceXml)));
+
+ // JUnit convention: expected value first, actual second
+ assertEquals(exiOptions.getAlignmentType(), opts.getOptions().getAlignmentType());
+ assertEquals(exiOptions.getPreserveComments(), opts.getOptions().getPreserveComments());
+ assertEquals(exiOptions.getPreserveLexicalValues(), opts.getOptions().getPreserveLexicalValues());
+ assertEquals(exiOptions.getPreserveNS(), opts.getOptions().getPreserveNS());
+ assertEquals(exiOptions.getPreserveDTD(), opts.getOptions().getPreserveDTD());
+ // Fixed: this line previously re-checked getPreserveNS(), leaving the
+ // processing-instructions option (set in the full-options fixture) unverified
+ assertEquals(exiOptions.getPreservePIs(), opts.getOptions().getPreservePIs());
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.exi;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.openexi.proc.common.AlignmentType;
+import org.openexi.proc.common.EXIOptions;
+
+@RunWith(Parameterized.class)
+public class NetconfStartExiMessageTest {
+
+ // Two fixtures: default EXIOptions (bit-packed alignment only) and a
+ // byte-aligned configuration with all fidelity options enabled; each is
+ // paired with the <rpc><start-exi> XML it is expected to produce.
+ @Parameterized.Parameters
+ public static Iterable<Object[]> data() throws Exception {
+ final String noChangeXml = "<rpc xmlns:ns0=\"urn:ietf:params:xml:ns:netconf:base:1.0\" ns0:message-id=\"id\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+ "<alignment>bit-packed</alignment>\n" +
+ "</start-exi>\n" +
+ "</rpc>";
+
+
+ final String fullOptionsXml = "<rpc xmlns:ns0=\"urn:ietf:params:xml:ns:netconf:base:1.0\" ns0:message-id=\"id\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+ "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+ "<alignment>byte-aligned</alignment>\n" +
+ "<fidelity>\n" +
+ "<comments/>\n" +
+ "<dtd/>\n" +
+ "<lexical-values/>\n" +
+ "<pis/>\n" +
+ "<prefixes/>\n" +
+ "</fidelity>\n" +
+ "</start-exi>\n" +
+ "</rpc>";
+
+ final EXIOptions fullOptions = new EXIOptions();
+ fullOptions.setAlignmentType(AlignmentType.byteAligned);
+ fullOptions.setPreserveLexicalValues(true);
+ fullOptions.setPreserveDTD(true);
+ fullOptions.setPreserveComments(true);
+ fullOptions.setPreserveNS(true);
+ fullOptions.setPreservePIs(true);
+
+ return Arrays.asList(new Object[][]{
+ {noChangeXml, new EXIOptions()},
+ {fullOptionsXml, fullOptions},
+ });
+ }
+
+ private final String controlXml;
+ private final EXIOptions exiOptions;
+
+ public NetconfStartExiMessageTest(final String controlXml, final EXIOptions exiOptions) {
+ this.controlXml = controlXml;
+ this.exiOptions = exiOptions;
+ }
+
+ // Serializes the options into a <start-exi> rpc and compares it to the control
+ // document, ignoring whitespace and attribute order.
+ @Test
+ public void testCreate() throws Exception {
+ final NetconfStartExiMessage startExiMessage = NetconfStartExiMessage.create(exiOptions, "id");
+
+ XMLUnit.setIgnoreWhitespace(true);
+ XMLUnit.setIgnoreAttributeOrder(true);
+ final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(controlXml), startExiMessage.getDocument());
+ assertTrue(diff.toString(), diff.similar());
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import java.io.IOException;
+import java.net.SocketAddress;
+
+import java.nio.channels.WritePendingException;
+import org.apache.sshd.ClientChannel;
+import org.apache.sshd.ClientSession;
+import org.apache.sshd.SshClient;
+import org.apache.sshd.client.channel.ChannelSubsystem;
+import org.apache.sshd.client.future.AuthFuture;
+import org.apache.sshd.client.future.ConnectFuture;
+import org.apache.sshd.client.future.OpenFuture;
+import org.apache.sshd.common.future.CloseFuture;
+import org.apache.sshd.common.future.SshFuture;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.common.io.IoReadFuture;
+import org.apache.sshd.common.io.IoWriteFuture;
+import org.apache.sshd.common.util.Buffer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+
+/**
+ * Tests AsyncSshHandler against fully mocked Apache SSHD client objects.
+ * Listeners registered on mocked futures are intercepted via stubAddListener()
+ * so each test can fire connect/auth/channel-open completion manually.
+ */
+public class AsyncSshHandlerTest {
+
+ @Mock
+ private SshClient sshClient;
+ @Mock
+ private AuthenticationHandler authHandler;
+ @Mock
+ private ChannelHandlerContext ctx;
+ @Mock
+ private Channel channel;
+ @Mock
+ private SocketAddress remoteAddress;
+ @Mock
+ private SocketAddress localAddress;
+
+ private AsyncSshHandler asyncSshHandler;
+
+ // Listeners captured from the handler so tests can drive the SSH state machine
+ private SshFutureListener<ConnectFuture> sshConnectListener;
+ private SshFutureListener<AuthFuture> sshAuthListener;
+ private SshFutureListener<OpenFuture> sshChannelOpenListener;
+
+ private ChannelPromise promise;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ stubAuth();
+ stubSshClient();
+ stubChannel();
+ stubCtx();
+ stubRemoteAddress();
+
+ promise = getMockedPromise();
+
+ asyncSshHandler = new AsyncSshHandler(authHandler, sshClient);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ sshConnectListener = null;
+ sshAuthListener = null;
+ sshChannelOpenListener = null;
+ promise = null;
+ asyncSshHandler.close(ctx, getMockedPromise());
+ }
+
+ private void stubAuth() throws IOException {
+ doReturn("usr").when(authHandler).getUsername();
+
+ final AuthFuture authFuture = mock(AuthFuture.class);
+ Futures.addCallback(stubAddListener(authFuture), new SuccessFutureListener<AuthFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<AuthFuture> result) {
+ sshAuthListener = result;
+ }
+ });
+ doReturn(authFuture).when(authHandler).authenticate(any(ClientSession.class));
+ }
+
+ // Captures the SshFutureListener registered on a mocked future and exposes it
+ // through a ListenableFuture, letting tests invoke completion manually.
+ @SuppressWarnings("unchecked")
+ private <T extends SshFuture<T>> ListenableFuture<SshFutureListener<T>> stubAddListener(final T future) {
+ final SettableFuture<SshFutureListener<T>> listenerSettableFuture = SettableFuture.create();
+
+ doAnswer(new Answer() {
+ @Override
+ public Object answer(final InvocationOnMock invocation) throws Throwable {
+ listenerSettableFuture.set((SshFutureListener<T>) invocation.getArguments()[0]);
+ return null;
+ }
+ }).when(future).addListener(any(SshFutureListener.class));
+
+ return listenerSettableFuture;
+ }
+
+ private void stubRemoteAddress() {
+ doReturn("remote").when(remoteAddress).toString();
+ }
+
+ private void stubCtx() {
+ doReturn(channel).when(ctx).channel();
+ doReturn(ctx).when(ctx).fireChannelActive();
+ doReturn(ctx).when(ctx).fireChannelInactive();
+ doReturn(ctx).when(ctx).fireChannelRead(anyObject());
+ doReturn(getMockedPromise()).when(ctx).newPromise();
+ }
+
+ private void stubChannel() {
+ doReturn("channel").when(channel).toString();
+ }
+
+ private void stubSshClient() {
+ doNothing().when(sshClient).start();
+ final ConnectFuture connectFuture = mock(ConnectFuture.class);
+ Futures.addCallback(stubAddListener(connectFuture), new SuccessFutureListener<ConnectFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<ConnectFuture> result) {
+ sshConnectListener = result;
+ }
+ });
+ doReturn(connectFuture).when(sshClient).connect("usr", remoteAddress);
+ }
+
+ @Test
+ public void testConnectSuccess() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
+
+ verify(promise).setSuccess();
+ verifyNoMoreInteractions(promise);
+ verify(ctx).fireChannelActive();
+ }
+
+ @Test
+ public void testRead() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ verify(ctx).fireChannelRead(any(ByteBuf.class));
+ }
+
+ @Test
+ public void testReadClosed() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoReadFuture mockedReadFuture = asyncOut.read(null);
+
+ // Re-stub the read future to report a closed stream before firing completion
+ Futures.addCallback(stubAddListener(mockedReadFuture), new SuccessFutureListener<IoReadFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<IoReadFuture> result) {
+ doReturn(new IllegalStateException()).when(mockedReadFuture).getException();
+ doReturn(mockedReadFuture).when(mockedReadFuture).removeListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+ doReturn(true).when(asyncOut).isClosing();
+ doReturn(true).when(asyncOut).isClosed();
+ result.operationComplete(mockedReadFuture);
+ }
+ });
+
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ verify(ctx).fireChannelInactive();
+ }
+
+ @Test
+ public void testReadFail() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoReadFuture mockedReadFuture = asyncOut.read(null);
+
+ // Re-stub the read future to fail with an exception (stream not closed)
+ Futures.addCallback(stubAddListener(mockedReadFuture), new SuccessFutureListener<IoReadFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<IoReadFuture> result) {
+ doReturn(new IllegalStateException()).when(mockedReadFuture).getException();
+ doReturn(mockedReadFuture).when(mockedReadFuture).removeListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+ result.operationComplete(mockedReadFuture);
+ }
+ });
+
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ verify(ctx).fireChannelInactive();
+ }
+
+ @Test
+ public void testWrite() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ final ChannelPromise writePromise = getMockedPromise();
+ asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), writePromise);
+
+ verify(writePromise).setSuccess();
+ }
+
+ @Test
+ public void testWriteClosed() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+
+ final IoWriteFuture ioWriteFuture = asyncIn.write(null);
+
+ // Re-stub the write future to report a failed write on a closed stream
+ Futures.addCallback(stubAddListener(ioWriteFuture), new SuccessFutureListener<IoWriteFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<IoWriteFuture> result) {
+ doReturn(false).when(ioWriteFuture).isWritten();
+ doReturn(new IllegalStateException()).when(ioWriteFuture).getException();
+ doReturn(true).when(asyncIn).isClosing();
+ doReturn(true).when(asyncIn).isClosed();
+ result.operationComplete(ioWriteFuture);
+ }
+ });
+
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ final ChannelPromise writePromise = getMockedPromise();
+ asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), writePromise);
+
+ verify(writePromise).setFailure(any(Throwable.class));
+ }
+
+ @Test
+ public void testWritePendingOne() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final IoWriteFuture ioWriteFuture = asyncIn.write(null);
+
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ final ChannelPromise firstWritePromise = getMockedPromise();
+
+ // intercept listener for first write, so we can invoke successful write later thus simulate pending of the first write
+ final ListenableFuture<SshFutureListener<IoWriteFuture>> firstWriteListenerFuture = stubAddListener(ioWriteFuture);
+ asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), firstWritePromise);
+ final SshFutureListener<IoWriteFuture> firstWriteListener = firstWriteListenerFuture.get();
+ // intercept second listener, this is the listener for pending write for the pending write to know when pending state ended
+ final ListenableFuture<SshFutureListener<IoWriteFuture>> pendingListener = stubAddListener(ioWriteFuture);
+
+ final ChannelPromise secondWritePromise = getMockedPromise();
+ // now make write throw pending exception
+ doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
+ asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
+
+ doReturn(ioWriteFuture).when(asyncIn).write(any(Buffer.class));
+
+ verifyZeroInteractions(firstWritePromise, secondWritePromise);
+
+ // make first write stop pending
+ firstWriteListener.operationComplete(ioWriteFuture);
+ // intercept third listener, this is regular listener for second write to determine success or failure
+ final ListenableFuture<SshFutureListener<IoWriteFuture>> afterPendingListener = stubAddListener(ioWriteFuture);
+
+ // notify listener for second write that pending has ended
+ pendingListener.get().operationComplete(ioWriteFuture);
+ // Notify third listener (regular listener for second write) that second write succeeded
+ afterPendingListener.get().operationComplete(ioWriteFuture);
+
+ // verify both write promises successful
+ verify(firstWritePromise).setSuccess();
+ verify(secondWritePromise).setSuccess();
+ }
+
+ @Test
+ public void testWritePendingMax() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final IoWriteFuture ioWriteFuture = asyncIn.write(null);
+
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ final ChannelPromise firstWritePromise = getMockedPromise();
+
+ // intercept listener for first write, so we can invoke successful write later thus simulate pending of the first write
+ final ListenableFuture<SshFutureListener<IoWriteFuture>> firstWriteListenerFuture = stubAddListener(ioWriteFuture);
+ asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), firstWritePromise);
+
+ final ChannelPromise secondWritePromise = getMockedPromise();
+ // now make write throw pending exception
+ doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
+ for (int i = 0; i < 1000; i++) {
+ asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
+ }
+
+ verify(ctx).fireChannelInactive();
+ }
+
+ @Test
+ public void testDisconnect() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+ sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+ final ChannelPromise disconnectPromise = getMockedPromise();
+ asyncSshHandler.disconnect(ctx, disconnectPromise);
+
+ verify(sshSession).close(anyBoolean());
+ verify(disconnectPromise).setSuccess();
+ verify(ctx).fireChannelInactive();
+ }
+
+ // Mocks a channel-open future that reports success.
+ // Renamed local: was misleadingly called "failedOpenFuture".
+ private OpenFuture getSuccessOpenFuture() {
+ final OpenFuture openFuture = mock(OpenFuture.class);
+ doReturn(true).when(openFuture).isOpened();
+ return openFuture;
+ }
+
+ // Mocks an authentication future that reports success
+ private AuthFuture getSuccessAuthFuture() {
+ final AuthFuture authFuture = mock(AuthFuture.class);
+ doReturn(true).when(authFuture).isSuccess();
+ return authFuture;
+ }
+
+ // Mocks a successful connect future that yields the given session
+ private ConnectFuture getSuccessConnectFuture(final ClientSession sshSession) {
+ final ConnectFuture connectFuture = mock(ConnectFuture.class);
+ doReturn(true).when(connectFuture).isConnected();
+
+ doReturn(sshSession).when(connectFuture).getSession();
+ return connectFuture;
+ }
+
+ // Mocks an open client session whose close future completes immediately
+ private ClientSession getMockedSshSession(final ChannelSubsystem subsystemChannel) throws IOException {
+ final ClientSession sshSession = mock(ClientSession.class);
+
+ doReturn("sshSession").when(sshSession).toString();
+ doReturn("serverVersion").when(sshSession).getServerVersion();
+ doReturn(false).when(sshSession).isClosed();
+ doReturn(false).when(sshSession).isClosing();
+ final CloseFuture closeFuture = mock(CloseFuture.class);
+ Futures.addCallback(stubAddListener(closeFuture), new SuccessFutureListener<CloseFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<CloseFuture> result) {
+ doReturn(true).when(closeFuture).isClosed();
+ result.operationComplete(closeFuture);
+ }
+ });
+ doReturn(closeFuture).when(sshSession).close(false);
+
+ doReturn(subsystemChannel).when(sshSession).createSubsystemChannel(anyString());
+
+ return sshSession;
+ }
+
+ // Mocks a subsystem channel exposing the given async streams; its open future's
+ // listener is captured into sshChannelOpenListener
+ private ChannelSubsystem getMockedSubsystemChannel(final IoInputStream asyncOut, final IoOutputStream asyncIn) throws IOException {
+ final ChannelSubsystem subsystemChannel = mock(ChannelSubsystem.class);
+ doNothing().when(subsystemChannel).setStreaming(any(ClientChannel.Streaming.class));
+ final OpenFuture openFuture = mock(OpenFuture.class);
+
+ Futures.addCallback(stubAddListener(openFuture), new SuccessFutureListener<OpenFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<OpenFuture> result) {
+ sshChannelOpenListener = result;
+ }
+ });
+
+ doReturn(asyncOut).when(subsystemChannel).getAsyncOut();
+
+ doReturn(openFuture).when(subsystemChannel).open();
+ doReturn(asyncIn).when(subsystemChannel).getAsyncIn();
+ return subsystemChannel;
+ }
+
+ // Mocks an output stream whose writes always complete successfully
+ private IoOutputStream getMockedIoOutputStream() {
+ final IoOutputStream mock = mock(IoOutputStream.class);
+ final IoWriteFuture ioWriteFuture = mock(IoWriteFuture.class);
+ doReturn(ioWriteFuture).when(ioWriteFuture).addListener(Matchers.<SshFutureListener<IoWriteFuture>>any());
+ doReturn(true).when(ioWriteFuture).isWritten();
+
+ Futures.addCallback(stubAddListener(ioWriteFuture), new SuccessFutureListener<IoWriteFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<IoWriteFuture> result) {
+ result.operationComplete(ioWriteFuture);
+ }
+ });
+
+ doReturn(ioWriteFuture).when(mock).write(any(Buffer.class));
+ doReturn(false).when(mock).isClosed();
+ doReturn(false).when(mock).isClosing();
+ return mock;
+ }
+
+ // Mocks an input stream whose reads always yield 5 bytes successfully
+ private IoInputStream getMockedIoInputStream() {
+ final IoInputStream mock = mock(IoInputStream.class);
+ final IoReadFuture ioReadFuture = mock(IoReadFuture.class);
+ doReturn(null).when(ioReadFuture).getException();
+ doReturn(ioReadFuture).when(ioReadFuture).removeListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+ doReturn(5).when(ioReadFuture).getRead();
+ doReturn(new Buffer(new byte[]{0, 1, 2, 3, 4})).when(ioReadFuture).getBuffer();
+ doReturn(ioReadFuture).when(ioReadFuture).addListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+
+ // Always success for read
+ Futures.addCallback(stubAddListener(ioReadFuture), new SuccessFutureListener<IoReadFuture>() {
+ @Override
+ public void onSuccess(final SshFutureListener<IoReadFuture> result) {
+ result.operationComplete(ioReadFuture);
+ }
+ });
+
+ doReturn(ioReadFuture).when(mock).read(any(Buffer.class));
+ doReturn(false).when(mock).isClosed();
+ doReturn(false).when(mock).isClosing();
+ return mock;
+ }
+
+ @Test
+ public void testConnectFailOpenChannel() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final IoInputStream asyncOut = getMockedIoInputStream();
+ final IoOutputStream asyncIn = getMockedIoOutputStream();
+ final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+ final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+
+ sshAuthListener.operationComplete(getSuccessAuthFuture());
+
+ verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
+
+ try {
+ sshChannelOpenListener.operationComplete(getFailedOpenFuture());
+ fail("Exception expected");
+ } catch (final Exception e) {
+ verify(promise).setFailure(any(Throwable.class));
+ verifyNoMoreInteractions(promise);
+ // TODO should ctx.channelInactive be called if we throw exception ?
+ }
+ }
+
+ @Test
+ public void testConnectFailAuth() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final ClientSession sshSession = mock(ClientSession.class);
+ doReturn(true).when(sshSession).isClosed();
+ final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+ sshConnectListener.operationComplete(connectFuture);
+
+ final AuthFuture authFuture = getFailedAuthFuture();
+
+ try {
+ sshAuthListener.operationComplete(authFuture);
+ fail("Exception expected");
+ } catch (final Exception e) {
+ verify(promise).setFailure(any(Throwable.class));
+ verifyNoMoreInteractions(promise);
+ // TODO should ctx.channelInactive be called ?
+ }
+ }
+
+ // Mocks an authentication future that reports failure
+ private AuthFuture getFailedAuthFuture() {
+ final AuthFuture authFuture = mock(AuthFuture.class);
+ doReturn(false).when(authFuture).isSuccess();
+ doReturn(new IllegalStateException()).when(authFuture).getException();
+ return authFuture;
+ }
+
+ // Mocks a channel-open future that reports failure.
+ // Renamed local: was misleadingly called "authFuture" although it is an OpenFuture.
+ private OpenFuture getFailedOpenFuture() {
+ final OpenFuture openFuture = mock(OpenFuture.class);
+ doReturn(false).when(openFuture).isOpened();
+ doReturn(new IllegalStateException()).when(openFuture).getException();
+ return openFuture;
+ }
+
+ @Test
+ public void testConnectFail() throws Exception {
+ asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+ final ConnectFuture connectFuture = getFailedConnectFuture();
+ try {
+ sshConnectListener.operationComplete(connectFuture);
+ fail("Exception expected");
+ } catch (final Exception e) {
+ verify(promise).setFailure(any(Throwable.class));
+ verifyNoMoreInteractions(promise);
+ // TODO should ctx.channelInactive be called ?
+ }
+ }
+
+ // Mocks a connect future that reports failure
+ private ConnectFuture getFailedConnectFuture() {
+ final ConnectFuture connectFuture = mock(ConnectFuture.class);
+ doReturn(false).when(connectFuture).isConnected();
+ doReturn(new IllegalStateException()).when(connectFuture).getException();
+ return connectFuture;
+ }
+
+ // Mocks a ChannelPromise whose setSuccess/setFailure are chainable no-ops
+ private ChannelPromise getMockedPromise() {
+ final ChannelPromise promise = mock(ChannelPromise.class);
+ doReturn(promise).when(promise).setSuccess();
+ doReturn(promise).when(promise).setFailure(any(Throwable.class));
+ return promise;
+ }
+
+ // FutureCallback adapter that turns interception failures into RuntimeExceptions
+ private static abstract class SuccessFutureListener<T extends SshFuture<T>> implements FutureCallback<SshFutureListener<T>> {
+
+ @Override
+ public abstract void onSuccess(final SshFutureListener<T> result);
+
+ @Override
+ public void onFailure(final Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+}
final AuthProvider authService = bundleContext.getService(reference);
final Integer newServicePreference = getPreference(reference);
if(isBetter(newServicePreference)) {
+ maxPreference = newServicePreference;
server.setAuthProvider(authService);
if(sshThread == null) {
sshThread = runNetconfSshThread(server);
netconfSSHServer.setAuthProvider(authProvider);
InetSocketAddress address = netconfSSHServer.getLocalSocketAddress();
- final EchoClientHandler echoClientHandler = connectClient(address);
+
+ final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort()));
+
Stopwatch stopwatch = new Stopwatch().start();
while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
Thread.sleep(100);
this.hashedWheelTimer = hashedWheelTimer;
}
- private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi) {
+ private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
final Set<Capability> capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function<ModuleBuilder, Capability>() {
@Override
: Sets.newHashSet(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0, XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
- hashedWheelTimer, simulatedOperationProvider, idProvider, CONNECTION_TIMEOUT_MILLIS, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
+ hashedWheelTimer, simulatedOperationProvider, idProvider, generateConfigsTimeout, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
serverNegotiatorFactory);
public List<Integer> start(final Main.Params params) {
final Map<ModuleBuilder, String> moduleBuilders = parseSchemasToModuleBuilders(params);
- final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi);
+ final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout);
int currentPort = params.startingPort;
}
@VisibleForTesting
- void setNullableUserManager(final IUserManager nullableUserManager) {
+ synchronized void setNullableUserManager(final IUserManager nullableUserManager) {
this.nullableUserManager = nullableUserManager;
}
}
<artifactId>xmlunit</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ </dependency>
</dependencies>
<build>
rpcReply.appendChild(responseNS);
}
- for (String attrName : attributes.keySet()) {
- rpcReply.setAttributeNode((Attr) document.importNode(attributes.get(attrName), true));
+ for (Attr attribute : attributes.values()) {
+ rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
}
document.appendChild(rpcReply);
return document;
Document doc = XmlUtil.newDocument();
Element helloElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
HELLO_TAG);
- Element capabilitiesElement = doc.createElement(XmlNetconfConstants.CAPABILITIES);
+ Element capabilitiesElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.CAPABILITIES);
for (String capability : Sets.newHashSet(capabilities)) {
- Element capElement = doc.createElement(XmlNetconfConstants.CAPABILITY);
+ Element capElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.CAPABILITY);
capElement.setTextContent(capability);
capabilitiesElement.appendChild(capElement);
}
public static NetconfHelloMessage createServerHello(Set<String> capabilities, long sessionId) throws NetconfDocumentedException {
Document doc = createHelloMessageDoc(capabilities);
- Element sessionIdElement = doc.createElement(XmlNetconfConstants.SESSION_ID);
+ Element sessionIdElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+ XmlNetconfConstants.SESSION_ID);
sessionIdElement.setTextContent(Long.toString(sessionId));
doc.getDocumentElement().appendChild(sessionIdElement);
return new NetconfHelloMessage(doc);
import com.google.common.base.Function;
import com.google.common.collect.Collections2;
-
+import java.util.Collection;
+import java.util.List;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
-import javax.annotation.Nullable;
-
-import java.util.Collection;
-import java.util.List;
-
public final class NetconfMessageUtil {
private static final Logger logger = LoggerFactory.getLogger(NetconfMessageUtil.class);
List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
return Collections2.transform(caps, new Function<XmlElement, String>() {
- @Nullable
@Override
- public String apply(@Nullable XmlElement input) {
+ public String apply(@Nonnull XmlElement input) {
// Trim possible leading/tailing whitespace
try {
return input.getTextContent().trim();
}
}
- /**
- * Get extracted address or default.
- *
- * @throws java.lang.IllegalStateException if neither address is present.
- */
- private static InetSocketAddress getNetconfAddress(final InetSocketAddress defaultAddress, Optional<InetSocketAddress> extractedAddress, InfixProp infix) {
- InetSocketAddress inetSocketAddress;
-
- if (extractedAddress.isPresent() == false) {
- logger.debug("Netconf {} address not found, falling back to default {}", infix, defaultAddress);
-
- if (defaultAddress == null) {
- logger.warn("Netconf {} address not found, default address not provided", infix);
- throw new IllegalStateException("Netconf " + infix + " address not found, default address not provided");
- }
- inetSocketAddress = defaultAddress;
- } else {
- inetSocketAddress = extractedAddress.get();
- }
-
- return inetSocketAddress;
- }
-
public static String getPrivateKeyPath(final BundleContext context) {
return getPropertyValue(context, getPrivateKeyKey());
}
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import javax.annotation.Nullable;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
return Lists.newArrayList(Collections2.filter(getChildElementsWithinNamespace(namespace),
new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
List<XmlElement> children = getChildElementsWithinNamespace(namespace);
children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
List<XmlElement> children = getChildElementsWithinNamespace(getNamespace());
return Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
@Override
- public boolean apply(@Nullable XmlElement xmlElement) {
+ public boolean apply(XmlElement xmlElement) {
return xmlElement.getName().equals(childName);
}
}));
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+
+import com.google.common.collect.Lists;
+import org.junit.Test;
+
+// Tests for CloseableUtil.closeAll: a failure while closing one resource must
+// not prevent closing the rest, and later failures are attached as suppressed
+// exceptions to the first one thrown.
+public class CloseableUtilTest {
+
+ @Test
+ public void testCloseAllFail() throws Exception {
+ final AutoCloseable failingCloseable = new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ throw new RuntimeException("testing failing close");
+ }
+ };
+
+ try {
+ CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+ fail("Exception with suppressed should be thrown");
+ } catch (final RuntimeException e) {
+ // The first close failure propagates; the second is suppressed.
+ assertEquals(1, e.getSuppressed().length);
+ }
+ }
+
+ // Closing only well-behaved resources must complete without throwing.
+ @Test
+ public void testCloseAll() throws Exception {
+ final AutoCloseable failingCloseable = mock(AutoCloseable.class);
+ doNothing().when(failingCloseable).close();
+ CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+// Tests for AbstractLastNetconfOperation: it must execute with default
+// priority and refuse to take part in a chain with further operations.
+public class AbstractLastNetconfOperationTest {
+ // Concrete subclass recording whether handleWithNoSubsequentOperations ran.
+ class LastNetconfOperationImplTest extends AbstractLastNetconfOperation {
+
+ boolean handleWithNoSubsequentOperationsRun;
+
+ protected LastNetconfOperationImplTest(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ handleWithNoSubsequentOperationsRun = false;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ handleWithNoSubsequentOperationsRun = true;
+ return null;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return "";
+ }
+ }
+
+ LastNetconfOperationImplTest netconfOperation;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfOperation = new LastNetconfOperationImplTest("");
+ }
+
+ @Test
+ public void testNetconfOperation() throws Exception {
+ netconfOperation.handleWithNoSubsequentOperations(null, null);
+ assertTrue(netconfOperation.handleWithNoSubsequentOperationsRun);
+ assertEquals(HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY, netconfOperation.getHandlingPriority());
+ }
+
+ // A "last" operation handed a non-terminating subsequent execution is
+ // expected to throw, since nothing may run after it.
+ @Test(expected = NetconfDocumentedException.class)
+ public void testHandle() throws Exception {
+ NetconfOperationChainedExecution operation = mock(NetconfOperationChainedExecution.class);
+ doReturn("").when(operation).toString();
+
+ doReturn(false).when(operation).isExecutionTermination();
+ netconfOperation.handle(null, null, operation);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import java.io.IOException;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+// Tests for AbstractNetconfOperation: session-id reporting, canHandle and the
+// dispatch into the subclass handle() implementation.
+public class AbstractNetconfOperationTest {
+
+ // Concrete subclass that records whether handle() was invoked and returns a
+ // trivial element so the caller has something to wrap into a reply.
+ class NetconfOperationImpl extends AbstractNetconfOperation {
+
+ public boolean handleRun;
+
+ protected NetconfOperationImpl(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ this.handleRun = false;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return null;
+ }
+
+ @Override
+ protected Element handle(Document document, XmlElement message, NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+ this.handleRun = true;
+ try {
+ return XmlUtil.readXmlToElement("<element/>");
+ } catch (SAXException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ private NetconfOperationImpl netconfOperation;
+ private NetconfOperationChainedExecution operation;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfOperation = new NetconfOperationImpl("str");
+ operation = mock(NetconfOperationChainedExecution.class);
+ }
+
+ @Test
+ public void testAbstractNetconfOperation() throws Exception {
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/edit_config.xml");
+ assertEquals(netconfOperation.getNetconfSessionIdForReporting(), "str");
+ assertNotNull(netconfOperation.canHandle(helloMessage));
+ assertEquals(netconfOperation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY);
+
+ // Dispatch through the public entry point must reach the subclass handle().
+ netconfOperation.handle(helloMessage, operation);
+ assertTrue(netconfOperation.handleRun);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+
+// Verifies that AbstractSingletonNetconfOperation advertises itself with the
+// maximum handling priority.
+public class AbstractSingletonNetconfOperationTest {
+
+ // Minimal concrete subclass; the behavior under test lives in the base class.
+ class SingletonNCOperationImpl extends AbstractSingletonNetconfOperation {
+
+ protected SingletonNCOperationImpl(String sessionIdForReporting) {
+ super(sessionIdForReporting);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ return null;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return null;
+ }
+ }
+
+ @Test
+ public void testAbstractSingletonNetconfOperation() throws Exception {
+ final SingletonNCOperationImpl operation = new SingletonNCOperationImpl("");
+ assertEquals(operation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_MAX_PRIORITY);
+ }
+}
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+// Tests for NetconfHelloMessageAdditionalHeader: constructor/getter agreement
+// and the string round-trip through fromString().
+public class NetconfHelloMessageAdditionalHeaderTest {
+
+
+ // Serialized form equivalent to the header constructed in setUp().
+ private String customHeader = "[user;1.1.1.1:40;tcp;client;]";
+ private NetconfHelloMessageAdditionalHeader header;
+
+ @Before
+ public void setUp() throws Exception {
+ header = new NetconfHelloMessageAdditionalHeader("user", "1.1.1.1", "40", "tcp", "client");
+ }
+
+ // Each getter must return the value passed to the constructor.
+ // NOTE: assertEquals takes (expected, actual) — the original had them
+ // reversed, which produces misleading failure messages.
+ @Test
+ public void testGetters() throws Exception {
+ assertEquals("1.1.1.1", header.getAddress());
+ assertEquals("user", header.getUserName());
+ assertEquals("40", header.getPort());
+ assertEquals("tcp", header.getTransport());
+ assertEquals("client", header.getSessionIdentifier());
+ }
+
+ // Parsing the string form must yield a header equivalent to the original.
+ @Test
+ public void testStaticConstructor() throws Exception {
+ NetconfHelloMessageAdditionalHeader h = NetconfHelloMessageAdditionalHeader.fromString(customHeader);
+ assertEquals(header.toString(), h.toString());
+ assertEquals(header.toFormattedString(), h.toFormattedString());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+
+import com.google.common.base.Optional;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+// Tests for NetconfHelloMessage factory methods: both client and server hellos
+// must be recognised as hello messages, and the client hello must retain its
+// optional additional header.
+public class NetconfHelloMessageTest {
+
+ Set<String> caps;
+
+ @Before
+ public void setUp() throws Exception {
+ caps = Sets.newSet("cap1");
+ }
+
+ @Test
+ public void testConstructor() throws Exception {
+ NetconfHelloMessageAdditionalHeader additionalHeader = new NetconfHelloMessageAdditionalHeader("name","host","1","transp","id");
+ NetconfHelloMessage message = NetconfHelloMessage.createClientHello(caps, Optional.of(additionalHeader));
+ assertTrue(message.isHelloMessage(message));
+ assertEquals(Optional.of(additionalHeader), message.getAdditionalHeader());
+
+ // Server hello carries a session id instead of an additional header.
+ NetconfHelloMessage serverMessage = NetconfHelloMessage.createServerHello(caps, 100L);
+ assertTrue(serverMessage.isHelloMessage(serverMessage));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Charsets;
+import java.util.Arrays;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+// Verifies NetconfMessageHeader reports its length and serializes to the
+// chunked-framing byte sequence "\n#<length>\n" (US-ASCII).
+public class NetconfMessageHeaderTest {
+ @Test
+ public void testGet() throws Exception {
+ final NetconfMessageHeader header = new NetconfMessageHeader(10);
+ assertEquals(header.getLength(), 10);
+
+ assertArrayEquals("\n#10\n".getBytes(Charsets.US_ASCII), header.toBytes());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import java.util.Collection;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+// Tests NetconfMessageUtil classification of ok/error replies and capability
+// extraction from a client hello document.
+public class NetconfMessageUtilTest {
+ @Test
+ public void testNetconfMessageUtil() throws Exception {
+ // An <ok/> reply is an OK message and not an error message.
+ Document okMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+ assertTrue(NetconfMessageUtil.isOKMessage(new NetconfMessage(okMessage)));
+ assertFalse(NetconfMessageUtil.isErrorMessage(new NetconfMessage(okMessage)));
+
+ // An rpc-error reply is classified the other way around.
+ Document errorMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/communicationError/testClientSendsRpcReply_expectedResponse.xml");
+ assertTrue(NetconfMessageUtil.isErrorMessage(new NetconfMessage(errorMessage)));
+ assertFalse(NetconfMessageUtil.isOKMessage(new NetconfMessage(errorMessage)));
+
+ // Capability URIs are extracted from the hello's <capabilities> element.
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/client_hello.xml");
+ Collection<String> caps = NetconfMessageUtil.extractCapabilitiesFromHello(new NetconfMessage(helloMessage).getDocument());
+ assertTrue(caps.contains("urn:ietf:params:netconf:base:1.0"));
+ assertTrue(caps.contains("urn:ietf:params:netconf:base:1.1"));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+// Tests that SendErrorExceptionUtil sends an error reply through each of its
+// three entry points (session, channel, session + offending message) and
+// registers a completion listener on the resulting write future.
+public class SendErrorExceptionUtilTest {
+
+ NetconfSession netconfSession;
+ ChannelFuture channelFuture;
+ Channel channel;
+ private NetconfDocumentedException exception;
+
+ @Before
+ public void setUp() throws Exception {
+ netconfSession = mock(NetconfSession.class);
+ channelFuture = mock(ChannelFuture.class);
+ channel = mock(Channel.class);
+ // Stub the write paths so both session.sendMessage and channel.writeAndFlush
+ // hand back the same mocked future.
+ doReturn(channelFuture).when(netconfSession).sendMessage(any(NetconfMessage.class));
+ doReturn(channelFuture).when(channelFuture).addListener(any(GenericFutureListener.class));
+ doReturn(channelFuture).when(channel).writeAndFlush(any(NetconfMessage.class));
+ exception = new NetconfDocumentedException("err");
+ }
+
+ // Session variant: exactly one message sent, one listener attached.
+ @Test
+ public void testSendErrorMessage1() throws Exception {
+ SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception);
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ verify(netconfSession, times(1)).sendMessage(any(NetconfMessage.class));
+ }
+
+ // Channel variant: error is written directly to the channel.
+ @Test
+ public void testSendErrorMessage2() throws Exception {
+ SendErrorExceptionUtil.sendErrorMessage(channel, exception);
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ }
+
+ // Session + incoming-message variant: the offending rpc is supplied so the
+ // reply can echo its message-id.
+ @Test
+ public void testSendErrorMessage3() throws Exception {
+ Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc.xml");
+ SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception, new NetconfMessage(helloMessage));
+ verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.osgi;
+
+import com.google.common.base.Optional;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.NetconfUtil;
+import org.osgi.framework.BundleContext;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+// Tests for NetconfConfigUtil: property-key construction, address/timeout
+// extraction from BundleContext properties, and private-key path lookup.
+public class NetconfConfigUtilTest {
+
+ private BundleContext bundleContext;
+
+ @Before
+ public void setUp() throws Exception {
+ bundleContext = mock(BundleContext.class);
+ }
+
+ @Test
+ public void testNetconfConfigUtil() throws Exception {
+ assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
+
+ // Empty or unparseable timeout property falls back to the 5000 ms default.
+ doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+
+ doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+ assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+ }
+
+ @Test
+ public void testgetPrivateKeyKey() throws Exception {
+ assertEquals(NetconfConfigUtil.getPrivateKeyKey(), "netconf.ssh.pk.path");
+ }
+
+ @Test
+ public void testgetNetconfServerAddressKey() throws Exception {
+ NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+ assertEquals(NetconfConfigUtil.getNetconfServerAddressKey(prop), "netconf.tcp.address");
+ }
+
+ // Empty address/port properties yield an absent Optional rather than a failure.
+ @Test
+ public void testExtractNetconfServerAddress() throws Exception {
+ NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+ doReturn("").when(bundleContext).getProperty(anyString());
+ assertEquals(NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop), Optional.absent());
+ }
+
+ // Valid address and port properties produce the matching InetSocketAddress.
+ @Test
+ public void testExtractNetconfServerAddress2() throws Exception {
+ NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+ doReturn("1.1.1.1").when(bundleContext).getProperty("netconf.tcp.address");
+ doReturn("20").when(bundleContext).getProperty("netconf.tcp.port");
+ Optional<InetSocketAddress> inetSocketAddressOptional = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop);
+ assertTrue(inetSocketAddressOptional.isPresent());
+ assertEquals(inetSocketAddressOptional.get(), new InetSocketAddress("1.1.1.1", 20));
+ }
+
+ @Test
+ public void testGetPrivateKeyPath() throws Exception {
+ doReturn("path").when(bundleContext).getProperty("netconf.ssh.pk.path");
+ assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+ }
+
+ // A missing private-key property is a configuration error and must throw.
+ @Test(expected = IllegalStateException.class)
+ public void testGetPrivateKeyPath2() throws Exception {
+ doReturn(null).when(bundleContext).getProperty("netconf.ssh.pk.path");
+ assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+// Tests HardcodedNamespaceResolver: it resolves exactly one prefix to one
+// namespace, fails fast on any other prefix, and does not support reverse
+// (namespace -> prefix) lookups.
+public class HardcodedNamespaceResolverTest {
+
+ @Test
+ public void testResolver() throws Exception {
+ final HardcodedNamespaceResolver hardcodedNamespaceResolver = new HardcodedNamespaceResolver("prefix", "namespace");
+
+ assertEquals("namespace", hardcodedNamespaceResolver.getNamespaceURI("prefix"));
+ try{
+ hardcodedNamespaceResolver.getNamespaceURI("unknown");
+ fail("Unknown namespace lookup should fail");
+ } catch(IllegalStateException e) {}
+
+ // Reverse lookups are unsupported and return null for any namespace.
+ assertNull(hardcodedNamespaceResolver.getPrefix("any"));
+ assertNull(hardcodedNamespaceResolver.getPrefixes("any"));
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.hamcrest.CoreMatchers.both;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Map;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import com.google.common.base.Optional;
+
+public class XmlElementTest {
+
+ private final String elementAsString = "<top xmlns=\"namespace\" xmlns:a=\"attrNamespace\" a:attr1=\"value1\" attr2=\"value2\">" +
+ "<inner>" +
+ "<deepInner>deepValue</deepInner>" +
+ "</inner>" +
+ "<innerNamespace xmlns=\"innerNamespace\">innerNamespaceValue</innerNamespace>" +
+ "<innerPrefixed xmlns:b=\"prefixedValueNamespace\">b:valueWithPrefix</innerPrefixed>" +
+ "</top>";
+ private Document document;
+ private Element element;
+ private XmlElement xmlElement;
+
+ @Before
+ public void setUp() throws Exception {
+ document = XmlUtil.readXmlToDocument(elementAsString);
+ element = document.getDocumentElement();
+ xmlElement = XmlElement.fromDomElement(element);
+ }
+
+ @Test
+ public void testConstruct() throws Exception {
+ final XmlElement fromString = XmlElement.fromString(elementAsString);
+ assertEquals(fromString, xmlElement);
+ XmlElement.fromDomDocument(document);
+ XmlElement.fromDomElement(element);
+ XmlElement.fromDomElementWithExpected(element, "top");
+ XmlElement.fromDomElementWithExpected(element, "top", "namespace");
+
+ try {
+ XmlElement.fromString("notXml");
+ fail();
+ } catch (final NetconfDocumentedException e) {}
+
+ try {
+ XmlElement.fromDomElementWithExpected(element, "notTop");
+ fail();
+ } catch (final NetconfDocumentedException e) {}
+
+ try {
+ XmlElement.fromDomElementWithExpected(element, "top", "notNamespace");
+ fail();
+ } catch (final NetconfDocumentedException e) {}
+ }
+
+ @Test
+ public void testGetters() throws Exception {
+ assertEquals(element, xmlElement.getDomElement());
+ assertEquals(element.getElementsByTagName("inner").getLength(), xmlElement.getElementsByTagName("inner").getLength());
+
+ assertEquals("top", xmlElement.getName());
+ assertTrue(xmlElement.hasNamespace());
+ assertEquals("namespace", xmlElement.getNamespace());
+ assertEquals("namespace", xmlElement.getNamespaceAttribute());
+ assertEquals(Optional.of("namespace"), xmlElement.getNamespaceOptionally());
+
+ assertEquals("value1", xmlElement.getAttribute("attr1", "attrNamespace"));
+ assertEquals("value2", xmlElement.getAttribute("attr2"));
+ assertEquals(2 + 2/*Namespace definition*/, xmlElement.getAttributes().size());
+
+ assertEquals(3, xmlElement.getChildElements().size());
+ assertEquals(1, xmlElement.getChildElements("inner").size());
+ assertTrue(xmlElement.getOnlyChildElementOptionally("inner").isPresent());
+ assertTrue(xmlElement.getOnlyChildElementWithSameNamespaceOptionally("inner").isPresent());
+ assertEquals(0, xmlElement.getChildElements("unknown").size());
+ assertFalse(xmlElement.getOnlyChildElementOptionally("unknown").isPresent());
+ assertEquals(1, xmlElement.getChildElementsWithSameNamespace("inner").size());
+ assertEquals(0, xmlElement.getChildElementsWithSameNamespace("innerNamespace").size());
+ assertEquals(1, xmlElement.getChildElementsWithinNamespace("innerNamespace", "innerNamespace").size());
+ assertTrue(xmlElement.getOnlyChildElementOptionally("innerNamespace", "innerNamespace").isPresent());
+ assertFalse(xmlElement.getOnlyChildElementOptionally("innerNamespace", "unknownNamespace").isPresent());
+
+ final XmlElement noNamespaceElement = XmlElement.fromString("<noNamespace/>");
+ assertFalse(noNamespaceElement.hasNamespace());
+ try {
+ noNamespaceElement.getNamespace();
+ fail();
+ } catch (final MissingNameSpaceException e) {}
+
+ final XmlElement inner = xmlElement.getOnlyChildElement("inner");
+ final XmlElement deepInner = inner.getOnlyChildElementWithSameNamespaceOptionally().get();
+ assertEquals(deepInner, inner.getOnlyChildElementWithSameNamespace());
+ assertEquals(Optional.<XmlElement>absent(), xmlElement.getOnlyChildElementOptionally("unknown"));
+ assertEquals("deepValue", deepInner.getTextContent());
+ assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+ assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+ }
+
+ @Test
+ public void testExtractNamespaces() throws Exception {
+ // findNamespaceOfTextContent() returns a (prefix, namespace URI) pair for the
+ // element's text content. For prefixed content the fixture binds prefix "b"
+ // to "prefixedValueNamespace" (fixture defined elsewhere in this class).
+ final XmlElement innerPrefixed = xmlElement.getOnlyChildElement("innerPrefixed");
+ Map.Entry<String, String> namespaceOfTextContent = innerPrefixed.findNamespaceOfTextContent();
+
+ assertNotNull(namespaceOfTextContent);
+ assertEquals("b", namespaceOfTextContent.getKey());
+ assertEquals("prefixedValueNamespace", namespaceOfTextContent.getValue());
+ // Unprefixed text content maps to the empty prefix and the element's
+ // default namespace.
+ final XmlElement innerNamespace = xmlElement.getOnlyChildElement("innerNamespace");
+ namespaceOfTextContent = innerNamespace.findNamespaceOfTextContent();
+
+ assertEquals("", namespaceOfTextContent.getKey());
+ assertEquals("innerNamespace", namespaceOfTextContent.getValue());
+ }
+
+ @Test
+ public void testUnrecognisedElements() throws Exception {
+ // When every child is listed as recognised, no exception is thrown.
+ xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"), xmlElement.getOnlyChildElement("innerPrefixed"), xmlElement.getOnlyChildElement("innerNamespace"));
+
+ try {
+ // Only "inner" is recognised here, so "innerPrefixed" and "innerNamespace"
+ // are both unrecognised and must be reported.
+ xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"));
+ fail();
+ } catch (final NetconfDocumentedException e) {
+ // Fix: the original asserted containsString("innerNamespace") twice,
+ // which never verified that "innerPrefixed" was reported. Assert both
+ // unrecognised element names appear in the message.
+ assertThat(e.getMessage(), both(containsString("innerPrefixed")).and(containsString("innerNamespace")));
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import com.google.common.base.Optional;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXParseException;
+
+// Unit tests for XmlUtil: DOM element construction, XSD schema loading,
+// and XPath compilation/evaluation.
+public class XmlUtilTest {
+
+ // Expected serialized form of the document assembled in testCreateElement.
+ private final String xml = "<top xmlns=\"namespace\">\n" +
+ "<innerText>value</innerText>\n" +
+ "<innerPrefixedText xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+ "<innerPrefixedText xmlns=\"randomNamespace\" xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+ "</top>";
+
+ @Test
+ public void testCreateElement() throws Exception {
+ final Document document = XmlUtil.newDocument();
+ final Element top = XmlUtil.createElement(document, "top", Optional.of("namespace"));
+
+ top.appendChild(XmlUtil.createTextElement(document, "innerText", "value", Optional.of("namespace")));
+ top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("namespace")));
+ top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("randomNamespace")));
+
+ document.appendChild(top);
+ // createDocumentCopy must yield a deep copy with the same root element.
+ assertEquals("top", XmlUtil.createDocumentCopy(document).getDocumentElement().getTagName());
+
+ XMLUnit.setIgnoreAttributeOrder(true);
+ XMLUnit.setIgnoreWhitespace(true);
+
+ // "similar" comparison: same content, attribute order and whitespace ignored.
+ final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(xml), document);
+ assertTrue(diff.toString(), diff.similar());
+ }
+
+ @Test
+ public void testLoadSchema() throws Exception {
+ XmlUtil.loadSchema();
+ // Feeding a non-XSD document to loadSchema must fail with an
+ // IllegalStateException wrapping the underlying SAX parse error.
+ try {
+ XmlUtil.loadSchema(getClass().getResourceAsStream("/netconfMessages/commit.xml"));
+ fail("Input stream does not contain xsd");
+ } catch (final IllegalStateException e) {
+ assertTrue(e.getCause() instanceof SAXParseException);
+ }
+
+ }
+
+ @Test
+ public void testXPath() throws Exception {
+ final XPathExpression correctXPath = XMLNetconfUtil.compileXPath("/top/innerText");
+ // Malformed expressions are rejected at compile time.
+ try {
+ XMLNetconfUtil.compileXPath("!@(*&$!");
+ fail("Incorrect xpath should fail");
+ } catch (IllegalStateException e) {}
+ final Object value = XmlUtil.evaluateXPath(correctXPath, XmlUtil.readXmlToDocument("<top><innerText>value</innerText></top>"), XPathConstants.NODE);
+ assertEquals("value", ((Element) value).getTextContent());
+ }
+}
\ No newline at end of file
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
+//import javax.xml.bind.annotation.XmlElementWrapper;
import java.io.Serializable;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
* healthmonitor_id String
* admin_state_up Bool
* status String
- * members List <String>
+ * members List <NeutronLoadBalancerPoolMember>
* http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
*/
@XmlElement (name="status")
String loadBalancerPoolStatus;
- @XmlElement (name="members")
- List loadBalancerPoolMembers;
-
- HashMap<String, NeutronLoadBalancerPoolMember> member;
+ @XmlElement(name="members")
+ List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers;
public NeutronLoadBalancerPool() {
- member = new HashMap<String, NeutronLoadBalancerPoolMember>();
}
public String getLoadBalancerPoolID() {
this.loadBalancerPoolStatus = loadBalancerPoolStatus;
}
- public List getLoadBalancerPoolMembers() {
+ public List<NeutronLoadBalancerPoolMember> getLoadBalancerPoolMembers() {
+ /*
+ * Update the pool_id of each member to this.loadBalancerPoolID
+ */
+ for (NeutronLoadBalancerPoolMember member: loadBalancerPoolMembers)
+ member.setPoolID(loadBalancerPoolID);
return loadBalancerPoolMembers;
}
- public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) {
+ public void setLoadBalancerPoolMembers(List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers) {
this.loadBalancerPoolMembers = loadBalancerPoolMembers;
}
+ public void addLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+ this.loadBalancerPoolMembers.add(loadBalancerPoolMember);
+ }
+
+ public void removeLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+ this.loadBalancerPoolMembers.remove(loadBalancerPoolMember);
+ }
+
public NeutronLoadBalancerPool extractFields(List<String> fields) {
NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool();
Iterator<String> i = fields.iterator();
}
return ans;
}
-}
\ No newline at end of file
+}
import org.opendaylight.controller.configuration.ConfigurationObject;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
import java.io.Serializable;
import java.util.Iterator;
import java.util.List;
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
public class NeutronLoadBalancerPoolMember extends ConfigurationObject implements Serializable {
private static final long serialVersionUID = 1L;
@XmlElement (name="status")
String poolMemberStatus;
+ String poolID;
+
public NeutronLoadBalancerPoolMember() {
}
+ @XmlTransient
+ public String getPoolID() {
+ return poolID;
+ }
+
+ public void setPoolID(String poolID) {
+ this.poolID = poolID;
+ }
+
public String getPoolMemberID() {
return poolMemberID;
}
if (s.equals("id")) {
ans.setPoolMemberID(this.getPoolMemberID());
}
+ if (s.equals("pool_id")) {
+ ans.setPoolID(this.getPoolID());
+ }
if (s.equals("tenant_id")) {
ans.setPoolMemberTenantID(this.getPoolMemberTenantID());
}
@Override public String toString() {
return "NeutronLoadBalancerPoolMember{" +
"poolMemberID='" + poolMemberID + '\'' +
+ ", poolID='" + poolID + '\'' +
", poolMemberTenantID='" + poolMemberTenantID + '\'' +
", poolMemberAddress='" + poolMemberAddress + '\'' +
", poolMemberProtoPort=" + poolMemberProtoPort +
import java.util.List;
/**
- * Neutron Northbound REST APIs for LoadBalancer Policies.<br>
- * This class provides REST APIs for managing neutron LoadBalancer Policies
+ * Neutron Northbound REST APIs for LoadBalancers.<br>
+ * This class provides REST APIs for managing neutron LoadBalancers
*
* <br>
* <br>
@QueryParam("page_reverse") String pageReverse
// sorting not supported
) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
- this);
- // INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this);
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- List<NeutronLoadBalancer> allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers();
+ List<NeutronLoadBalancer> allLoadBalancers = loadBalancerInterface.getAllNeutronLoadBalancers();
// List<NeutronLoadBalancerRule> allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules();
List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
// List<NeutronLoadBalancerRule> rules = new ArrayList<NeutronLoadBalancerRule>();
/**
* Returns a specific LoadBalancer */
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@GET
@Produces({ MediaType.APPLICATION_JSON })
@ResponseCode(code = 401, condition = "Unauthorized"),
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented") })
- public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+ public Response showLoadBalancer(@PathParam("loadBalancerID") String loadBalancerID,
// return fields
@QueryParam("fields") List<String> fields) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
if (fields.size() > 0) {
- NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer ans = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
return Response.status(200).entity(
new NeutronLoadBalancerRequest(extractFields(ans, fields))).build();
} else {
- return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID))).build();
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID))).build();
}
}
@ResponseCode(code = 409, condition = "Conflict"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response createLoadBalancers(final NeutronLoadBalancerRequest input) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* Verify that the LoadBalancer doesn't already exist.
*/
- if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
+ if (loadBalancerInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
throw new BadRequestException("LoadBalancer UUID already exists");
}
- loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
-
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {
}
}
}
- loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+ loadBalancerInterface.addNeutronLoadBalancer(singleton);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
NeutronLoadBalancer test = i.next();
/*
- * Verify that the firewall policy doesn't already exist
+ * Verify that the loadbalancer doesn't already exist
*/
- if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
+ if (loadBalancerInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
throw new BadRequestException("Load Balancer Pool UUID already is already created");
}
if (testMap.containsKey(test.getLoadBalancerID())) {
i = bulk.iterator();
while (i.hasNext()) {
NeutronLoadBalancer test = i.next();
- loadBalancerPoolInterface.addNeutronLoadBalancer(test);
+ loadBalancerInterface.addNeutronLoadBalancer(test);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
/**
* Updates a LoadBalancer Policy
*/
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@PUT
@Produces({ MediaType.APPLICATION_JSON })
@Consumes({ MediaType.APPLICATION_JSON })
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response updateLoadBalancer(
- @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ @PathParam("loadBalancerID") String loadBalancerID, final NeutronLoadBalancerRequest input) {
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* verify the LoadBalancer exists and there is only one delta provided
*/
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
if (!input.isSingleton()) {
throw new BadRequestException("Only singleton edit supported");
}
NeutronLoadBalancer delta = input.getSingleton();
- NeutronLoadBalancer original = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer original = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
/*
* updates restricted by Neutron
/*
* update the object and return it
*/
- loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta);
- NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID);
+ loadBalancerInterface.updateNeutronLoadBalancer(loadBalancerID, delta);
+ NeutronLoadBalancer updatedLoadBalancer = loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
service.neutronLoadBalancerUpdated(updatedLoadBalancer);
}
}
- return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
- loadBalancerPoolID))).build();
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+ loadBalancerID))).build();
}
/**
* Deletes a LoadBalancer */
- @Path("{loadBalancerPoolID}")
+ @Path("{loadBalancerID}")
@DELETE
@StatusCodes({
@ResponseCode(code = 204, condition = "No Content"),
@ResponseCode(code = 409, condition = "Conflict"),
@ResponseCode(code = 501, condition = "Not Implemented") })
public Response deleteLoadBalancer(
- @PathParam("loadBalancerPoolID") String loadBalancerPoolID) {
- INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ @PathParam("loadBalancerID") String loadBalancerID) {
+ INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
this);
- if (loadBalancerPoolInterface == null) {
+ if (loadBalancerInterface == null) {
throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
/*
* verify the LoadBalancer exists and it isn't currently in use
*/
- if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
}
- if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) {
+ if (loadBalancerInterface.neutronLoadBalancerInUse(loadBalancerID)) {
return Response.status(409).build();
}
- NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ NeutronLoadBalancer singleton = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {
}
}
- loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID);
+ loadBalancerInterface.removeNeutronLoadBalancer(loadBalancerID);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
import javax.xml.bind.annotation.XmlElement;
import java.util.List;
-public class INeutronLoadBalancerPoolMemberRequest {
+public class NeutronLoadBalancerPoolMemberRequest {
/**
* See OpenStack Network API v2.0 Reference for description of
@XmlElement(name="members")
List<NeutronLoadBalancerPoolMember> bulkRequest;
- INeutronLoadBalancerPoolMemberRequest() {
+ NeutronLoadBalancerPoolMemberRequest() {
}
- INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+ NeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
bulkRequest = bulk;
singletonLoadBalancerPoolMember = null;
}
- INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+ NeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
singletonLoadBalancerPoolMember = group;
}
/*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
*/
package org.opendaylight.controller.networkconfig.neutron.northbound;
import org.codehaus.enunciate.jaxrs.ResponseCode;
import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
-import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
import org.opendaylight.controller.sal.utils.ServiceHelper;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
-
-@Path("/pools/{loadBalancerPoolID}/members")
+@Path("/pools/{loadBalancerPoolUUID}/members")
public class NeutronLoadBalancerPoolMembersNorthbound {
-
private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List<String> fields) {
return o.extractFields(fields);
}
/**
- * Returns a list of all LoadBalancerPool
+ * Returns a list of all LoadBalancerPoolMembers in specified pool
*/
@GET
@Produces({MediaType.APPLICATION_JSON})
@ResponseCode(code = 501, condition = "Not Implemented")})
public Response listMembers(
+ //Path param
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+
// return fields
@QueryParam("fields") List<String> fields,
+
// OpenStack LoadBalancerPool attributes
@QueryParam("id") String queryLoadBalancerPoolMemberID,
@QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID,
@QueryParam("page_reverse") String pageReverse
// sorting not supported
) {
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces
- .getINeutronLoadBalancerPoolMemberCRUD(this);
- if (loadBalancerPoolMemberInterface == null) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
- List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface
- .getAllNeutronLoadBalancerPoolMembers();
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
- Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ Iterator<NeutronLoadBalancerPoolMember> i = members.iterator();
while (i.hasNext()) {
NeutronLoadBalancerPoolMember nsg = i.next();
if ((queryLoadBalancerPoolMemberID == null ||
queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) &&
+ loadBalancerPoolUUID.equals(nsg.getPoolID()) &&
(queryLoadBalancerPoolMemberTenantID == null ||
queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) &&
(queryLoadBalancerPoolMemberAddress == null ||
}
}
return Response.status(200).entity(
- new INeutronLoadBalancerPoolMemberRequest(ans)).build();
+ new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+}
+
+/**
+ * Returns a specific LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@GET
+@Produces({ MediaType.APPLICATION_JSON })
+//@TypeHint(OpenStackLoadBalancerPoolMembers.class)
+@StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response showLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+ // return fields
+ @QueryParam("fields") List<String> fields ) {
+
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ // Members live in the pool's own cache (see class-level TODO on cache
+ // consolidation); scan for the requested member UUID.
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember ans: members) {
+ if (!ans.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ continue;
+
+ if (fields.size() > 0) {
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolMemberRequest(extractFields(ans, fields))).build();
+ } else {
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+ }
+ }
+ // NOTE(review): an unknown member UUID falls through to 204 (No Content),
+ // yet 404 is declared in @StatusCodes above — confirm whether a
+ // ResourceNotFoundException was intended here.
+ return Response.status(204).build();
}
/**
* Adds a Member to an LBaaS Pool member
*/
-@Path("/pools/{loadBalancerPoolID}/members")
@PUT
@Produces({MediaType.APPLICATION_JSON})
@Consumes({MediaType.APPLICATION_JSON})
@ResponseCode(code = 401, condition = "Unauthorized"),
@ResponseCode(code = 404, condition = "Not Found"),
@ResponseCode(code = 501, condition = "Not Implemented")})
-public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberRequest input) {
+public Response createLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ final NeutronLoadBalancerPoolMemberRequest input) {
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(
- this);
- if (loadBalancerPoolMemberInterface == null) {
- throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface "
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ RestMessages.SERVICEUNAVAILABLE.toString());
}
+ // Verify that the loadBalancerPool exists, for the member to be added to its cache
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+ NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+
if (input.isSingleton()) {
NeutronLoadBalancerPoolMember singleton = input.getSingleton();
+ singleton.setPoolID(loadBalancerPoolUUID);
+ String loadBalancerPoolMemberUUID = singleton.getPoolMemberID();
/*
* Verify that the LoadBalancerPoolMember doesn't already exist.
*/
- if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
- singleton.getPoolMemberID())) {
- throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+ List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
}
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
if (instances != null) {
}
}
}
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
service.neutronLoadBalancerPoolMemberCreated(singleton);
}
}
+
+ /**
+ * Add the member to the neutron load balancer pool as well
+ */
+ singletonPool.addLoadBalancerPoolMember(singleton);
+
} else {
List<NeutronLoadBalancerPoolMember> bulk = input.getBulk();
Iterator<NeutronLoadBalancerPoolMember> i = bulk.iterator();
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
while (i.hasNext()) {
NeutronLoadBalancerPoolMember test = i.next();
+ String loadBalancerPoolMemberUUID = test.getPoolMemberID();
/*
- * Verify that the firewall doesn't already exist
+ * Verify that the LoadBalancerPoolMember doesn't already exist.
*/
-
- if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
- test.getPoolMemberID())) {
- throw new BadRequestException("Load Balancer PoolMember UUID already is already created");
+ List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
}
+
if (testMap.containsKey(test.getPoolMemberID())) {
throw new BadRequestException("Load Balancer PoolMember UUID already exists");
}
i = bulk.iterator();
while (i.hasNext()) {
NeutronLoadBalancerPoolMember test = i.next();
- loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test);
if (instances != null) {
for (Object instance : instances) {
INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
service.neutronLoadBalancerPoolMemberCreated(test);
}
}
+ singletonPool.addLoadBalancerPoolMember(test);
}
}
return Response.status(201).entity(input).build();
}
+
+/**
+ * Updates a LB member pool
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@PUT
+@Produces({ MediaType.APPLICATION_JSON })
+@Consumes({ MediaType.APPLICATION_JSON })
+@StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response updateLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+ final NeutronLoadBalancerPoolMemberRequest input) {
+
+ //TODO: Implement update LB member pool
+ // Stub: always answers 501 (Not Implemented) and echoes the request body.
+ return Response.status(501).entity(input).build();
+}
+
+/**
+ * Deletes a LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@DELETE
+@StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response deleteLoadBalancerPoolMember(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+ @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ // Verify that the loadBalancerPool exists, for the member to be removed from its cache
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+ }
+
+ //Verify that the LB pool member exists
+ NeutronLoadBalancerPoolMember singleton = null;
+ List<NeutronLoadBalancerPoolMember> members =
+ loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+ for (NeutronLoadBalancerPoolMember member: members) {
+ if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) {
+ singleton = member;
+ break;
+ }
+ }
+ // NOTE(review): a missing member UUID produces 400 (BadRequestException)
+ // here, while 404 is declared in @StatusCodes above — confirm whether
+ // ResourceNotFoundException was intended.
+ if (singleton == null)
+ throw new BadRequestException("LoadBalancerPoolMember UUID does not exist.");
+
+ // First pass: give every registered listener a veto before deleting.
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ // Second pass: notify listeners that the member has been deleted.
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ service.neutronLoadBalancerPoolMemberDeleted(singleton);
+ }
+ }
+
+ /**
+ * Remove the member from the neutron load balancer pool
+ */
+ NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ singletonPool.removeLoadBalancerPoolMember(singleton);
+
+ return Response.status(204).build();
+}
}
/*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
*/
package org.opendaylight.controller.networkconfig.neutron.northbound;
import org.codehaus.enunciate.jaxrs.StatusCodes;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
import org.opendaylight.controller.sal.utils.ServiceHelper;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
* http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
*
*/
+
+/**
+ * For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD,
+ * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache.
+ * TODO: Consolidate and maintain a single copy
+ */
+
@Path("/pools")
public class NeutronLoadBalancerPoolNorthbound {
@QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID,
@QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp,
@QueryParam("status") String queryLoadBalancerPoolStatus,
- @QueryParam("members") List queryLoadBalancerPoolMembers,
+ @QueryParam("members") List<NeutronLoadBalancerPoolMember> queryLoadBalancerPoolMembers,
// pagination
@QueryParam("limit") String limit,
@QueryParam("marker") String marker,
NeutronLoadBalancerPool test = i.next();
/*
- * Verify that the firewall doesn't already exist
+ * Verify that the loadBalancerPool doesn't already exist
*/
if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) {
}
return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
}
+
+ /**
+ * Deletes a LoadBalancerPool.
+ *
+ * Returns 404 if the pool UUID is unknown, 409 if the pool is in use,
+ * any non-2xx veto status reported by a registered
+ * INeutronLoadBalancerPoolAware service, and 204 on success.  On
+ * success the pool's members are also purged from the member cache.
+ */
+
+ @Path("{loadBalancerPoolUUID}")
+ @DELETE
+ @StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response deleteLoadBalancerPool(
+ @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancerPool exists and it isn't currently in use
+ */
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+ throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+ }
+ if (loadBalancerPoolInterface.neutronLoadBalancerPoolInUse(loadBalancerPoolUUID)) {
+ return Response.status(409).build();
+ }
+ NeutronLoadBalancerPool singleton = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerPool(singleton);
+ if (status < 200 || status > 299) {
+ // A registered service vetoed the deletion; propagate its status.
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * remove it and return 204 status
+ */
+ loadBalancerPoolInterface.removeNeutronLoadBalancerPool(loadBalancerPoolUUID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ service.neutronLoadBalancerPoolDeleted(singleton);
+ }
+ }
+
+ /*
+ * remove corresponding members from the member cache too
+ */
+ INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this);
+ if (loadBalancerPoolMemberInterface != null) {
+ List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new
+ ArrayList<NeutronLoadBalancerPoolMember>(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers());
+ Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPoolMember member = i.next();
+ // Compare UUID strings with equals(); '==' compares references
+ // and would almost never match, leaving stale members cached.
+ if (loadBalancerPoolUUID.equals(member.getPoolID())) {
+ loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID());
+ }
+ }
+ }
+ return Response.status(204).build();
+ }
}
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
end += rawPayload.length;
}
int checksumStartByte = start + getfieldOffset(CHECKSUM) / NetUtils.NumBitsInAByte;
+ int even = end & ~1;
- for (int i = start; i <= (end - 1); i = i + 2) {
+ for (int i = start; i < even; i = i + 2) {
// Skip, if the current bytes are checkSum bytes
if (i == checksumStartByte) {
continue;
wordData = ((data[i] << 8) & 0xFF00) + (data[i + 1] & 0xFF);
sum = sum + wordData;
}
- carry = (sum >> 16) & 0xFF;
+ if (even < end) {
+ // Add the last octet with zero padding.
+ wordData = (data[even] << 8) & 0xFF00;
+ sum = sum + wordData;
+ }
+
+ carry = sum >>> 16;
finalSum = (sum & 0xFFFF) + carry;
return (short) ~((short) finalSum & 0xFFFF);
}
*/
public void setHeaderField(String headerField, byte[] readValue) {
if (headerField.equals(PROTOCOL)) {
- payloadClass = protocolClassMap.get(readValue[0]);
+ // Don't set payloadClass if fragment offset is not zero.
+ byte[] fragoff = hdrFieldsMap.get(FRAGOFFSET);
+ if (fragoff == null || BitBufferHelper.getShort(fragoff) == 0) {
+ payloadClass = protocolClassMap.get(readValue[0]);
+ }
+ } else if (headerField.equals(FRAGOFFSET)) {
+ if (readValue != null && BitBufferHelper.getShort(readValue) != 0) {
+ // Clear payloadClass because protocol header is not present
+ // in this packet.
+ payloadClass = null;
+ }
} else if (headerField.equals(OPTIONS) &&
(readValue == null || readValue.length == 0)) {
hdrFieldsMap.remove(headerField);
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.controller.sal.packet;
+import java.util.Arrays;
+
import junit.framework.Assert;
import org.junit.Test;
(byte) 0x2b, (byte) 0x2c, (byte) 0x2d, (byte) 0x2e,
(byte) 0x2f, (byte) 0x30, (byte) 0x31, (byte) 0x32,
(byte) 0x33, (byte) 0x34, (byte) 0x35, (byte) 0x36, (byte) 0x37 };
+ serializeTest(icmpRawPayload, (short)0xe553);
+
+ serializeTest(null, (short)0xb108);
+ serializeTest(new byte[0], (short)0xb108);
+
+ byte[] odd = {
+ (byte)0xba, (byte)0xd4, (byte)0xc7, (byte)0x53,
+ (byte)0xf8, (byte)0x59, (byte)0x68, (byte)0x77,
+ (byte)0xfd, (byte)0x27, (byte)0xe0, (byte)0x5b,
+ (byte)0xd0, (byte)0x2e, (byte)0x28, (byte)0x41,
+ (byte)0xa3, (byte)0x48, (byte)0x5d, (byte)0x2e,
+ (byte)0x7d, (byte)0x5b, (byte)0xd3, (byte)0x60,
+ (byte)0xb3, (byte)0x88, (byte)0x8d, (byte)0x0f,
+ (byte)0x1d, (byte)0x87, (byte)0x51, (byte)0x0f,
+ (byte)0x6a, (byte)0xff, (byte)0xf7, (byte)0xd4,
+ (byte)0x40, (byte)0x35, (byte)0x4e, (byte)0x01,
+ (byte)0x36,
+ };
+ serializeTest(odd, (short)0xd0ad);
+
+ // Large payload that causes 16-bit checksum overflow more than
+ // 255 times.
+ byte[] largeEven = new byte[1024];
+ Arrays.fill(largeEven, (byte)0xff);
+ serializeTest(largeEven, (short)0xb108);
+
+ byte[] largeOdd = new byte[1021];
+ Arrays.fill(largeOdd, (byte)0xff);
+ serializeTest(largeOdd, (short)0xb207);
+ }
- short checksum = (short)0xe553;
-
- // Create ICMP object
+ /**
+  * Serializes an ICMP echo request carrying the given payload, then
+  * deserializes the result and checks length, checksum and round-trip
+  * equality.
+  *
+  * @param payload  raw ICMP payload, or {@code null} for no payload
+  * @param checksum expected checksum of the serialized packet
+  */
+ private void serializeTest(byte[] payload, short checksum)
+ throws PacketException {
ICMP icmp = new ICMP();
- icmp.setType((byte)8);
- icmp.setCode((byte)0);
- icmp.setIdentifier((short) 0x46f5);
- icmp.setSequenceNumber((short) 2);
- icmp.setRawPayload(icmpRawPayload);
- //icmp.setChecksum(checksum);
+ // Echo request (type 8, code 0) with fixed identifier/sequence so
+ // the expected checksum is deterministic for a given payload.
+ icmp.setType((byte)8).setCode((byte)0).
+ setIdentifier((short)0x46f5).setSequenceNumber((short)2);
+ int payloadSize = 0;
+ if (payload != null) {
+ icmp.setRawPayload(payload);
+ payloadSize = payload.length;
+ }
// Serialize
- byte[] stream = icmp.serialize();
- Assert.assertTrue(stream.length == 64);
+ byte[] data = icmp.serialize();
+ // ICMP header is 8 bytes; total length must be header + payload.
+ Assert.assertEquals(payloadSize + 8, data.length);
// Deserialize
ICMP icmpDes = new ICMP();
- icmpDes.deserialize(stream, 0, stream.length);
+ icmpDes.deserialize(data, 0, data.length);
Assert.assertFalse(icmpDes.isCorrupted());
- Assert.assertTrue(icmpDes.getChecksum() == checksum);
- Assert.assertTrue(icmp.equals(icmpDes));
+ // Checksum is computed during serialization and read back here.
+ Assert.assertEquals(checksum, icmpDes.getChecksum());
+ Assert.assertEquals(icmp, icmpDes);
}
}
import java.net.UnknownHostException;
import java.util.Arrays;
-import junit.framework.Assert;
-
+import org.junit.Assert;
import org.junit.Test;
+
import org.opendaylight.controller.sal.match.Match;
import org.opendaylight.controller.sal.match.MatchType;
import org.opendaylight.controller.sal.utils.EtherTypes;
Assert.assertEquals(protocol, (byte) match.getField(MatchType.NW_PROTO).getValue());
Assert.assertEquals(tos, (byte) match.getField(MatchType.NW_TOS).getValue());
}
+
+ /**
+  * Exercises IPv4 deserialization of fragmented packets: non-first
+  * fragments must keep the payload as raw bytes, while the first
+  * fragment (offset 0, MF set) must have its L4 header deserialized.
+  */
+ @Test
+ public void testFragment() throws Exception {
+ // Payloads of 0, 8 and 32 bytes, reused for every protocol below.
+ byte[] payload1 = new byte[0];
+ byte[] payload2 = {
+ (byte)0x61, (byte)0xd1, (byte)0x3d, (byte)0x51,
+ (byte)0x1b, (byte)0x75, (byte)0xa7, (byte)0x83,
+ };
+ byte[] payload3 = {
+ (byte)0xe7, (byte)0x0f, (byte)0x2d, (byte)0x7e,
+ (byte)0x15, (byte)0xba, (byte)0xe7, (byte)0x6d,
+ (byte)0xb5, (byte)0xc5, (byte)0xb5, (byte)0x37,
+ (byte)0x59, (byte)0xbc, (byte)0x91, (byte)0x43,
+ (byte)0xb5, (byte)0xb7, (byte)0xe4, (byte)0x28,
+ (byte)0xec, (byte)0x62, (byte)0x6b, (byte)0x6a,
+ (byte)0xd1, (byte)0xcb, (byte)0x79, (byte)0x1e,
+ (byte)0xfc, (byte)0x82, (byte)0xf5, (byte)0xb4,
+ };
+
+ // Ensure that the payload is not deserialized if the fragment offset
+ // is not zero.
+ byte proto = IPProtocols.TCP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf250);
+ fragmentTest(payload2, proto, (short)0xf248);
+ fragmentTest(payload3, proto, (short)0xf230);
+
+ proto = IPProtocols.UDP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf245);
+ fragmentTest(payload2, proto, (short)0xf23d);
+ fragmentTest(payload3, proto, (short)0xf225);
+
+ proto = IPProtocols.ICMP.byteValue();
+ fragmentTest(payload1, proto, (short)0xf255);
+ fragmentTest(payload2, proto, (short)0xf24d);
+ fragmentTest(payload3, proto, (short)0xf235);
+
+ // Ensure that the protocol header in the first fragment is
+ // deserialized.
+ proto = IPProtocols.TCP.byteValue();
+ TCP tcp = new TCP();
+ tcp.setSourcePort((short)1234).setDestinationPort((short)32000).
+ setSequenceNumber((int)0xd541f5f8).setAckNumber((int)0x58da787d).
+ setDataOffset((byte)5).setReserved((byte)0).
+ setHeaderLenFlags((short)0x18).setWindowSize((short)0x40e8).
+ setUrgentPointer((short)0x15f7).setChecksum((short)0x0d4e);
+ firstFragmentTest(tcp, payload1, proto, (short)0xdfe6);
+ // Refresh the precomputed TCP checksum for the next payload so the
+ // serialized header still matches the expected packet bytes.
+ tcp.setChecksum((short)0xab2a);
+ firstFragmentTest(tcp, payload2, proto, (short)0xdfde);
+ tcp.setChecksum((short)0x1c75);
+ firstFragmentTest(tcp, payload3, proto, (short)0xdfc6);
+
+ proto = IPProtocols.UDP.byteValue();
+ UDP udp = new UDP();
+ udp.setSourcePort((short)53).setDestinationPort((short)45383).
+ setLength((short)(payload1.length + 8)).setChecksum((short)0);
+ firstFragmentTest(udp, payload1, proto, (short)0xdfe7);
+ // UDP length covers the 8-byte header plus the payload; adjust it
+ // for each payload before serializing.
+ udp.setLength((short)(payload2.length + 8));
+ firstFragmentTest(udp, payload2, proto, (short)0xdfdf);
+ udp.setLength((short)(payload3.length + 8));
+ firstFragmentTest(udp, payload3, proto, (short)0xdfc7);
+
+ proto = IPProtocols.ICMP.byteValue();
+ ICMP icmp = new ICMP();
+ icmp.setType((byte)8).setCode((byte)0).setIdentifier((short)0x3d1e).
+ setSequenceNumber((short)1);
+ firstFragmentTest(icmp, payload1, proto, (short)0xdff7);
+ firstFragmentTest(icmp, payload2, proto, (short)0xdfef);
+ firstFragmentTest(icmp, payload3, proto, (short)0xdfd7);
+ }
+
+ /**
+  * Builds a raw IPv4 packet whose fragment offset is non-zero,
+  * deserializes it, and verifies every header field.  Because the
+  * packet is not the first fragment, the L4 payload must be kept as
+  * raw bytes and no payload Packet must be constructed.
+  *
+  * @param payload  raw bytes carried after the IPv4 header
+  * @param proto    IP protocol number to store in the header
+  * @param checksum header checksum value to store and expect back
+  */
+ private void fragmentTest(byte[] payload, byte proto, short checksum)
+ throws Exception {
+ // Construct a fragmented raw IPv4 packet (20-byte header, no options).
+ int ipv4Len = 20;
+ byte[] rawIp = new byte[ipv4Len + payload.length];
+
+ byte ipVersion = 4;
+ byte dscp = 35;
+ byte ecn = 2;
+ byte tos = (byte)((dscp << 2) | ecn);
+ short totalLen = (short)rawIp.length;
+ short id = 22143;
+ // Non-zero offset marks this as a non-first fragment.
+ short offset = 0xb9;
+ byte ttl = 64;
+ byte[] srcIp = {(byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x01};
+ byte[] dstIp = {(byte)0xc0, (byte)0xa9, (byte)0x66, (byte)0x23};
+
+ // Version/IHL, TOS, total length, id, flags+offset, TTL, protocol,
+ // checksum, addresses, then the payload bytes.
+ rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+ rawIp[1] = tos;
+ rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+ rawIp[3] = (byte)totalLen;
+ rawIp[4] = (byte)(id >>> Byte.SIZE);
+ rawIp[5] = (byte)id;
+ rawIp[6] = (byte)(offset >>> Byte.SIZE);
+ rawIp[7] = (byte)offset;
+ rawIp[8] = ttl;
+ rawIp[9] = proto;
+ rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+ rawIp[11] = (byte)checksum;
+ System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+ // Copy dstIp.length bytes (the original used srcIp.length, which
+ // only worked because both arrays happen to be 4 bytes long).
+ System.arraycopy(dstIp, 0, rawIp, 16, dstIp.length);
+ System.arraycopy(payload, 0, rawIp, ipv4Len, payload.length);
+
+ // Deserialize.
+ IPv4 ipv4 = new IPv4();
+ ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+ Assert.assertEquals(ipVersion, ipv4.getVersion());
+ Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+ Assert.assertEquals(dscp, ipv4.getDiffServ());
+ Assert.assertEquals(ecn, ipv4.getECN());
+ Assert.assertEquals(totalLen, ipv4.getTotalLength());
+ Assert.assertEquals(id, ipv4.getIdentification());
+ Assert.assertEquals((byte)0, ipv4.getFlags());
+ Assert.assertEquals(offset, ipv4.getFragmentOffset());
+ Assert.assertEquals(ttl, ipv4.getTtl());
+ Assert.assertEquals(proto, ipv4.getProtocol());
+ Assert.assertEquals(checksum, ipv4.getChecksum());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+ ipv4.getSourceAddress());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+ ipv4.getDestinationAddress());
+ Assert.assertFalse(ipv4.isCorrupted());
+
+ // payloadClass should not be set if fragment offset is not zero.
+ Assert.assertEquals(null, ipv4.getPayload());
+ Assert.assertArrayEquals(payload, ipv4.getRawPayload());
+ }
+
+ /**
+  * Builds a raw IPv4 packet that is the FIRST fragment (offset 0 with
+  * the More-Fragments flag set), deserializes it, and verifies that
+  * the L4 protocol header IS deserialized into a Packet while no raw
+  * payload remains on the IPv4 object.
+  *
+  * @param payload    expected L4 Packet (TCP/UDP/ICMP) after decode
+  * @param rawPayload raw bytes to attach to {@code payload}
+  * @param proto      IP protocol number to store in the header
+  * @param checksum   header checksum value to store and expect back
+  */
+ private void firstFragmentTest(Packet payload, byte[] rawPayload,
+ byte proto, short checksum)
+ throws Exception {
+ // Construct a raw IPv4 packet with MF flag (20-byte header).
+ int ipv4Len = 20;
+ payload.setRawPayload(rawPayload);
+ byte[] payloadBytes = payload.serialize();
+ byte[] rawIp = new byte[ipv4Len + payloadBytes.length];
+
+ byte ipVersion = 4;
+ byte dscp = 13;
+ byte ecn = 1;
+ byte tos = (byte)((dscp << 2) | ecn);
+ short totalLen = (short)rawIp.length;
+ short id = 19834;
+ // MF flag set, fragment offset zero: this is the first fragment.
+ byte flags = 0x1;
+ short offset = 0;
+ short off = (short)(((short)flags << 13) | offset);
+ byte ttl = 64;
+ byte[] srcIp = {(byte)0xac, (byte)0x23, (byte)0x5b, (byte)0xfd};
+ byte[] dstIp = {(byte)0xc0, (byte)0xa8, (byte)0x64, (byte)0x71};
+
+ // Version/IHL, TOS, total length, id, flags+offset, TTL, protocol,
+ // checksum, addresses, then the serialized L4 bytes.
+ rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+ rawIp[1] = tos;
+ rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+ rawIp[3] = (byte)totalLen;
+ rawIp[4] = (byte)(id >>> Byte.SIZE);
+ rawIp[5] = (byte)id;
+ rawIp[6] = (byte)(off >>> Byte.SIZE);
+ rawIp[7] = (byte)off;
+ rawIp[8] = ttl;
+ rawIp[9] = proto;
+ rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+ rawIp[11] = (byte)checksum;
+ System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+ // Copy dstIp.length bytes (the original used srcIp.length, which
+ // only worked because both arrays happen to be 4 bytes long).
+ System.arraycopy(dstIp, 0, rawIp, 16, dstIp.length);
+ System.arraycopy(payloadBytes, 0, rawIp, ipv4Len, payloadBytes.length);
+
+ // Deserialize.
+ IPv4 ipv4 = new IPv4();
+ ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+ Assert.assertEquals(ipVersion, ipv4.getVersion());
+ Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+ Assert.assertEquals(dscp, ipv4.getDiffServ());
+ Assert.assertEquals(ecn, ipv4.getECN());
+ Assert.assertEquals(totalLen, ipv4.getTotalLength());
+ Assert.assertEquals(id, ipv4.getIdentification());
+ Assert.assertEquals(flags, ipv4.getFlags());
+ Assert.assertEquals(offset, ipv4.getFragmentOffset());
+ Assert.assertEquals(ttl, ipv4.getTtl());
+ Assert.assertEquals(proto, ipv4.getProtocol());
+ Assert.assertEquals(checksum, ipv4.getChecksum());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+ ipv4.getSourceAddress());
+ Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+ ipv4.getDestinationAddress());
+ Assert.assertFalse(ipv4.isCorrupted());
+
+ // Protocol header in the first fragment should be deserialized.
+ Assert.assertEquals(null, ipv4.getRawPayload());
+
+ Packet desPayload = ipv4.getPayload();
+ Assert.assertEquals(payload, desPayload);
+ Assert.assertFalse(desPayload.isCorrupted());
+ Assert.assertArrayEquals(rawPayload, desPayload.getRawPayload());
+ }
}
package org.opendaylight.controller.topologymanager.internal;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.util.ArrayList;
-import java.util.Dictionary;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.LinkedBlockingQueue;
-
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.felix.dm.Component;
import org.eclipse.osgi.framework.console.CommandInterpreter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.LinkedBlockingQueue;
+
/**
* The class describes TopologyManager which is the central repository of the
* network topology. It provides service for applications to interact with
// all except the creation time stamp because that should
// be set only when the edge is created
TimeStamp timeStamp = null;
- for (Property prop : oldProps) {
- if (prop instanceof TimeStamp) {
- TimeStamp tsProp = (TimeStamp) prop;
- if (tsProp.getTimeStampName().equals("creation")) {
- timeStamp = tsProp;
- break;
+ if (oldProps != null) {
+ for (Property prop : oldProps) {
+ if (prop instanceof TimeStamp) {
+ TimeStamp tsProp = (TimeStamp) prop;
+ if (tsProp.getTimeStampName().equals("creation")) {
+ timeStamp = tsProp;
+ break;
+ }
}
}
}
if (prop instanceof TimeStamp) {
TimeStamp t = (TimeStamp) prop;
if (t.getTimeStampName().equals("creation")) {
- i.remove();
+ if (timeStamp != null) {
+ i.remove();
+ }
break;
}
}
package org.opendaylight.controller.topologymanager.internal;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.ConstructionException;
+import org.opendaylight.controller.sal.core.Description;
import org.opendaylight.controller.sal.core.Edge;
import org.opendaylight.controller.sal.core.Host;
import org.opendaylight.controller.sal.core.Latency;
import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
import org.opendaylight.controller.sal.core.Property;
import org.opendaylight.controller.sal.core.State;
+import org.opendaylight.controller.sal.core.TimeStamp;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.packet.address.EthernetAddress;
import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.switchmanager.SwitchConfig;
import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
public class TopologyManagerImplTest {
/**
* Mockup of switch manager that only maintains existence of node
Assert.assertTrue(nodeNCmap.isEmpty());
}
+
+ /**
+  * Regression test: edgeUpdate() must tolerate a CHANGED update whose
+  * property set includes a "creation" TimeStamp (bug 1348), and the
+  * edge must be present in the topology afterwards.
+  */
+ @Test
+ public void bug1348FixTest() throws ConstructionException {
+ TopologyManagerImpl topoManager = new TopologyManagerImpl();
+ TestSwitchManager switchManager = new TestSwitchManager();
+ topoManager.setSwitchManager(switchManager);
+ topoManager.nonClusterObjectCreate();
+
+ // Build a single edge between two OpenFlow node connectors.
+ NodeConnector head = NodeConnectorCreator.createOFNodeConnector(
+ (short) 1, NodeCreator.createOFNode(1000L));
+ NodeConnector tail = NodeConnectorCreator.createOFNodeConnector(
+ (short) 2, NodeCreator.createOFNode(2000L));
+ Edge edge = new Edge(head, tail);
+
+ // Property set deliberately includes a creation TimeStamp, which
+ // triggered the bug during CHANGED updates.
+ Set<Property> props = new HashSet<>();
+ props.add(new TimeStamp(System.currentTimeMillis(), "creation"));
+ props.add(new Latency(Latency.LATENCY100ns));
+ props.add(new State(State.EDGE_UP));
+ props.add(new Bandwidth(Bandwidth.BW100Gbps));
+ props.add(new Description("Test edge"));
+
+ List<TopoEdgeUpdate> updates = new ArrayList<>();
+ updates.add(new TopoEdgeUpdate(edge, props, UpdateType.CHANGED));
+
+ try {
+ topoManager.edgeUpdate(updates);
+ } catch (Exception e) {
+ Assert.fail("Exception was raised when trying to update edge properties: " + e.getMessage());
+ }
+
+ // The edge must have been recorded exactly once.
+ Assert.assertEquals(1, topoManager.getEdges().size());
+ Assert.assertNotNull(topoManager.getEdges().get(edge));
+ }
}