<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
</feature>
+
+ <feature name='odl-clustering-test-app' version='${project.version}'>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+ </feature>
+
</features>
Optional TODO: Remove TODO comments.
-->
<!-- test to validate features.xml -->
+ <!--FIXME BUG-2195 When running single feature tests for netconf connector, features including ssh proxy server always fail (this behavior does not appear when running karaf distro directly)-->
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-test</artifactId>
<repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-netconf/${netconf.version}/xml/features</repository>
+ <!-- FIXME: This introduces cycle between projects, which makes version updates
+ harder. Should be moved to different.
+ -->
<repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository>
<feature name='odl-netconf-connector-all' version='${project.version}' description='OpenDaylight :: Netconf Connector :: All'>
<!--
</feature>
<feature name='odl-netconf-ssh' version='${netconf.version}' description="OpenDaylight :: Netconf Connector :: SSH">
<feature version='${netconf.version}'>odl-netconf-tcp</feature>
- <feature version='${aaa.version}'>odl-aaa-authn-plugin</feature>
+ <!-- FIXME: This introduces cycle between projects, which makes version updates
+ harder. Should be moved to different.
+ -->
+ <feature version='${aaa.version}'>odl-aaa-netconf-plugin</feature>
<bundle>mvn:org.opendaylight.controller/netconf-ssh/${netconf.version}</bundle>
- <bundle>mvn:org.bouncycastle/bcpkix-jdk15on/${bouncycastle.version}</bundle>
- <bundle>mvn:org.bouncycastle/bcprov-jdk15on/${bouncycastle.version}</bundle>
</feature>
<feature name='odl-netconf-tcp' version='${netconf.version}' description="OpenDaylight :: Netconf Connector :: TCP">
<feature version='${netconf.version}'>odl-netconf-impl</feature>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-netty-util</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-netty-util/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.thirdparty/ganymed/${ganymed.version}</bundle>
+ <bundle>mvn:org.bouncycastle/bcpkix-jdk15on/${bouncycastle.version}</bundle>
+ <bundle>mvn:org.bouncycastle/bcprov-jdk15on/${bouncycastle.version}</bundle>
<bundle>mvn:org.apache.sshd/sshd-core/${sshd-core.version}</bundle>
<bundle>mvn:org.openexi/nagasena/${exi.nagasena.version}</bundle>
<bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
<maven>3.0</maven>
</prerequisites>
<modules>
- <module>base</module>
- <module>controller</module>
- <module>adsal</module>
- <module>nsf</module>
- <module>extras</module>
<module>config</module>
<module>config-persister</module>
<module>config-netty</module>
<module>flow</module>
<module>netconf</module>
<module>protocol-framework</module>
- <module>adsal-compatibility</module>
<module>akka</module>
<module>netconf-connector</module>
<module>restconf</module>
-->
<feature version='${project.version}'>odl-restconf</feature>
<feature version='${project.version}'>odl-mdsal-apidocs</feature>
- <feature version='${project.version}'>odl-clustering-test-app</feature>
</feature>
<!--
Necessary TODO: Define your features. It is useful to list then in order of dependency. So if A depends on B, list A first.
<bundle>wrap:mvn:org.json/json/${org.json.version}</bundle>
</feature>
- <feature name='odl-clustering-test-app' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-clustering</feature>
- <feature version='${project.version}'>odl-restconf</feature>
- <feature version='${yangtools.version}'>odl-yangtools-models</feature>
- <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
- <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
- <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
- <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
- </feature>
-
<feature name='odl-toaster-rest' version='${project.version}'>
<feature version='${project.version}'>odl-restconf</feature>
<feature version='${project.version}'>odl-toaster</feature>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../commons/opendaylight</relativePath>
+ <relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>appauth</artifactId>
<version>0.5.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../commons/opendaylight</relativePath>
+ <relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>arphandler</artifactId>
<version>0.6.0-SNAPSHOT</version>
<artifactId>clustering.services.integrationtest</artifactId>
<version>0.5.0-SNAPSHOT</version>
<properties>
- <sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.reportPath>../../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
</properties>
<dependencies>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
- <destFile>../implementation/target/jacoco-it.exec</destFile>
+ <destFile>../../implementation/target/jacoco-it.exec</destFile>
<includes>
<include>org.opendaylight.controller.*</include>
</includes>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.services</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.services-implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.stub</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.test</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.httpclient</artifactId>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
<configuration>
<failsOnError>true</failsOnError>
<configLocation>controller/checkstyle.xml</configLocation>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.integrationtest</artifactId>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
<configuration>
<failsOnError>true</failsOnError>
<configLocation>controller/checkstyle.xml</configLocation>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>${surefire.version}</version>
<configuration>
<skipTests>true</skipTests>
</configuration>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>configuration</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>configuration.implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager.implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.it.implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.shell</artifactId>
<version>${containermanager.shell.version}</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../commons/opendaylight</relativePath>
+ <relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>dummy-console</artifactId>
<version>1.2.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>commons.opendaylight</artifactId>\r
<version>1.5.0-SNAPSHOT</version>\r
- <relativePath>../../opendaylight/commons/opendaylight</relativePath>\r
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>\r
</parent>\r
<artifactId>features-adsal-compatibility</artifactId>\r
<packaging>jar</packaging>\r
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../opendaylight/commons/opendaylight</relativePath>
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-adsal</artifactId>
<version>${sal.version}</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../opendaylight/commons/opendaylight</relativePath>
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-base</artifactId>
<packaging>jar</packaging>
<groupId>org.opendaylight.controller</groupId>
<artifactId>karaf-tomcat-security</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-test</artifactId>
- <version>0.7.0-SNAPSHOT</version>
</dependency>
<!-- dependency for opendaylight-karaf-empty for use by testing -->
<dependency>
<bundle>wrap:mvn:io.netty/netty-common/${netty.version}</bundle>
<bundle>wrap:mvn:io.netty/netty-handler/${netty.version}</bundle>
<bundle>wrap:mvn:io.netty/netty-codec-http/${netty.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.thirdparty/ganymed/1.2.0-SNAPSHOT</bundle>
</feature>
<feature name="odl-base-jersey" description="Jersey" version="${jersey.version}">
<feature>odl-base-gemini-web</feature>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../opendaylight/commons/opendaylight</relativePath>
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>controller-features</artifactId>
<packaging>pom</packaging>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../opendaylight/commons/opendaylight</relativePath>
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>extras-features</artifactId>
<packaging>kar</packaging>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../opendaylight/commons/opendaylight</relativePath>
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-nsf</artifactId>
<version>${nsf.version}</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwarding.staticrouting</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwardingrulesmanager</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwardingrulesmanager.implementation</artifactId>
<artifactId>forwardingrulesmanager.integrationtest</artifactId>
<version>0.5.0-SNAPSHOT</version>
<properties>
- <sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.reportPath>../../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
</properties>
<dependencies>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
- <destFile>../implementation/target/jacoco-it.exec</destFile>
+ <destFile>../../implementation/target/jacoco-it.exec</destFile>
<includes>
<include>org.opendaylight.controller.*</include>
</includes>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker</artifactId>
<version>0.6.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.implementation</artifactId>
<version>0.6.0-SNAPSHOT</version>
<artifactId>hosttracker.integrationtest</artifactId>
<version>0.6.0-SNAPSHOT</version>
<properties>
- <sonar.jacoco.itReportPath>../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.jacoco.itReportPath>../../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.reportPath>../../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
</properties>
<dependencies>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
- <destFile>../implementation/target/jacoco-it.exec</destFile>
+ <destFile>../../implementation/target/jacoco-it.exec</destFile>
<includes>
<include>org.opendaylight.controller.*</include>
</includes>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.shell</artifactId>
<version>${hosttracker.shell.version}</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker_new</artifactId>
<version>0.5.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker_new.implementation</artifactId>
<version>0.5.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../commons/opendaylight</relativePath>
+ <relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>karaf-tomcat-security</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>logging.bridge</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>${artifactId}</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>bundlescanner</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>bundlescanner.implementation</artifactId>
if (classes == null || classes.size() == 0) return;
Map<String,String> names = new HashMap<String,String>();
StringBuilder conflictsMsg = new StringBuilder();
- for (Class c : classes) {
- XmlRootElement root = (XmlRootElement) c.getAnnotation(XmlRootElement.class);
+ for (Class<?> c : classes) {
+ XmlRootElement root = c.getAnnotation(XmlRootElement.class);
if (root == null) continue;
String rootName = root.name();
if ("##default".equals(rootName)) {
*/
package org.opendaylight.controller.northbound.bundlescanner.internal;
-
-
import java.io.File;
import java.io.FileFilter;
import java.net.MalformedURLException;
import org.springframework.osgi.mock.MockBundle;
import org.springframework.osgi.mock.MockBundleContext;
-import static junit.framework.Assert.assertNotNull;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class BundleScannerTest {
}
@Override
- public Enumeration findEntries(String path, String filePattern, boolean recurse) {
+ public Enumeration<URL> findEntries(String path, String filePattern, boolean recurse) {
return Collections.enumeration(classes);
}
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>controllermanager.northbound</artifactId>
<version>0.1.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>flowprogrammer.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.northbound</artifactId>
<version>0.5.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>httpservice-bridge</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>northbound.client</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>jolokia-bridge</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>networkconfig.bridgedomain.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwarding.staticrouting.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>statistics.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>subnets.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>swagger-ui</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>usermanager.northbound</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>networkconfig.neutron.northbound</artifactId>
+ </dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</fileset>
<mapper>
<regexpmapper from="^(.*)/([^/]+)/*/target/enunciate/generate/swagger/ui/(.*Northbound).*$$" to="\3"></regexpmapper>
+ <regexpmapper from="^(.*)/neutron/([^/]+)/*/target/enunciate/generate/swagger/ui/(.*resource-list.json)$$" to="neutron-\3"></regexpmapper>
<regexpmapper from="^(.*)/([^/]+)/*/target/enunciate/generate/swagger/ui/(.*resource-list.json)$$" to="\2-\3"></regexpmapper>
</mapper>
</copy>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager.northbound</artifactId>
<version>0.5.0-SNAPSHOT</version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topology.northbound</artifactId>
*/
package org.opendaylight.controller.topology.northbound;
-
-
import org.junit.Test;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.ConstructionException;
import java.util.List;
import java.util.Set;
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TopologyTest {
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../opendaylight/commons/opendaylight</relativePath>
+ <relativePath>../../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.parent</artifactId>
+ <version>1.1.0-SNAPSHOT</version>
+ <relativePath>../commons/parent</relativePath>
+ </parent>
+
+ <artifactId>adsal-parent</artifactId>
+ <packaging>pom</packaging>
+ <modules>
+ <module>forwarding/staticrouting</module>
+ <module>clustering/services</module>
+ <module>clustering/services_implementation</module>
+ <module>clustering/stub</module>
+ <module>clustering/test</module>
+ <module>configuration/api</module>
+ <module>configuration/implementation</module>
+ <module>routing/dijkstra_implementation</module>
+ <module>arphandler</module>
+ <module>forwardingrulesmanager/api</module>
+ <module>forwardingrulesmanager/implementation</module>
+ <module>hosttracker/api</module>
+ <module>hosttracker/implementation</module>
+ <module>hosttracker/shell</module>
+ <module>hosttracker_new/api</module>
+ <module>hosttracker_new/implementation</module>
+ <module>containermanager/api</module>
+ <module>containermanager/implementation</module>
+ <module>containermanager/shell</module>
+ <module>appauth</module>
+ <module>switchmanager/api</module>
+ <module>switchmanager/implementation</module>
+ <module>statisticsmanager/api</module>
+ <module>statisticsmanager/implementation</module>
+ <module>topologymanager/implementation</module>
+ <module>topologymanager/shell</module>
+ <module>usermanager/api</module>
+ <module>usermanager/implementation</module>
+ <module>connectionmanager/api</module>
+ <module>connectionmanager/implementation</module>
+ <module>security</module>
+ <module>karaf-tomcat-security</module>
+
+ <!-- SAL bundles -->
+ <module>sal/api</module>
+ <module>sal/implementation</module>
+
+ <!-- SAL Extension bundles -->
+ <module>sal/connection/api</module>
+ <module>sal/connection/implementation</module>
+ <module>sal/networkconfiguration/api</module>
+ <module>sal/networkconfiguration/implementation</module>
+
+ <!-- Web bundles -->
+ <module>web/root</module>
+ <module>web/flows</module>
+ <module>web/devices</module>
+ <module>web/troubleshoot</module>
+ <module>web/topology</module>
+ <module>web/osgi-brandfragment</module>
+
+ <!-- Northbound bundles -->
+ <module>northbound/commons</module>
+ <module>northbound/bundlescanner/api</module>
+ <module>northbound/bundlescanner/implementation</module>
+ <module>northbound/topology</module>
+ <module>northbound/staticrouting</module>
+ <module>northbound/statistics</module>
+ <module>northbound/flowprogrammer</module>
+ <module>northbound/hosttracker</module>
+ <module>northbound/subnets</module>
+ <module>northbound/switchmanager</module>
+ <module>northbound/containermanager</module>
+ <module>northbound/networkconfiguration/bridgedomain</module>
+ <module>northbound/httpservice-bridge</module>
+ <module>northbound/jolokia</module>
+ <module>northbound/connectionmanager</module>
+ <module>northbound/usermanager</module>
+ <module>northbound/controllermanager</module>
+
+ <!-- Debug and logging -->
+ <module>logging/bridge</module>
+
+ <!-- Southbound bundles -->
+ <module>protocol_plugins/openflow</module>
+ <module>protocol_plugins/stub</module>
+
+ <!-- Samples -->
+ <module>samples/simpleforwarding</module>
+ <module>samples/loadbalancer</module>
+ <module>samples/northbound/loadbalancer</module>
+
+ <module>dummy-console</module>
+
+ <!-- features -->
+ <module>features/base</module>
+ <module>features/controller</module>
+ <module>features/adsal</module>
+ <module>features/nsf</module>
+ <module>features/extras</module>
+ <module>features/adsal-compatibility</module>
+ </modules>
+ <profiles>
+ <profile>
+ <id>integrationtests</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ </activation>
+ <modules>
+ <module>clustering/integrationtest</module>
+ <module>configuration/integrationtest</module>
+ <module>forwardingrulesmanager/integrationtest</module>
+ <module>hosttracker/integrationtest</module>
+ <module>switchmanager/integrationtest</module>
+ <module>topologymanager/integrationtest</module>
+ <!-- Northbound integration tests -->
+ <module>northbound/integrationtest</module>
+ <module>statisticsmanager/integrationtest</module>
+ <module>containermanager/it.implementation</module>
+ <module>commons/integrationtest</module>
+
+ <module>commons/httpclient</module>
+ </modules>
+ </profile>
+ <profile>
+ <id>docs</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ </activation>
+ <modules>
+ <module>northbound/java-client</module>
+ <module>northbound/swagger-ui</module>
+ </modules>
+ </profile>
+ </profiles>
+</project>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol_plugins.openflow</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol_plugins.stub</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>routing.dijkstra_implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.connection</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.connection.implementation</artifactId>
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-
import org.opendaylight.controller.sal.connection.ConnectionConstants;
import org.opendaylight.controller.sal.connection.ConnectionLocality;
import org.opendaylight.controller.sal.connection.IConnectionListener;
private ConcurrentMap<String, IPluginInConnectionService> pluginService =
new ConcurrentHashMap<String, IPluginInConnectionService>();
- void setPluginService (Map props, IPluginInConnectionService s) {
+ void setPluginService (Map<?, ?> props, IPluginInConnectionService s) {
String type = null;
Object value = props.get(GlobalConstants.PROTOCOLPLUGINTYPE.toString());
if (value instanceof String) {
}
}
- void unsetPluginService(Map props, IPluginInConnectionService s) {
+ void unsetPluginService(Map<?, ?> props, IPluginInConnectionService s) {
String type = null;
Object value = props.get(GlobalConstants.PROTOCOLPLUGINTYPE.toString());
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.implementation</artifactId>
}
}
- void setPluginInDataService(Map props, IPluginInDataPacketService s) {
+ void setPluginInDataService(Map<?, ?> props, IPluginInDataPacketService s) {
ProtocolService.set(this.pluginInDataService, props, s, logger);
}
- void unsetPluginInDataService(Map props, IPluginInDataPacketService s) {
+ void unsetPluginInDataService(Map<?, ?> props, IPluginInDataPacketService s) {
ProtocolService.unset(this.pluginInDataService, props, s, logger);
}
- void setListenDataPacket(Map props, IListenDataPacket s) {
+ void setListenDataPacket(Map<?, ?> props, IListenDataPacket s) {
if (this.listenDataPacket == null || this.indexDataPacket == null) {
logger.error("data structure to store data is NULL");
return;
}
logger.trace("Received setListenDataPacket request");
- for (Object e : props.entrySet()) {
- Map.Entry entry = (Map.Entry) e;
- logger.trace("Prop key:({}) value:({})",entry.getKey(), entry.getValue());
+ for (Map.Entry<?, ?> e : props.entrySet()) {
+ logger.trace("Prop key:({}) value:({})",e.getKey(), e.getValue());
}
String listenerName = null;
}
}
- void unsetListenDataPacket(Map props, IListenDataPacket s) {
+ void unsetListenDataPacket(Map<?, ?> props, IListenDataPacket s) {
if (this.listenDataPacket == null || this.indexDataPacket == null) {
logger.error("data structure to store data is NULL");
return;
}
logger.trace("Received UNsetListenDataPacket request");
- for (Object e : props.entrySet()) {
- Map.Entry entry = (Map.Entry) e;
- logger.trace("Prop key:({}) value:({})",entry.getKey(), entry.getValue());
+ for (Map.Entry<?, ?> e : props.entrySet()) {
+ logger.trace("Prop key:({}) value:({})",e.getKey(), e.getValue());
}
String listenerName = null;
return false;
}
- ProtocolService plugin = (ProtocolService)o;
+ ProtocolService<?> plugin = (ProtocolService<?>)o;
return (service.equals(plugin.service) && priority == plugin.priority);
}
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.networkconfiguration</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.networkconfiguration.implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustersession</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.loadbalancer</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../../commons/opendaylight</relativePath>
+ <relativePath>../../../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.loadbalancer.northbound</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.simpleforwarding</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../commons/opendaylight</relativePath>
+ <relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>security</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>statisticsmanager</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>statisticsmanager.implementation</artifactId>
<artifactId>statisticsmanager.integrationtest</artifactId>
<version>0.5.0-SNAPSHOT</version>
<properties>
- <sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.reportPath>../../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
</properties>
<dependencies>
<dependency>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
- <destFile>../implementation/target/jacoco-it.exec</destFile>
+ <destFile>../../implementation/target/jacoco-it.exec</destFile>
<includes>
<include>org.opendaylight.controller.*</include>
</includes>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager.implementation</artifactId>
<artifactId>switchmanager.integrationtest</artifactId>
<version>0.5.0-SNAPSHOT</version>
<properties>
- <sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.reportPath>../../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
</properties>
<dependencies>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
- <destFile>../implementation/target/jacoco-it.exec</destFile>
+ <destFile>../../implementation/target/jacoco-it.exec</destFile>
<includes>
<include>org.opendaylight.controller.*</include>
</includes>
assertFalse(debugit);
// Now lets create a hosttracker for testing purpose
- ServiceReference s = bc.getServiceReference(ISwitchManager.class
+ ServiceReference<?> s = bc.getServiceReference(ISwitchManager.class
.getName());
if (s != null) {
this.switchManager = (ISwitchManager) bc.getService(s);
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topologymanager</artifactId>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
<extensions>true</extensions>
<configuration>
<instructions>
<artifactId>topologymanager.integrationtest</artifactId>
<version>0.5.0-SNAPSHOT</version>
<properties>
- <sonar.jacoco.itReportPath>../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <sonar.jacoco.itReportPath>../../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.reportPath>../../implementation/target/jacoco.exec</sonar.jacoco.reportPath>
</properties>
<dependencies>
<dependency>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
- <destFile>../implementation/target/jacoco-it.exec</destFile>
+ <destFile>../../implementation/target/jacoco-it.exec</destFile>
<includes>
<include>org.opendaylight.controller.*</include>
</includes>
Set<Property> properties = new HashSet<Property>();
- ServiceReference r = bc.getServiceReference(IPluginInTopologyService.class
+ ServiceReference<?> r = bc.getServiceReference(IPluginInTopologyService.class
.getName());
TopologyServices topologyServices = null;
if (r != null) {
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topologymanager.shell</artifactId>
<version>${topologymanager.shell.version}</version>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>topologymanager</artifactId>
- <version>${topologymanager.version}</version>
</dependency>
</dependencies>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
<configuration>
<instructions>
<Import-Package>org.apache.felix.service.command,
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager.implementation</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>devices.web</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>flows.web</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>osgi-brandfragment.web</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>web</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>topology.web</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
<version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
+ <relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>troubleshoot.web</artifactId>
<feature name='odl-${repoName}-all' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: All'>
<!--
Necessary TODO:
- List all of the user consumable features you define in this feature file here.
+ List all of the features you define in this feature file here. This is meant to be used as index
+ of all available features, not necessarily a feature that a user would ever install directly.
+
Generally you would *not* list individual bundles here, but only features defined in *this* file.
It is useful to list them in the same order they occur in the file.
https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines
Particularly:
a) Prefixing names with 'odl-': https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Feature_Naming
- b) Descriptions: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Description
- c) Avoid start-levels: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Avoid_start-levels
+ b) For user-facing features, it's often useful to have two additional features with the suffix -rest and -ui
+ * The "base" feature, e.g., odl-${repoName}-<feature> provides just the base functionality
+ * The "rest" feature, e.g., odl-${repoName}-<feature>-rest includes the odl-${repoName}-<feature> feature as well
+ as odl-restconf and any other features/bundles needed to make REST/RESTCONF work for the base feature
+ * The "ui" feature, e.g., odl-${repoName}-<feature>-ui includes the odl-${repoName}-<feature>-rest feature as well
+ as the odl-dlux-core feature and any other features/bundles needed to make DLUX work for the base feature
+ * Note: Not all features should be user-facing. Only features which end-users of OpenDaylight, e.g., network
+ operators would want to be able to easily install/enable/disable should be considered user-facing.
+ The goal of user-facing features is to hit the 90/10 point where ~10% of the configuration options cover
+ ~90% of use cases. Developers and advanced users can dig in and customize the installed features and
+ bundles to their heart's content.
+ c) Descriptions: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Description
+ d) Avoid start-levels: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Avoid_start-levels
It's also nice to list inside a feature, first the features it needs, then the bundles it needs, then the configfiles.
Examples:
package org.opendaylight.controller.filtervalve.cors.jaxb;
import static org.hamcrest.core.Is.is;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import com.google.common.base.Optional;
import java.io.File;
final String jspFilter = "jspFilter";
final String exactMatch = "/somePath";
final String prefixFilter = "prefixFilter";
- LinkedHashMap<String, String> patternMap = new LinkedHashMap<String, String>() {
- {
- put(exactMatch, exactMatchFilter);
- put("/*", defaultFilter);
- put("*.jsp", jspFilter);
- put("/foo/*", prefixFilter);
- }
- };
+ LinkedHashMap<String, String> patternMap = new LinkedHashMap<>();
+ patternMap.put(exactMatch, exactMatchFilter);
+ patternMap.put("/*", defaultFilter);
+ patternMap.put("*.jsp", jspFilter);
+ patternMap.put("/foo/*", prefixFilter);
urlMatcher = new UrlMatcher<>(patternMap);
assertMatches("/abc", defaultFilter);
assertMatches(exactMatch, exactMatchFilter, defaultFilter);
package org.opendaylight.controller.sal.packet;
-import junit.framework.Assert;
-
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.liblldp.BitBufferHelper;
<forwarding.staticrouting.northbound.version>0.5.0-SNAPSHOT</forwarding.staticrouting.northbound.version>
<forwardingrulesmanager.implementation.version>0.5.0-SNAPSHOT</forwardingrulesmanager.implementation.version>
<forwardingrulesmanager.version>0.7.0-SNAPSHOT</forwardingrulesmanager.version>
- <ganymed.version>1.2.0-SNAPSHOT</ganymed.version>
<hosttracker.api.version>0.6.0-SNAPSHOT</hosttracker.api.version>
<hosttracker.implementation.version>0.6.0-SNAPSHOT</hosttracker.implementation.version>
<hosttracker.northbound.version>0.5.0-SNAPSHOT</hosttracker.northbound.version>
<sonar.language>java</sonar.language>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
- <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+ <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
<sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<dependencyManagement>
<dependencies>
+
<!-- project specific dependencies -->
<dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>ietf-netconf-monitoring</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>ietf-netconf-monitoring-extension</artifactId>
- <version>${netconf.version}</version>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-artifacts</artifactId>
+ <version>${config.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
</dependency>
<dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>netconf-netty-util</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-artifacts</artifactId>
<version>${netconf.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
</dependency>
+
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
</dependency>
<!-- config-->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-api</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-manager</artifactId>
- <version>${config.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-manager</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netconf-connector</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-api</artifactId>
- <version>${config.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-api</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-directory-xml-adapter</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-file-xml-adapter</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-feature-adapter</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-impl</artifactId>
- <version>${netconf.version}</version>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-util</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-util</artifactId>
- <version>${config.version}</version>
- <type>test-jar</type>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>configuration</artifactId>
<artifactId>liblldp</artifactId>
<version>${sal.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>logback-config</artifactId>
- <version>${config.version}</version>
- </dependency>
<!-- Debug and logging -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<version>${dummy-console.version}</version>
</dependency>
- <!-- Netconf -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-api</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-client</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-client</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
-
- <!--Netconf config-->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-config-dispatcher</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-impl</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-impl</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-mapping-api</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-monitoring</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-netty-util</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-auth</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-usermanager</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-ssh</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-ssh</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-tcp</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-util</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-util</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-config-api</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-event-executor-config</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-threadgroup-config</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-timer-config</artifactId>
- <version>${config.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<!-- NOTE(review): this entry had two <artifactId> children
     (networkconfig.bridgedomain.northbound and sal-rest-connector-config),
     which is invalid per the POM schema — almost certainly two adjacent
     dependencies fused by a bad merge/truncation. Kept the artifactId that
     matches ${mdsal.version}; restore the northbound dependency with its own
     version property if it was dropped. -->
<artifactId>sal-rest-connector-config</artifactId>
<version>${mdsal.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netty-config</artifactId>
- <version>${config.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>md-sal-config</artifactId>
<version>${mdsal.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-config</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-connector-config</artifactId>
- <version>${netconf.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<!-- NOTE(review): this was paired with ${karaf.security.version}, which looks
     like a merge artifact — sal-rest-docgen is an MD-SAL artifact and the
     sibling MD-SAL entries use ${mdsal.version}. Confirm against the
     pre-merge pom. -->
<version>${mdsal.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>shutdown-api</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>shutdown-impl</artifactId>
- <version>${config.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>statistics.northbound</artifactId>
<version>${switchmanager.northbound.version}</version>
</dependency>
- <!-- threadpool -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-impl</artifactId>
- <version>${config.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<!-- NOTE(review): this entry had two <artifactId> children
     (topology.northbound and web) — invalid per the POM schema; two adjacent
     dependencies were fused by a bad merge/truncation. Kept the artifactId
     matching ${web.version}; restore topology.northbound with its own
     version property if it was dropped. -->
<artifactId>web</artifactId>
<version>${web.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>yang-jmx-generator</artifactId>
- <version>${config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>yang-jmx-generator</artifactId>
- <version>${config.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>yang-test</artifactId>
- <version>${config.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>forwardingrules-manager</artifactId>
<type>xml</type>
<scope>runtime</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-config-netty</artifactId>
- <version>${config.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<!-- NOTE(review): this entry had two <artifactId> children (features-flow and
     com.sun.jersey.jersey-servlet) — invalid per the POM schema; kept the
     artifactId matching ${jersey-servlet.version}. The features-flow
     dependency (normally classifier 'features', type 'xml', runtime scope)
     appears to have lost its remaining lines in the merge — restore it as a
     separate entry. -->
<artifactId>com.sun.jersey.jersey-servlet</artifactId>
<version>${jersey-servlet.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- <version>${ganymed.version}</version>
- </dependency>
+
<!-- Third parties from opendaylight released -->
<dependency>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>org.openflow.openflowj</artifactId>
<version>1.0.2</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-data-codec</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-spi</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-type-provider</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>concepts</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>object-cache-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>object-cache-guava</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>restconf-client-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>restconf-client-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-composite-node</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-codec-gson</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <!-- yangtools dependencies -->
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
+ <!-- yangtools artifacts -->
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
+ <artifactId>yangtools-artifacts</artifactId>
<version>${yangtools.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-maven-plugin-spi</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-parser-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-parser-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <!-- yang model dependencies -->
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-inet-types</artifactId>
- <version>${ietf-inet-types.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-restconf</artifactId>
- <version>${ietf-restconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-topology</artifactId>
- <version>${ietf-topology.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-topology-l3-unicast-igp</artifactId>
- <version>${ietf-topology.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types</artifactId>
- <version>${ietf-yang-types.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types-20130715</artifactId>
- <version>2013.07.15.7-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>opendaylight-l2-types</artifactId>
- <version>${opendaylight-l2-types.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>yang-ext</artifactId>
- <version>${yang-ext.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>antlr4-runtime-osgi-nohead</artifactId>
- <version>4.0</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>xtend-lib-osgi</artifactId>
- <version>${xtend.version}</version>
- </dependency>
+
<dependency>
<groupId>org.openexi</groupId>
<artifactId>nagasena</artifactId>
<!-- NOTE(review): ${mdsal.version} and <scope>test</scope> look like merge
     artifacts here — org.openexi:nagasena is a third-party artifact that
     normally carries its own version property and default (compile) scope;
     the test scope likely belonged to the mockito-configuration entry removed
     just below. Verify against the pre-merge pom before releasing. -->
<version>${mdsal.version}</version>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>mockito-configuration</artifactId>
- <version>${yangtools.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-config</artifactId>
- <version>${config.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-protocol-framework</artifactId>
<type>xml</type>
<scope>runtime</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-netconf</artifactId>
- <version>${netconf.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-config-persister</artifactId>
- <version>${config.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netty-features</artifactId>
- <version>${config.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-base</artifactId>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
<configuration>
<failsOnError>true</failsOnError>
<configLocation>controller/checkstyle.xml</configLocation>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>${surefire.version}</version>
<configuration>
<argLine>${testvm.argLine} ${jacoco.agent.ut.arg}</argLine>
<systemProperties>
assertFalse(session.isSuccess());
}
- @Test
- public void testNegotiationFailedNoReconnect() throws Exception {
- final Promise<Boolean> p = new DefaultPromise<>(GlobalEventExecutor.INSTANCE);
-
- this.dispatcher = getServerDispatcher(p);
-
- this.server = this.dispatcher.createServer(this.serverAddress, new SessionListenerFactory<SimpleSessionListener>() {
- @Override
- public SimpleSessionListener getSessionListener() {
- return new SimpleSessionListener();
- }
- });
-
- this.server.get();
-
- this.clientDispatcher = new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
- @Override
- public SessionNegotiator<SimpleSession> getSessionNegotiator(final SessionListenerFactory<SimpleSessionListener> factory,
- final Channel channel, final Promise<SimpleSession> promise) {
-
- return new SimpleSessionNegotiator(promise, channel) {
- @Override
- protected void startNegotiation() throws Exception {
- negotiationFailed(new IllegalStateException("Negotiation failed"));
- }
- };
- }
- }, new DefaultPromise<SimpleSession>(GlobalEventExecutor.INSTANCE), eventLoopGroup);
-
- final ReconnectStrategyFactory reconnectStrategyFactory = mock(ReconnectStrategyFactory.class);
- final ReconnectStrategy reconnectStrategy = getMockedReconnectStrategy();
- doReturn(reconnectStrategy).when(reconnectStrategyFactory).createReconnectStrategy();
-
- this.clientDispatcher.createReconnectingClient(this.serverAddress,
- reconnectStrategyFactory, new SessionListenerFactory<SimpleSessionListener>() {
- @Override
- public SimpleSessionListener getSessionListener() {
- return new SimpleSessionListener();
- }
- });
-
-
- // Only one strategy should be created for initial connect, no more = no reconnects
- verify(reconnectStrategyFactory, times(1)).createReconnectStrategy();
- }
-
private SimpleDispatcher getClientDispatcher() {
return new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
@Override
"Top level container encapsulating configuration of all modules.";
list module {
- key "name";
+ key "type name";
leaf name {
description "Unique module instance name";
type string;
package org.opendaylight.controller.config.api;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
import org.junit.Assert;
import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.config.api.annotations.AbstractServiceInterface;
import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import javax.management.*;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
-
public class IdentityAttributeRefTest {
IdentityAttributeRef attr = new IdentityAttributeRef("attr");
package org.opendaylight.controller.config.api;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import com.google.common.collect.Lists;
import java.nio.file.AccessDeniedException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-
-import com.google.common.collect.Lists;
import org.hamcrest.CoreMatchers;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
-import javax.management.Query;
-
-import static org.junit.Assert.*;
-
public class JmxAttributeValidationExceptionTest {
- private JmxAttribute jmxAttribute = new JmxAttribute("attr1");
+ private final JmxAttribute jmxAttribute = new JmxAttribute("attr1");
@Test
public void testJmxAttributeValidationExceptionElement() throws Exception {
@Test
public void testJmxAttributeValidationExceptionList() throws Exception {
- List attributeNames = new ArrayList<JmxAttribute>();
+ List<JmxAttribute> attributeNames = new ArrayList<>();
attributeNames.add(new JmxAttribute("att1"));
attributeNames.add(new JmxAttribute("att2"));
attributeNames.add(new JmxAttribute("att3"));
@Test
public void testJmxAttributeValidationExceptionList2() throws Exception {
- List attributeNames = new ArrayList<JmxAttribute>();
+ List<JmxAttribute> attributeNames = new ArrayList<>();
attributeNames.add(new JmxAttribute("att1"));
attributeNames.add(new JmxAttribute("att2"));
attributeNames.add(new JmxAttribute("att3"));
JmxAttributeValidationException.checkCondition(false, "message", jmxAttribute);
}
- private void assertJmxEx(JmxAttributeValidationException e, String message, JmxAttribute... attrNames) {
+ private void assertJmxEx(final JmxAttributeValidationException e, final String message, final JmxAttribute... attrNames) {
assertEquals(message, e.getMessage());
assertEquals(Lists.newArrayList(attrNames), e.getAttributeNames());
}
package org.opendaylight.controller.config.api;
-import junit.framework.Assert;
-import org.junit.Test;
-
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
+import org.junit.Test;
public class ModuleIdentifierTest {
String fact = new String("factory");
package org.opendaylight.controller.config.api;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
-import static org.junit.Assert.assertNotEquals;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
-import static org.junit.matchers.JUnitMatchers.containsString;
-
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import com.google.common.collect.Lists;
import java.util.Map;
-
import org.junit.Assert;
import org.junit.Test;
public class ValidationExceptionTest {
- private String instance = "instance";
+ private final String instance = "instance";
private final ModuleIdentifier mi = new ModuleIdentifier("module", instance);
- private String instance2 = "instance2";
+ private final String instance2 = "instance2";
private final ModuleIdentifier mi2 = new ModuleIdentifier("module", instance2);
private final String message = "ex message";
private final Exception e = new IllegalStateException(message);
package org.opendaylight.controller.config.api.jmx;
+import java.util.ArrayList;
+import java.util.List;
+import javax.management.ObjectName;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
-import javax.management.ObjectName;
-import java.util.ArrayList;
-import java.util.List;
-
public class CommitStatusTest {
- List newInst = new ArrayList<ObjectName>();
- List reusedInst = new ArrayList<ObjectName>();
- List recreatedInst = new ArrayList<ObjectName>();
+ List<ObjectName> newInst = new ArrayList<>();
+ List<ObjectName> reusedInst = new ArrayList<>();
+ List<ObjectName> recreatedInst = new ArrayList<>();
@Before
public void setUp() throws Exception {
@Test
public void testNotEqual() throws Exception {
- List newInst2 = new ArrayList<ObjectName>();
- List reusedInst2 = new ArrayList<ObjectName>();
- List recreatedInst2 = new ArrayList<ObjectName>();
+ List<ObjectName> newInst2 = new ArrayList<>();
+ List<ObjectName> reusedInst2 = new ArrayList<>();
+ List<ObjectName> recreatedInst2 = new ArrayList<>();
newInst2.add(new ObjectName("first: key1 = value1"));
reusedInst2.add(new ObjectName("second: key = val"));
*/
package org.opendaylight.controller.config.api.jmx;
-import static junit.framework.Assert.fail;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-
+import static org.junit.Assert.fail;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
-
import java.util.HashMap;
import java.util.Map;
import javax.management.ObjectName;
-import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.config.api.ModuleIdentifier;
assertPattern(on, pattern);
}
- private void assertPattern(ObjectName test, ObjectName pattern) {
+ private void assertPattern(final ObjectName test, final ObjectName pattern) {
assertTrue(pattern.isPattern());
assertTrue(pattern.apply(test));
}
}, IllegalArgumentException.class);
}
- private void assertFailure(Runnable test, Class<? extends Exception> ex) {
+ private void assertFailure(final Runnable test, final Class<? extends Exception> ex) {
try {
test.run();
} catch(Exception e) {
- Assert.assertTrue("Failed with wrong exception: " + Throwables.getStackTraceAsString(e),
+ assertTrue("Failed with wrong exception: " + Throwables.getStackTraceAsString(e),
e.getClass().isAssignableFrom(ex));
return;
}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-artifacts</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-manager</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-manager</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-module-archetype</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-netty-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-api</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-directory-adapter</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-directory-autodetect-adapter</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-directory-xml-adapter</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-feature-adapter</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-file-adapter</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-file-xml-adapter</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-plugin-parent</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-util</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-util</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>logback-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>logback-config-loader</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netty-config-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netty-event-executor-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netty-threadgroup-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netty-timer-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>shutdown-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>shutdown-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>threadpool-config-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>yang-jmx-generator</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>yang-jmx-generator</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>yang-store-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>yang-store-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>yang-test</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>features-config</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>features-config-netty</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>features-config-persister</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-netty-features</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+</project>
+
package org.opendaylight.controller.config.manager.impl;
import static com.google.common.base.Preconditions.checkNotNull;
-
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
}
@Override
- public Set<ObjectName> lookupConfigBeans(String moduleName) {
+ public Set<ObjectName> lookupConfigBeans(final String moduleName) {
throw new UnsupportedOperationException();
}
@Override
- public Set<ObjectName> lookupConfigBeans(String moduleName, String instanceName) {
+ public Set<ObjectName> lookupConfigBeans(final String moduleName, final String instanceName) {
throw new UnsupportedOperationException();
}
@Override
- public ObjectName lookupConfigBean(String moduleName, String instanceName) throws InstanceNotFoundException {
+ public ObjectName lookupConfigBean(final String moduleName, final String instanceName) throws InstanceNotFoundException {
throw new UnsupportedOperationException();
}
@Override
- public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
+ public void checkConfigBeanExists(final ObjectName objectName) throws InstanceNotFoundException {
throw new InstanceNotFoundException("Cannot find " + objectName + " - Tried to use mocking registry");
}
}
@Override
- public ServiceReferenceJMXRegistration registerMBean(ServiceReferenceMXBeanImpl object, ObjectName on) throws InstanceAlreadyExistsException {
+ public ServiceReferenceJMXRegistration registerMBean(final ServiceReferenceMXBeanImpl object, final ObjectName on) throws InstanceAlreadyExistsException {
throw new UnsupportedOperationException();
}
/**
* Static constructor for transaction controller. Take current state as seen by config registry, allow writing new data.
*/
- public static SearchableServiceReferenceWritableRegistry createSRWritableRegistry(ServiceReferenceReadableRegistry oldReadableRegistry,
- ConfigTransactionLookupRegistry txLookupRegistry,
- Map<String, Map.Entry<ModuleFactory, BundleContext>> currentlyRegisteredFactories) {
+ public static SearchableServiceReferenceWritableRegistry createSRWritableRegistry(final ServiceReferenceReadableRegistry oldReadableRegistry,
+ final ConfigTransactionLookupRegistry txLookupRegistry,
+ final Map<String, Map.Entry<ModuleFactory, BundleContext>> currentlyRegisteredFactories) {
if (txLookupRegistry == null) {
throw new IllegalArgumentException("txLookupRegistry is null");
/**
* Copy back state to config registry after commit.
*/
- public static CloseableServiceReferenceReadableRegistry createSRReadableRegistry(ServiceReferenceWritableRegistry oldWritableRegistry,
- LookupRegistry lookupRegistry, BaseJMXRegistrator baseJMXRegistrator) {
+ public static CloseableServiceReferenceReadableRegistry createSRReadableRegistry(final ServiceReferenceWritableRegistry oldWritableRegistry,
+ final LookupRegistry lookupRegistry, final BaseJMXRegistrator baseJMXRegistrator) {
ServiceReferenceRegistryImpl old = (ServiceReferenceRegistryImpl) oldWritableRegistry;
// even if factories do change, nothing in the mapping can change between transactions
/**
 * Fills the refNames and mBeans maps from the old instance.
 */
- private static void copy(ServiceReferenceRegistryImpl old, ServiceReferenceRegistryImpl newRegistry, String nullableDstTransactionName) {
+ private static void copy(final ServiceReferenceRegistryImpl old, final ServiceReferenceRegistryImpl newRegistry, final String nullableDstTransactionName) {
for (Entry<ServiceReference, Entry<ServiceReferenceMXBeanImpl, ServiceReferenceJMXRegistration>> refNameEntry : old.mBeans.entrySet()) {
ObjectName currentImplementation;
ObjectName currentImplementationSrc = refNameEntry.getValue().getKey().getCurrentImplementation();
}
}
- private static Map<String, ModuleFactory> extractFactoriesMap(Map<String, Map.Entry<ModuleFactory, BundleContext>> currentlyRegisteredFactories) {
+ private static Map<String, ModuleFactory> extractFactoriesMap(final Map<String, Map.Entry<ModuleFactory, BundleContext>> currentlyRegisteredFactories) {
Map<String, ModuleFactory> result = new HashMap<>();
for (Entry<String, Entry<ModuleFactory, BundleContext>> entry : currentlyRegisteredFactories.entrySet()) {
result.put(entry.getKey(), entry.getValue().getKey());
return result;
}
- private ServiceReferenceRegistryImpl(Map<String, ModuleFactory> factories, LookupRegistry lookupRegistry,
- ServiceReferenceTransactionRegistratorFactory serviceReferenceRegistratorFactory,
- boolean writable) {
+ private ServiceReferenceRegistryImpl(final Map<String, ModuleFactory> factories, final LookupRegistry lookupRegistry,
+ final ServiceReferenceTransactionRegistratorFactory serviceReferenceRegistratorFactory,
+ final boolean writable) {
this.factories = factories;
this.writable = writable;
this.lookupRegistry = lookupRegistry;
Set<String> qNames = InterfacesHelper.getQNames(siAnnotations);
allAnnotations.addAll(siAnnotations);
allQNameSet.addAll(qNames);
- modifiableFactoryNamesToQNames.put(entry.getKey(), Collections.unmodifiableSet(qNames));
+ modifiableFactoryNamesToQNames.put(entry.getKey(), qNames);
}
- this.factoryNamesToQNames = Collections.unmodifiableMap(modifiableFactoryNamesToQNames);
- this.allQNames = Collections.unmodifiableSet(allQNameSet);
+ this.factoryNamesToQNames = ImmutableMap.copyOf(modifiableFactoryNamesToQNames);
+ this.allQNames = ImmutableSet.copyOf(allQNameSet);
// fill namespacesToAnnotations
Map<String /* namespace */, Map<String /* localName */, ServiceInterfaceAnnotation>> modifiableNamespacesToAnnotations =
new HashMap<>();
ofNamespace.put(sia.localName(), sia);
modifiableServiceQNamesToAnnotations.put(sia.value(), sia);
}
- this.namespacesToAnnotations = Collections.unmodifiableMap(modifiableNamespacesToAnnotations);
- this.serviceQNamesToAnnotations = Collections.unmodifiableMap(modifiableServiceQNamesToAnnotations);
+ this.namespacesToAnnotations = ImmutableMap.copyOf(modifiableNamespacesToAnnotations);
+ this.serviceQNamesToAnnotations = ImmutableMap.copyOf(modifiableServiceQNamesToAnnotations);
LOGGER.trace("factoryNamesToQNames:{}", this.factoryNamesToQNames);
}
@Override
- public Map<ServiceInterfaceAnnotation, String /* service ref name */> findServiceInterfaces(ModuleIdentifier moduleIdentifier) {
+ public Map<ServiceInterfaceAnnotation, String /* service ref name */> findServiceInterfaces(final ModuleIdentifier moduleIdentifier) {
Map<ServiceInterfaceAnnotation, String /* service ref name */> result = modulesToServiceRef.get(moduleIdentifier);
if (result == null) {
return Collections.emptyMap();
}
@Override
- public synchronized Set<String> lookupServiceInterfaceNames(ObjectName objectName) throws InstanceNotFoundException {
+ public synchronized Set<String> lookupServiceInterfaceNames(final ObjectName objectName) throws InstanceNotFoundException {
lookupRegistry.checkConfigBeanExists(objectName);
String factoryName = ObjectNameUtil.getFactoryName(objectName);
}
@Override
- public synchronized String getServiceInterfaceName(String namespace, String localName) {
+ public synchronized String getServiceInterfaceName(final String namespace, final String localName) {
Map<String /* localName */, ServiceInterfaceAnnotation> ofNamespace = namespacesToAnnotations.get(namespace);
if (ofNamespace == null) {
LOGGER.error("Cannot find namespace {} in {}", namespace, namespacesToAnnotations);
return result;
}
- private ObjectName getObjectName(ModuleIdentifier moduleIdentifier) {
+ private ObjectName getObjectName(final ModuleIdentifier moduleIdentifier) {
ObjectName on;
try {
on = lookupRegistry.lookupConfigBean(moduleIdentifier.getFactoryName(), moduleIdentifier.getInstanceName());
}
@Override
- public synchronized ObjectName lookupConfigBeanByServiceInterfaceName(String serviceInterfaceQName, String refName) {
+ public synchronized ObjectName lookupConfigBeanByServiceInterfaceName(final String serviceInterfaceQName, final String refName) {
ServiceReference serviceReference = new ServiceReference(serviceInterfaceQName, refName);
ModuleIdentifier moduleIdentifier = refNames.get(serviceReference);
if (moduleIdentifier == null) {
}
@Override
- public synchronized Map<String /* refName */, ObjectName> lookupServiceReferencesByServiceInterfaceName(String serviceInterfaceQName) {
+ public synchronized Map<String /* refName */, ObjectName> lookupServiceReferencesByServiceInterfaceName(final String serviceInterfaceQName) {
Map<String, Map<String, ObjectName>> serviceMapping = getServiceMapping();
Map<String, ObjectName> innerMap = serviceMapping.get(serviceInterfaceQName);
if (innerMap == null) {
}
@Override
- public synchronized ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
+ public synchronized ObjectName getServiceReference(final String serviceInterfaceQName, final String refName) throws InstanceNotFoundException {
ServiceReference serviceReference = new ServiceReference(serviceInterfaceQName, refName);
if (mBeans.containsKey(serviceReference) == false) {
throw new InstanceNotFoundException("Cannot find " + serviceReference);
}
@Override
- public synchronized void checkServiceReferenceExists(ObjectName objectName) throws InstanceNotFoundException {
+ public synchronized void checkServiceReferenceExists(final ObjectName objectName) throws InstanceNotFoundException {
String actualTransactionName = ObjectNameUtil.getTransactionName(objectName);
String expectedTransactionName = serviceReferenceRegistrator.getNullableTransactionName();
if (writable & actualTransactionName == null || (writable && actualTransactionName.equals(expectedTransactionName) == false)) {
}
@Override
- public synchronized ObjectName saveServiceReference(String serviceInterfaceName, String refName, ObjectName moduleON) throws InstanceNotFoundException {
+ public synchronized ObjectName saveServiceReference(final String serviceInterfaceName, final String refName, final ObjectName moduleON) throws InstanceNotFoundException {
assertWritable();
ServiceReference serviceReference = new ServiceReference(serviceInterfaceName, refName);
return saveServiceReference(serviceReference, moduleON);
}
- private synchronized ObjectName saveServiceReference(ServiceReference serviceReference, ObjectName moduleON)
+ private synchronized ObjectName saveServiceReference(final ServiceReference serviceReference, final ObjectName moduleON)
throws InstanceNotFoundException{
return saveServiceReference(serviceReference, moduleON, false);
}
- private synchronized ObjectName saveServiceReference(ServiceReference serviceReference, ObjectName moduleON,
- boolean skipChecks) throws InstanceNotFoundException {
+ private synchronized ObjectName saveServiceReference(final ServiceReference serviceReference, final ObjectName moduleON,
+ final boolean skipChecks) throws InstanceNotFoundException {
// make sure it is found
if (skipChecks == false) {
}
@Override
- public ServiceReferenceJMXRegistration setValue(ServiceReferenceJMXRegistration value) {
+ public ServiceReferenceJMXRegistration setValue(final ServiceReferenceJMXRegistration value) {
throw new UnsupportedOperationException();
}
};
}
- private ObjectName getServiceON(ServiceReference serviceReference) {
+ private ObjectName getServiceON(final ServiceReference serviceReference) {
if (writable) {
return ObjectNameUtil.createTransactionServiceON(serviceReferenceRegistrator.getNullableTransactionName(),
serviceReference.getServiceInterfaceQName(), serviceReference.getRefName());
}
@Override
- public synchronized void removeServiceReference(String serviceInterfaceName, String refName) throws InstanceNotFoundException{
+ public synchronized void removeServiceReference(final String serviceInterfaceName, final String refName) throws InstanceNotFoundException{
ServiceReference serviceReference = new ServiceReference(serviceInterfaceName, refName);
removeServiceReference(serviceReference);
}
- private synchronized void removeServiceReference(ServiceReference serviceReference) throws InstanceNotFoundException {
+ private synchronized void removeServiceReference(final ServiceReference serviceReference) throws InstanceNotFoundException {
LOGGER.debug("Removing service reference {} from {}", serviceReference, this);
assertWritable();
// is the qName known?
}
@Override
- public synchronized boolean removeServiceReferences(ObjectName moduleObjectName) throws InstanceNotFoundException {
+ public synchronized boolean removeServiceReferences(final ObjectName moduleObjectName) throws InstanceNotFoundException {
lookupRegistry.checkConfigBeanExists(moduleObjectName);
String factoryName = ObjectNameUtil.getFactoryName(moduleObjectName);
// check that the service interface name exists
}
- private boolean removeServiceReferences(ObjectName moduleObjectName, Set<String> qNames) throws InstanceNotFoundException {
+ private boolean removeServiceReferences(final ObjectName moduleObjectName, final Set<String> qNames) throws InstanceNotFoundException {
ObjectNameUtil.checkType(moduleObjectName, ObjectNameUtil.TYPE_MODULE);
assertWritable();
Set<ServiceReference> serviceReferencesLinkingTo = findServiceReferencesLinkingTo(moduleObjectName, qNames);
return serviceReferencesLinkingTo.isEmpty() == false;
}
- private Set<ServiceReference> findServiceReferencesLinkingTo(ObjectName moduleObjectName, Set<String> serviceInterfaceQNames) {
+ private Set<ServiceReference> findServiceReferencesLinkingTo(final ObjectName moduleObjectName, final Set<String> serviceInterfaceQNames) {
String factoryName = ObjectNameUtil.getFactoryName(moduleObjectName);
if (serviceInterfaceQNames == null) {
LOGGER.warn("Possible error in code: cannot find factoryName {} in {}, object name {}", factoryName, factoryNamesToQNames, moduleObjectName);
*/
package org.opendaylight.controller.config.manager.impl.util;
-import org.opendaylight.controller.config.api.annotations.AbstractServiceInterface;
-import org.opendaylight.controller.config.api.annotations.ServiceInterfaceAnnotation;
-import org.opendaylight.controller.config.spi.Module;
-import org.opendaylight.controller.config.spi.ModuleFactory;
-
-import javax.management.JMX;
+import com.google.common.collect.ImmutableSet;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
+import javax.management.JMX;
+import org.opendaylight.controller.config.api.annotations.AbstractServiceInterface;
+import org.opendaylight.controller.config.api.annotations.ServiceInterfaceAnnotation;
+import org.opendaylight.controller.config.spi.Module;
+import org.opendaylight.controller.config.spi.ModuleFactory;
-public class InterfacesHelper {
+public final class InterfacesHelper {
private InterfacesHelper() {
}
}
- private static Set<Class<?>> getAllSuperInterfaces(Set<Class<?>> ifcs) {
+ private static Set<Class<?>> getAllSuperInterfaces(final Set<? extends Class<?>> ifcs) {
Set<Class<?>> interfaces = new HashSet<>(ifcs); // create copy to modify
// each interface can extend other interfaces
Set<Class<?>> result = new HashSet<>();
* Get interfaces that this class is derived from that are JMX interfaces.
*/
public static Set<Class<?>> getMXInterfaces(
- Class<? extends Module> configBeanClass) {
+ final Class<? extends Module> configBeanClass) {
Set<Class<?>> allInterfaces = getAllInterfaces(configBeanClass);
Set<Class<?>> result = new HashSet<>();
for (Class<?> clazz : allInterfaces) {
* annotation.
*/
public static Set<Class<?>> getServiceInterfaces(
- Class<? extends Module> configBeanClass) {
+ final Class<? extends Module> configBeanClass) {
Set<Class<?>> allInterfaces = getAllInterfaces(configBeanClass);
Set<Class<?>> result = new HashSet<>();
for (Class<?> clazz : allInterfaces) {
return result;
}
- public static Set<Class<? extends AbstractServiceInterface>> getAllAbstractServiceClasses(Class<? extends Module> configBeanClass) {
+ public static Set<Class<? extends AbstractServiceInterface>> getAllAbstractServiceClasses(final Class<? extends Module> configBeanClass) {
Set<Class<? extends AbstractServiceInterface>> foundGeneratedSIClasses = new HashSet<>();
for (Class<?> clazz : getAllInterfaces(configBeanClass)) {
* {@link org.opendaylight.controller.config.api.annotations.ServiceInterfaceAnnotation#osgiRegistrationType()}
*/
public static Set<Class<?>> getOsgiRegistrationTypes(
- Class<? extends Module> configBeanClass) {
+ final Class<? extends Module> configBeanClass) {
Set<Class<?>> serviceInterfaces = getServiceInterfaces(configBeanClass);
Set<Class<?>> result = new HashSet<>();
for (Class<?> clazz : serviceInterfaces) {
return result;
}
- public static Set<String> getQNames(Set<ServiceInterfaceAnnotation> siAnnotations) {
+ public static Set<String> getQNames(final Set<ServiceInterfaceAnnotation> siAnnotations) {
Set<String> qNames = new HashSet<>();
for (ServiceInterfaceAnnotation sia: siAnnotations) {
qNames.add(sia.value());
}
- return Collections.unmodifiableSet(qNames);
+ return ImmutableSet.copyOf(qNames);
}
- public static Set<ServiceInterfaceAnnotation> getServiceInterfaceAnnotations(ModuleFactory factory) {
+ public static Set<ServiceInterfaceAnnotation> getServiceInterfaceAnnotations(final ModuleFactory factory) {
Set<Class<? extends AbstractServiceInterface>> implementedServiceIntefaces = Collections.unmodifiableSet(factory.getImplementedServiceIntefaces());
return getServiceInterfaceAnnotations(implementedServiceIntefaces);
}
- private static Set<ServiceInterfaceAnnotation> getServiceInterfaceAnnotations(Set<Class<? extends AbstractServiceInterface>> implementedServiceIntefaces) {
+ private static Set<ServiceInterfaceAnnotation> getServiceInterfaceAnnotations(final Set<Class<? extends AbstractServiceInterface>> implementedServiceIntefaces) {
Set<Class<? extends AbstractServiceInterface>> inspected = getAllAbstractServiceInterfaceClasses(implementedServiceIntefaces);
Set<ServiceInterfaceAnnotation> result = new HashSet<>();
// SIs can form hierarchies, inspect superclass until it does not extend AbstractSI
}
static Set<Class<? extends AbstractServiceInterface>> getAllAbstractServiceInterfaceClasses(
- Set<Class<? extends AbstractServiceInterface>> directlyImplementedAbstractSIs) {
+ final Set<Class<? extends AbstractServiceInterface>> directlyImplementedAbstractSIs) {
- Set<Class<?>> allInterfaces = getAllSuperInterfaces((Set) directlyImplementedAbstractSIs);
+ Set<Class<?>> allInterfaces = getAllSuperInterfaces(directlyImplementedAbstractSIs);
Set<Class<? extends AbstractServiceInterface>> result = new HashSet<>();
for(Class<?> ifc: allInterfaces){
if (AbstractServiceInterface.class.isAssignableFrom(ifc) &&
package org.opendaylight.controller.config.manager.impl.util;
import static com.google.common.base.Preconditions.checkNotNull;
-
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
}
@SafeVarargs
- public static <T> AutoCloseable registerService(BundleContext bundleContext, T service, Class<? super T> ... interfaces) {
+ public static <T> AutoCloseable registerService(final BundleContext bundleContext, final T service, final Class<? super T> ... interfaces) {
checkNotNull(service);
checkNotNull(interfaces);
List<AutoCloseable> autoCloseableList = new ArrayList<>();
};
}
- public static AutoCloseable wrap(final BundleTracker bundleTracker) {
+ public static AutoCloseable wrap(final BundleTracker<?> bundleTracker) {
checkNotNull(bundleTracker);
return new AutoCloseable() {
@Override
Object serviceTypeRaw = args[0];
Object serviceInstance = args[1];
- Dictionary<String, ?> props = (Dictionary) args[2];
+ Dictionary<String, ?> props = (Dictionary<String, ?>) args[2];
if (serviceTypeRaw instanceof Class) {
Class<?> serviceType = (Class<?>) serviceTypeRaw;
tested.put(moduleIdentifier,
mockedModule(),
- moduleFactory,
- maybeOldInternalInfo,
- transactionModuleJMXRegistration,
- isDefaultBean, mock(BundleContext.class));
+ moduleFactory,
+ maybeOldInternalInfo,
+ transactionModuleJMXRegistration,
+ isDefaultBean, mock(BundleContext.class));
}
private static Module mockedModule() {
package org.opendaylight.controller.config.manager.impl.osgi;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertSame;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-
import java.util.Collections;
import javax.management.ObjectName;
import org.junit.Before;
package org.opendaylight.controller.config.manager.impl.osgi;
-import static junit.framework.Assert.fail;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
-import static org.junit.matchers.JUnitMatchers.containsString;
+import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
-
import com.google.common.collect.Lists;
import java.util.Map;
import org.junit.Before;
@Mock
private BundleContext bundleContext;
private BundleContextBackedModuleFactoriesResolver resolver;
- private ServiceReference s1;
- private ServiceReference s2;
+ private ServiceReference<?> s1;
+ private ServiceReference<?> s2;
private ModuleFactory f1;
private ModuleFactory f2;
resolver = new BundleContextBackedModuleFactoriesResolver(bundleContext);
}
- private ModuleFactory getMockFactory(String name) {
+ private ModuleFactory getMockFactory(final String name) {
ModuleFactory mock = mock(ModuleFactory.class);
doReturn(name).when(mock).toString();
doReturn(name).when(mock).getImplementationName();
return mock;
}
- private ServiceReference getServiceRef() {
- ServiceReference mock = mock(ServiceReference.class);
+ private ServiceReference<?> getServiceRef() {
+ ServiceReference<?> mock = mock(ServiceReference.class);
doReturn("serviceRef").when(mock).toString();
final Bundle bundle = mock(Bundle.class);
doReturn(bundleContext).when(bundle).getBundleContext();
package org.opendaylight.controller.config.manager.impl.osgi;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
-
import java.util.Dictionary;
import java.util.Set;
-
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
@Before
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
- doAnswer(new Answer() {
+ doAnswer(new Answer<Object>() {
@Override
public Object answer(final InvocationOnMock invocation) throws Throwable {
return getClass().getClassLoader().loadClass((String) invocation.getArguments()[0]);
}
static class WrongConstructorTestingFactory extends TestingFactory {
- WrongConstructorTestingFactory(String randomParam) {
+ WrongConstructorTestingFactory(final String randomParam) {
}
}
package org.opendaylight.controller.config.manager.impl.osgi;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import java.util.Dictionary;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.config.manager.impl.osgi.mapping.RefreshingSCPModuleInfoRegistry;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceRegistration;
-import java.util.*;
-
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
public class RefreshingSCPModuleInfoRegistryTest {
@Test
public void testConstructor() throws Exception {
doReturn("string").when(prov).toString();
BundleContext ctxt = mock(BundleContext.class);
- Dictionary dict = new Hashtable();
- ServiceRegistration servReg = mock(ServiceRegistration.class);
+ ServiceRegistration<?> servReg = mock(ServiceRegistration.class);
doReturn(servReg).when(ctxt).registerService(Mockito.any(Class.class), Mockito.any(SchemaContextProvider.class), Mockito.any(Dictionary.class));
doReturn(servReg).when(ctxt).registerService(Mockito.anyString(), Mockito.any(Object.class), Mockito.any(Dictionary.class));
RefreshingSCPModuleInfoRegistry scpreg = new RefreshingSCPModuleInfoRegistry(reg, prov, ctxt);
package org.opendaylight.controller.config.manager.impl.util;
import static org.junit.Assert.assertEquals;
-
+import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Collections;
import java.util.HashSet;
input.add(clazz);
Set<Class<? extends AbstractServiceInterface>> result = InterfacesHelper.getAllAbstractServiceInterfaceClasses(input);
- Set<Class<?>> expected = Sets.newHashSet((Class<?>) TestingScheduledThreadPoolServiceInterface.class,
+ Set<Class<?>> expected = ImmutableSet.of((Class<?>) TestingScheduledThreadPoolServiceInterface.class,
TestingThreadPoolServiceInterface.class
);
assertEquals(expected, result);
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
-
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
OsgiRegistrationUtil.wrap(serviceReg).close();
verify(serviceReg).unregister();
- final BundleTracker tracker = mock(BundleTracker.class);
+ final BundleTracker<?> tracker = mock(BundleTracker.class);
doNothing().when(tracker).close();
OsgiRegistrationUtil.wrap(tracker).close();
verify(tracker).close();
}
private ServiceRegistration<?> mockServiceRegistration() {
- ServiceRegistration mock = mock(ServiceRegistration.class);
+ ServiceRegistration<?> mock = mock(ServiceRegistration.class);
doNothing().when(mock).unregister();
return mock;
}
*/
package org.opendaylight.controller.config.manager.testingservices.scheduledthreadpool;
-import com.google.common.collect.Sets;
+import com.google.common.collect.ImmutableSet;
+import java.util.HashSet;
+import java.util.Set;
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DependencyResolverFactory;
import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
import org.opendaylight.controller.config.spi.ModuleFactory;
import org.osgi.framework.BundleContext;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
public class TestingScheduledThreadPoolModuleFactory implements ModuleFactory {
public static final String NAME = "scheduled";
- private static Set<Class<? extends AbstractServiceInterface>> ifc = Collections.unmodifiableSet(Sets.newHashSet(
+ private static Set<Class<? extends AbstractServiceInterface>> ifc = ImmutableSet.of(
(Class<? extends AbstractServiceInterface>) TestingScheduledThreadPoolServiceInterface.class,
- TestingThreadPoolServiceInterface.class));
+ TestingThreadPoolServiceInterface.class);
@Override
public boolean isModuleImplementingServiceInterface(
- Class<? extends AbstractServiceInterface> serviceInterface) {
+ final Class<? extends AbstractServiceInterface> serviceInterface) {
return ifc.contains(serviceInterface);
}
}
@Override
- public Module createModule(String instanceName,
- DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ public Module createModule(final String instanceName,
+ final DependencyResolver dependencyResolver, final BundleContext bundleContext) {
return new TestingScheduledThreadPoolModule(new ModuleIdentifier(NAME,
instanceName), null, null);
}
@Override
- public Module createModule(String instanceName,
- DependencyResolver dependencyResolver, DynamicMBeanWithInstance old, BundleContext bundleContext)
+ public Module createModule(final String instanceName,
+ final DependencyResolver dependencyResolver, final DynamicMBeanWithInstance old, final BundleContext bundleContext)
throws Exception {
TestingScheduledThreadPoolImpl oldInstance;
try {
}
@Override
- public Set<Module> getDefaultModules(DependencyResolverFactory dependencyResolverFactory, BundleContext bundleContext) {
+ public Set<Module> getDefaultModules(final DependencyResolverFactory dependencyResolverFactory, final BundleContext bundleContext) {
return new HashSet<Module>();
}
*/
package org.opendaylight.controller.config.manager.testingservices.scheduledthreadpool.test;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
-
import javax.annotation.Nullable;
import javax.management.DynamicMBean;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
-
import org.junit.Test;
import org.opendaylight.controller.config.api.ValidationException;
import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
import org.opendaylight.controller.config.manager.testingservices.parallelapsp.TestingParallelAPSPConfigMXBean;
import org.opendaylight.controller.config.manager.testingservices.parallelapsp.TestingParallelAPSPModuleFactory;
import org.opendaylight.controller.config.manager.testingservices.scheduledthreadpool.TestingScheduledThreadPoolImpl;
-import org.opendaylight.controller.config.manager.testingservices.scheduledthreadpool
- .TestingScheduledThreadPoolModuleFactory;
+import org.opendaylight.controller.config.manager.testingservices.scheduledthreadpool.TestingScheduledThreadPoolModuleFactory;
import org.opendaylight.controller.config.util.ConfigTransactionJMXClient;
public class TwoInterfacesExportTest extends AbstractScheduledTest {
- private void assertExists(String moduleName, String instanceName)
+ private void assertExists(final String moduleName, final String instanceName)
throws Exception {
assertExists(null, moduleName, instanceName);
}
- private void assertExists(@Nullable ConfigTransactionJMXClient transaction,
- String moduleName, String instanceName)
+ private void assertExists(@Nullable final ConfigTransactionJMXClient transaction,
+ final String moduleName, final String instanceName)
throws InstanceNotFoundException {
if (transaction != null) {
transaction.lookupConfigBean(moduleName, instanceName);
}
}
- private void assertNotExists(String moduleName, String instanceName) {
+ private void assertNotExists(final String moduleName, final String instanceName) {
assertNotExists(null, moduleName, instanceName);
}
private void assertNotExists(
- @Nullable ConfigTransactionJMXClient transaction,
- String moduleName, String instanceName) {
+ @Nullable final ConfigTransactionJMXClient transaction,
+ final String moduleName, final String instanceName) {
if (transaction != null) {
try {
*/
package org.opendaylight.controller.config.manager.testingservices.threadpool;
-import com.google.common.collect.Sets;
+import com.google.common.collect.ImmutableSet;
+import java.util.HashSet;
+import java.util.Set;
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DependencyResolverFactory;
import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
import org.opendaylight.controller.config.spi.ModuleFactory;
import org.osgi.framework.BundleContext;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
public class TestingFixedThreadPoolModuleFactory extends AbstractTestingFixedThreadPoolModuleFactory implements ModuleFactory {
public static final String NAME = "fixed";
- private static Set<Class<? extends AbstractServiceInterface>> ifc = Collections.unmodifiableSet(Sets.newHashSet(
+ private static final Set<Class<? extends AbstractServiceInterface>> ifc = ImmutableSet.of(
(Class<? extends AbstractServiceInterface>) ModifiableThreadPoolServiceInterface.class,
- TestingThreadPoolServiceInterface.class));
+ TestingThreadPoolServiceInterface.class);
@Override
public String getImplementationName() {
}
@Override
- public TestingFixedThreadPoolModule createModule(String instanceName,
- DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ public TestingFixedThreadPoolModule createModule(final String instanceName,
+ final DependencyResolver dependencyResolver, final BundleContext bundleContext) {
return new TestingFixedThreadPoolModule(new ModuleIdentifier(NAME,
instanceName), null, null);
}
@Override
- public Module createModule(String instanceName,
- DependencyResolver dependencyResolver, DynamicMBeanWithInstance old, BundleContext bundleContext)
+ public Module createModule(final String instanceName,
+ final DependencyResolver dependencyResolver, final DynamicMBeanWithInstance old, final BundleContext bundleContext)
throws Exception {
int threadCount = (Integer) old.getAttribute("ThreadCount");
// is the instance compatible?
@Override
public boolean isModuleImplementingServiceInterface(
- Class<? extends AbstractServiceInterface> serviceInterface) {
+ final Class<? extends AbstractServiceInterface> serviceInterface) {
return ifc.contains(serviceInterface);
}
@Override
- public Set<Module> getDefaultModules(DependencyResolverFactory dependencyResolverFactory, BundleContext bundleContext) {
+ public Set<Module> getDefaultModules(final DependencyResolverFactory dependencyResolverFactory, final BundleContext bundleContext) {
return new HashSet<Module>();
}
<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
<!--
Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-subsystem</artifactId>
- <version>0.3.0-SNAPSHOT</version>
- </parent>
- <artifactId>config-netty-config</artifactId>
- <description>Configuration files for sal-rest-connector</description>
- <packaging>jar</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- <executions>
- <execution>
- <id>attach-artifacts</id>
- <goals>
- <goal>attach-artifact</goal>
- </goals>
- <phase>package</phase>
- <configuration>
- <artifacts>
- <artifact>
- <file>${project.build.directory}/classes/initial/00-netty.xml</file>
- <type>xml</type>
- <classifier>config</classifier>
- </artifact>
- </artifacts>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-subsystem</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>config-netty-config</artifactId>
+ <description>Configuration files for netty</description>
+ <packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/00-netty.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
</project>
import org.opendaylight.controller.config.persist.api.PropertiesProvider;
public class PropertiesProviderTest implements PropertiesProvider {
- private final Map<String,String> properties = new HashMap();
+ private final Map<String,String> properties = new HashMap<>();
public void addProperty(String key,String value){
properties.put(key,value);
*/
package org.opendaylight.controller.config.persist.storage.directory.xml;
+import static com.google.common.base.Preconditions.checkArgument;
import com.google.common.base.Optional;
import com.google.common.io.Files;
-import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
-import org.opendaylight.controller.config.persist.api.Persister;
-import org.opendaylight.controller.config.persist.storage.file.xml.model.ConfigSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.xml.bind.JAXBContext;
-import javax.xml.bind.JAXBException;
-import javax.xml.bind.Unmarshaller;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
-
-import static com.google.common.base.Preconditions.checkArgument;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.api.Persister;
+import org.opendaylight.controller.config.persist.storage.file.xml.model.ConfigSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class XmlDirectoryPersister implements Persister {
- private static final Logger logger = LoggerFactory.getLogger(XmlDirectoryPersister.class);
+ private static final Logger LOG = LoggerFactory.getLogger(XmlDirectoryPersister.class);
private final File storage;
private final Optional<FilenameFilter> extensionsFilter;
/**
* Creates XmlDirectoryPersister that picks up all files in specified folder
*/
- public XmlDirectoryPersister(File storage) {
+ public XmlDirectoryPersister(final File storage) {
this(storage, Optional.<FilenameFilter>absent());
}
/**
* Creates XmlDirectoryPersister that picks up files only with specified file extension
*/
- public XmlDirectoryPersister(File storage, Set<String> fileExtensions) {
+ public XmlDirectoryPersister(final File storage, final Set<String> fileExtensions) {
this(storage, Optional.of(getFilter(fileExtensions)));
}
- private XmlDirectoryPersister(File storage, Optional<FilenameFilter> extensionsFilter) {
+ private XmlDirectoryPersister(final File storage, final Optional<FilenameFilter> extensionsFilter) {
checkArgument(storage.exists() && storage.isDirectory(), "Storage directory does not exist: " + storage);
this.storage = storage;
this.extensionsFilter = extensionsFilter;
}
@Override
- public void persistConfig(ConfigSnapshotHolder holder) throws IOException {
+ public void persistConfig(final ConfigSnapshotHolder holder) throws IOException {
throw new UnsupportedOperationException("This adapter is read only. Please set readonly=true on " + getClass());
}
List<File> sortedFiles = new ArrayList<>(Arrays.asList(filesArray));
Collections.sort(sortedFiles);
// combine all found files
- logger.debug("Reading files in following order: {}", sortedFiles);
+ LOG.debug("Reading files in following order: {}", sortedFiles);
List<ConfigSnapshotHolder> result = new ArrayList<>();
for (File file : sortedFiles) {
- logger.trace("Adding file '{}' to combined result", file);
+ LOG.trace("Adding file '{}' to combined result", file);
Optional<ConfigSnapshotHolder> h = fromXmlSnapshot(file);
// Ignore non valid snapshot
if(h.isPresent() == false) {
return result;
}
- private Optional<ConfigSnapshotHolder> fromXmlSnapshot(File file) {
+ private Optional<ConfigSnapshotHolder> fromXmlSnapshot(final File file) {
try {
return Optional.of(loadLastConfig(file));
} catch (JAXBException e) {
// In case of parse error, issue a warning, ignore and continue
- logger.warn(
+ LOG.warn(
"Unable to parse configuration snapshot from {}. Initial config from {} will be IGNORED in this run. " +
"Note that subsequent config files may fail due to this problem. " +
"Xml markup in this file needs to be fixed, for detailed information see enclosed exception.",
return Optional.absent();
}
- public static ConfigSnapshotHolder loadLastConfig(File file) throws JAXBException {
+ public static ConfigSnapshotHolder loadLastConfig(final File file) throws JAXBException {
JAXBContext jaxbContext = JAXBContext.newInstance(ConfigSnapshot.class);
Unmarshaller um = jaxbContext.createUnmarshaller();
return new FilenameFilter() {
@Override
- public boolean accept(File dir, String name) {
+ public boolean accept(final File dir, final String name) {
String ext = Files.getFileExtension(name);
return fileExtensions.contains(ext);
}
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Sets;
+import java.io.File;
+import java.util.Set;
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.config.persist.api.PropertiesProvider;
import org.opendaylight.controller.config.persist.api.StorageAdapter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.File;
-import java.util.Set;
-
/**
* StorageAdapter that retrieves initial configuration from a directory. If multiple files are present, snapshot and
* required capabilities will be merged together. Writing to this persister is not supported.
*/
public class XmlDirectoryStorageAdapter implements StorageAdapter {
- private static final Logger logger = LoggerFactory.getLogger(XmlDirectoryStorageAdapter.class);
+ private static final Logger LOG = LoggerFactory.getLogger(XmlDirectoryStorageAdapter.class);
public static final String DIRECTORY_STORAGE_PROP = "directoryStorage";
public static final String INCLUDE_EXT_PROP = "includeExtensions";
@Override
- public Persister instantiate(PropertiesProvider propertiesProvider) {
+ public Persister instantiate(final PropertiesProvider propertiesProvider) {
String fileStorageProperty = propertiesProvider.getProperty(DIRECTORY_STORAGE_PROP);
Preconditions.checkNotNull(fileStorageProperty, "Unable to find " + propertiesProvider.getFullKeyForReporting(DIRECTORY_STORAGE_PROP));
File storage = new File(fileStorageProperty);
String fileExtensions = propertiesProvider.getProperty(INCLUDE_EXT_PROP);
- logger.debug("Using storage: {}", storage);
+ LOG.debug("Using storage: {}", storage);
if(fileExtensions != null) {
- logger.debug("Using extensions: {}", fileExtensions);
+ LOG.debug("Using extensions: {}", fileExtensions);
return new XmlDirectoryPersister(storage, splitExtensions(fileExtensions));
} else {
return new XmlDirectoryPersister(storage);
}
}
- private Set<String> splitExtensions(String fileExtensions) {
+ private Set<String> splitExtensions(final String fileExtensions) {
return Sets.newHashSet(Splitter.on(EXTENSIONS_SEPARATOR).trimResults().omitEmptyStrings()
.split(fileExtensions));
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-
+import com.google.common.base.Optional;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.SortedSet;
-
import org.junit.Test;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
import org.slf4j.LoggerFactory;
import org.xml.sax.SAXException;
-import com.google.common.base.Optional;
-
public class DirectoryStorageAdapterTest {
+ private static final Logger LOG = LoggerFactory.getLogger(DirectoryStorageAdapterTest.class);
Persister tested;
- Logger logger = LoggerFactory.getLogger(DirectoryStorageAdapterTest.class.toString());
- private Persister instantiatePersisterFromAdapter(File file, Optional<String> extensions){
+ private Persister instantiatePersisterFromAdapter(final File file, final Optional<String> extensions){
PropertiesProviderTest pp = new PropertiesProviderTest();
pp.addProperty(XmlDirectoryStorageAdapter.DIRECTORY_STORAGE_PROP,file.getPath());
if(extensions.isPresent()) {
return dsa.instantiate(pp);
}
- private Persister instantiatePersisterFromAdapter(File file){
+ private Persister instantiatePersisterFromAdapter(final File file){
return instantiatePersisterFromAdapter(file, Optional.<String>absent());
}
}
}
- private File getFolder(String folderName) {
+ private File getFolder(final String folderName) {
File result = new File(("src/test/resources/" +
folderName).replace("/", File.separator));
assertTrue(result + " is not a directory", result.isDirectory());
File folder = getFolder("oneFile");
tested = instantiatePersisterFromAdapter(folder, Optional.of("xml"));
- logger.info("Testing : " + tested.toString());
+ LOG.info("Testing : {}", tested);
List<ConfigSnapshotHolder> results = tested.loadLastConfigs();
assertEquals(1, results.size());
ConfigSnapshotHolder result = results.get(0);
public void testOneFileWrongExtension() throws Exception {
File folder = getFolder("oneFile");
tested = instantiatePersisterFromAdapter(folder, Optional.of("aa, bb"));
- logger.info("Testing : " + tested.toString());
+ LOG.info("Testing : {}", tested);
}
- private void assertResult(ConfigSnapshotHolder result, String s, String... caps) throws SAXException, IOException {
+ private void assertResult(final ConfigSnapshotHolder result, final String s, final String... caps) throws SAXException, IOException {
assertXMLEqual(s, result.getConfigSnapshot());
int i = 0;
for (String capFromSnapshot : result.getCapabilities()) {
public void testTwoFilesAllExtensions() throws Exception {
File folder = getFolder("twoFiles");
tested = instantiatePersisterFromAdapter(folder);
- logger.info("Testing : " + tested.toString());
+ LOG.info("Testing : {}", tested);
List<ConfigSnapshotHolder> results = tested.loadLastConfigs();
assertEquals(2, results.size());
public void testTwoFilesTwoExtensions() throws Exception {
File folder = getFolder("twoFiles");
tested = instantiatePersisterFromAdapter(folder, Optional.of("xml, xml2"));
- logger.info("Testing : " + tested.toString());
+ LOG.info("Testing : {}", tested);
assertEquals(2, tested.loadLastConfigs().size());
}
public void testTwoFilesOnlyOneExtension() throws Exception {
File folder = getFolder("twoFiles");
tested = instantiatePersisterFromAdapter(folder, Optional.of("xml"));
- logger.info("Testing : " + tested.toString());
+ LOG.info("Testing : {}", tested);
List<ConfigSnapshotHolder> results = tested.loadLastConfigs();
assertEquals(1, results.size());
public void testTwoFilesOneInvalid() throws Exception {
File folder = getFolder("twoFiles_corrupt");
tested = instantiatePersisterFromAdapter(folder, Optional.of("xml"));
- logger.info("Testing : " + tested.toString());
+ LOG.info("Testing : {}", tested);
List<ConfigSnapshotHolder> results = tested.loadLastConfigs();
assertEquals(1, results.size());
import org.slf4j.LoggerFactory;
public class ConfigPusherCustomizer implements ServiceTrackerCustomizer<ConfigPusher, ConfigPusher>, AutoCloseable {
- private static final Logger LOGGER = LoggerFactory.getLogger(ConfigPusherCustomizer.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigPusherCustomizer.class);
private ConfigFeaturesListener configFeaturesListener = null;
private FeatureServiceCustomizer featureServiceCustomizer = null;
private ServiceTracker<FeaturesService,FeaturesService> fsst = null;
@Override
public ConfigPusher addingService(ServiceReference<ConfigPusher> configPusherServiceReference) {
- LOGGER.trace("Got ConfigPusherCustomizer.addingService {}", configPusherServiceReference);
+ LOG.trace("Got ConfigPusherCustomizer.addingService {}", configPusherServiceReference);
BundleContext bc = configPusherServiceReference.getBundle().getBundleContext();
ConfigPusher cpService = bc.getService(configPusherServiceReference);
featureServiceCustomizer = new FeatureServiceCustomizer(cpService);
while(true) {
try {
if(!interuppted) {
- if(toInstall.isEmpty()) {
- event = queue.take();
- } else {
- event = queue.poll(POLL_TIME, TimeUnit.MILLISECONDS);
- }
- if(event != null && event.getFeature() !=null) {
- processFeatureEvent(event,toInstall);
- }
+ if(toInstall.isEmpty()) {
+ event = queue.take();
+ } else {
+ event = queue.poll(POLL_TIME, TimeUnit.MILLISECONDS);
+ }
+ if(event != null && event.getFeature() !=null) {
+ processFeatureEvent(event,toInstall);
+ }
} else if(toInstall.isEmpty()) {
LOGGER.error("ConfigPushingRunnable - exiting");
return;
*/
package org.opendaylight.controller.configpusherfeature.internal;
+import com.google.common.collect.LinkedHashMultimap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
-
import org.apache.karaf.features.Feature;
import org.apache.karaf.features.FeaturesService;
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.LinkedHashMultimap;
-
/*
* Simple class to push configs to the config subsystem from Feature's configfiles
*/
public class FeatureConfigPusher {
- private static final Logger LOGGER = LoggerFactory.getLogger(FeatureConfigPusher.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FeatureConfigPusher.class);
private static final int MAX_RETRIES=100;
private static final int RETRY_PAUSE_MILLIS=1;
private FeaturesService featuresService = null;
/*
* @param p - ConfigPusher to push ConfigSnapshotHolders
*/
- public FeatureConfigPusher(ConfigPusher p, FeaturesService f) {
+ public FeatureConfigPusher(final ConfigPusher p, final FeaturesService f) {
pusher = p;
featuresService = f;
}
* If a Feature is not in the returned LinkedHashMultimap then we couldn't push its configs
 * (Usually because it was not yet installed)
*/
- public LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushConfigs(List<Feature> features) throws Exception, InterruptedException {
+ public LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushConfigs(final List<Feature> features) throws Exception, InterruptedException {
LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushedFeatures = LinkedHashMultimap.create();
for(Feature feature: features) {
LinkedHashSet<FeatureConfigSnapshotHolder> configSnapShots = pushConfig(feature);
return pushedFeatures;
}
- private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(Feature feature) throws Exception, InterruptedException {
+ private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(final Feature feature) throws Exception, InterruptedException {
LinkedHashSet<FeatureConfigSnapshotHolder> configs = new LinkedHashSet<FeatureConfigSnapshotHolder>();
if(isInstalled(feature)) {
ChildAwareFeatureWrapper wrappedFeature = new ChildAwareFeatureWrapper(feature,featuresService);
return configs;
}
- private boolean isInstalled(Feature feature) {
+ private boolean isInstalled(final Feature feature) {
for(int retries=0;retries<MAX_RETRIES;retries++) {
try {
List<Feature> installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
if(installedFeatures.contains(feature)) {
return true;
} else {
- LOGGER.warn("Karaf featuresService.listInstalledFeatures() has not yet finished installing feature (retry {}) {} {}",retries,feature.getName(),feature.getVersion());
+ LOG.warn("Karaf featuresService.listInstalledFeatures() has not yet finished installing feature (retry {}) {} {}",retries,feature.getName(),feature.getVersion());
}
} catch (Exception e) {
if(retries < MAX_RETRIES) {
- LOGGER.warn("Karaf featuresService.listInstalledFeatures() has thrown an exception, retry {}, Exception {}", retries,e);
+ LOG.warn("Karaf featuresService.listInstalledFeatures() has thrown an exception, retry {}", retries, e);
} else {
- LOGGER.error("Giving up on Karaf featuresService.listInstalledFeatures() which has thrown an exception, retry {}, Exception {}", retries,e);
+ LOG.error("Giving up on Karaf featuresService.listInstalledFeatures() which has thrown an exception, retry {}", retries, e);
throw e;
}
}
throw new IllegalStateException(e1);
}
}
- LOGGER.error("Giving up (after {} retries) on Karaf featuresService.listInstalledFeatures() which has not yet finished installing feature {} {}",MAX_RETRIES,feature.getName(),feature.getVersion());
+ LOG.error("Giving up (after {} retries) on Karaf featuresService.listInstalledFeatures() which has not yet finished installing feature {} {}",MAX_RETRIES,feature.getName(),feature.getVersion());
return false;
}
- private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(LinkedHashSet<FeatureConfigSnapshotHolder> configs) throws InterruptedException {
+ private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(final LinkedHashSet<FeatureConfigSnapshotHolder> configs) throws InterruptedException {
LinkedHashSet<FeatureConfigSnapshotHolder> configsToPush = new LinkedHashSet<FeatureConfigSnapshotHolder>(configs);
configsToPush.removeAll(pushedConfigs);
if(!configsToPush.isEmpty()) {
@Override
public String toString() {
- StringBuilder b = new StringBuilder();
- Path p = Paths.get(fileInfo.getFinalname());
- b.append(p.getFileName())
- .append("(")
- .append(getCauseFeature())
- .append(",")
- .append(getFeature())
- .append(")");
- return b.toString();
-
+ StringBuilder b = new StringBuilder();
+ Path p = Paths.get(fileInfo.getFinalname());
+ b.append(p.getFileName())
+ .append("(")
+ .append(getCauseFeature())
+ .append(",")
+ .append(getFeature())
+ .append(")");
+ return b.toString();
}
@Override
package org.opendaylight.controller.config.persist.storage.file.xml;
-import static junit.framework.Assert.assertFalse;
+import static org.junit.Assert.assertFalse;
import static org.custommonkey.xmlunit.XMLAssert.assertXMLEqual;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
-
+import com.google.common.base.Charsets;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;
-
-import junit.framework.Assert;
-
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.test.PropertiesProviderTest;
-import com.google.common.base.Charsets;
-
public class FileStorageAdapterTest {
private static int i;
@Before
public void setUp() throws Exception {
file = Files.createTempFile("testFilePersist", ".txt").toFile();
- if (!file.exists())
+ if (!file.exists()) {
return;
+ }
com.google.common.io.Files.write("", file, Charsets.UTF_8);
i = 1;
}
storage.persistConfig(holder);
- Assert.assertEquals(storage.toString().replace("\\","/"),"XmlFileStorageAdapter [storage="+NON_EXISTENT_DIRECTORY+NON_EXISTENT_FILE+"]");
+ assertEquals(storage.toString().replace("\\","/"),"XmlFileStorageAdapter [storage="+NON_EXISTENT_DIRECTORY+NON_EXISTENT_FILE+"]");
delete(new File(NON_EXISTENT_DIRECTORY));
}
@Test
storage.setNumberOfBackups(Integer.MAX_VALUE);
List<ConfigSnapshotHolder> last = storage.loadLastConfigs();
- Assert.assertEquals(createCaps(), last.get(0).getCapabilities());
+ assertEquals(createCaps(), last.get(0).getCapabilities());
}
private SortedSet<String> createCaps() {
@Test
public void testNoLastConfig() throws Exception {
File file = Files.createTempFile("testFilePersist", ".txt").toFile();
- if (!file.exists())
+ if (!file.exists()) {
return;
+ }
XmlFileStorageAdapter storage = new XmlFileStorageAdapter();
storage.setFileStorage(file);
return "<config>" + i++ + "</config>";
}
- private void delete(File f) throws IOException {
+ private void delete(final File f) throws IOException {
if (f.isDirectory()) {
- for (File c : f.listFiles())
+ for (File c : f.listFiles()) {
delete(c);
+ }
}
- if (!f.delete())
+ if (!f.delete()) {
throw new FileNotFoundException("Failed to delete file: " + f);
+ }
}
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
-
-import org.junit.After;
import org.junit.Before;
import org.junit.Test;
*/
package org.opendaylight.controller.config.util;
+import static org.hamcrest.CoreMatchers.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
-import static org.junit.matchers.JUnitMatchers.hasItem;
-
+import com.google.common.collect.Sets;
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
-
import javax.management.InstanceNotFoundException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
-
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.config.api.ConfigRegistry;
-import com.google.common.collect.Sets;
-
public class ConfigRegistryClientsTest {
private TestingConfigRegistry testingRegistry;
assertEquals(Sets.newHashSet(TestingConfigRegistry.run2, TestingConfigRegistry.run1, TestingConfigRegistry.run3), jmxLookup);
}
- private Set<ObjectName> lookupRuntimeBeans(ConfigRegistryClient client)
+ private Set<ObjectName> lookupRuntimeBeans(final ConfigRegistryClient client)
throws Exception {
Set<ObjectName> beans = client.lookupRuntimeBeans();
for (ObjectName on : beans) {
}
private Set<ObjectName> clientLookupRuntimeBeansWithModuleAndInstance(
- ConfigRegistryClient client, String moduleName, String instanceName) {
+ final ConfigRegistryClient client, final String moduleName, final String instanceName) {
Set<ObjectName> beans = client.lookupRuntimeBeans(moduleName, instanceName);
if (beans.size() > 0) {
assertEquals("RuntimeBean",
assertThat(clientLookupServiceInterfaceNames(testingRegistryON), hasItem(TestingConfigRegistry.serviceQName2));
}
- private Set<String> clientLookupServiceInterfaceNames(ObjectName client) throws InstanceNotFoundException{
+ private Set<String> clientLookupServiceInterfaceNames(final ObjectName client) throws InstanceNotFoundException{
return jmxRegistryClient.lookupServiceInterfaceNames(client);
}
public void testLookupConfigBeans2() throws Exception {
Method method = LookupRegistry.class.getMethod("lookupConfigBeans",
String.class, String.class);
- Object[] args = new Object[] { TestingConfigRegistry.moduleName1,
- TestingConfigRegistry.instName1 };
+ Object[] args = new Object[] { TestingConfigRegistry.moduleName1, TestingConfigRegistry.instName1 };
test(method, args);
}
public void testLookupConfigBean() throws Exception {
Method method = LookupRegistry.class.getMethod("lookupConfigBean",
String.class, String.class);
- Object[] args = new Object[] { TestingConfigRegistry.moduleName1,
- TestingConfigRegistry.instName1 };
+ Object[] args = new Object[] { TestingConfigRegistry.moduleName1, TestingConfigRegistry.instName1 };
test(method, args);
}
// check logs
String[] expectedLogs = new String[] {
- "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.LogbackConfigurationLoaderTest: LOGBACK ready -> about to use it",
- "LoggingEvent -> [TRACE] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: tracing",
- "LoggingEvent -> [DEBUG] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: debugging",
- "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: infoing",
- "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: warning",
- "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: erroring",
- "LoggingEvent -> [DEBUG] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: debugging",
- "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: infoing",
- "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: warning",
- "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: erroring",
- "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.logwork.Informer: infoing",
- "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Informer: warning",
- "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Informer: erroring",
- "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Warner: warning",
- "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Warner: erroring",
- "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Errorer: erroring"
-
+ "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.LogbackConfigurationLoaderTest: LOGBACK ready -> about to use it",
+ "LoggingEvent -> [TRACE] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: tracing",
+ "LoggingEvent -> [DEBUG] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: debugging",
+ "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: infoing",
+ "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: warning",
+ "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Tracer: erroring",
+ "LoggingEvent -> [DEBUG] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: debugging",
+ "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: infoing",
+ "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: warning",
+ "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Debugger: erroring",
+ "LoggingEvent -> [INFO] org.opendaylight.controller.logback.config.loader.test.logwork.Informer: infoing",
+ "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Informer: warning",
+ "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Informer: erroring",
+ "LoggingEvent -> [WARN] org.opendaylight.controller.logback.config.loader.test.logwork.Warner: warning",
+ "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Warner: erroring",
+ "LoggingEvent -> [ERROR] org.opendaylight.controller.logback.config.loader.test.logwork.Errorer: erroring"
};
List<String> logSnapshot = new ArrayList<>(TestAppender.getLogRecord());
-<?xml version="1.0" encoding="UTF-8"?>\r
-<configuration debug="true">\r
-\r
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">\r
- <encoder>\r
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>\r
- </encoder>\r
- </appender>\r
-\r
- <root level="INFO">\r
- <appender-ref ref="STDOUT" />\r
- </root>\r
-\r
- <!-- Base log level -->\r
- <logger name="org.opendaylight.controller.logback.config.loader" level="DEBUG"/>\r
-\r
-</configuration>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration debug="true">
+
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <root level="INFO">
+ <appender-ref ref="STDOUT" />
+ </root>
+
+ <!-- Base log level -->
+ <logger name="org.opendaylight.controller.logback.config.loader" level="DEBUG"/>
+
+</configuration>
-<?xml version="1.0" encoding="UTF-8"?>\r
-<configuration debug="false">\r
-\r
- <appender name="TEST" class="org.opendaylight.controller.logback.config.loader.test.TestAppender"/>\r
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">\r
- <encoder>\r
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>\r
- </encoder>\r
- </appender>\r
-\r
- <root level="INFO">\r
- <appender-ref ref="TEST" />\r
- <appender-ref ref="STDOUT" />\r
- </root>\r
-\r
- <!-- Base log level -->\r
- <logger name="org.opendaylight.controller.logback.config.loader" level="INFO"/>\r
-\r
-</configuration>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration debug="false">
+
+ <appender name="TEST" class="org.opendaylight.controller.logback.config.loader.test.TestAppender"/>
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <root level="INFO">
+ <appender-ref ref="TEST" />
+ <appender-ref ref="STDOUT" />
+ </root>
+
+ <!-- Base log level -->
+ <logger name="org.opendaylight.controller.logback.config.loader" level="INFO"/>
+
+</configuration>
-<?xml version="1.0" encoding="UTF-8"?>\r
-<configuration debug="false">\r
-\r
- <!-- Base log level -->\r
- <logger name="org.opendaylight.controller.logback.config.loader" level="DEBUG"/>\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Tracer" level="TRACE"/>\r
-<!-- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Debugger" level="DEBUG"/> -->\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Informer" level="DEBUG"/>\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Warner" level="ERROR"/>\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Errorer" level="ERROR"/>\r
-\r
-</configuration>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration debug="false">
+
+ <!-- Base log level -->
+ <logger name="org.opendaylight.controller.logback.config.loader" level="DEBUG"/>
+ <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Tracer" level="TRACE"/>
+<!-- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Debugger" level="DEBUG"/> -->
+ <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Informer" level="DEBUG"/>
+ <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Warner" level="ERROR"/>
+ <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Errorer" level="ERROR"/>
+
+</configuration>
-<?xml version="1.0" encoding="UTF-8"?>\r
-<configuration debug="false">\r
- <root level="INFO">\r
- <appender-ref ref="TEST" />\r
- </root>\r
-\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Informer" level="INFO"/>\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Warner" level="WARN"/>\r
-\r
- <logger name="org.opendaylight.controller.logback.config.loader.test.LogbackConfigurationLoaderTest" level="TRACE"/>\r
-</configuration>\r
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration debug="false">
+ <root level="INFO">
+ <appender-ref ref="TEST" />
+ </root>
+
+ <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Informer" level="INFO"/>
+ <logger name="org.opendaylight.controller.logback.config.loader.test.logwork.Warner" level="WARN"/>
+
+ <logger name="org.opendaylight.controller.logback.config.loader.test.LogbackConfigurationLoaderTest" level="TRACE"/>
+</configuration>
*/
package org.opendaylight.controller.config.yang.logback.config;
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.LoggerFactory;
-
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.core.rolling.FixedWindowRollingPolicy;
import ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy;
import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.slf4j.LoggerFactory;
/**
* Implementation of {@link ContextSetter}. Resets running logback
private final LogbackStatusListener statusListener;
private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ContextSetterImpl.class);
- public ContextSetterImpl(LogbackRuntimeRegistrator rootRuntimeBeanRegistratorWrapper) {
+ public ContextSetterImpl(final LogbackRuntimeRegistrator rootRuntimeBeanRegistratorWrapper) {
statusListener = new LogbackStatusListener(rootRuntimeBeanRegistratorWrapper);
statusListener.register();
}
- public void updateContext(LogbackModule module) {
+ @Override
+ public void updateContext(final LogbackModule module) {
LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
List<ch.qos.logback.classic.Logger> loggersBefore = context.getLoggerList();
createLoggers(context, module, Sets.newHashSet(loggersBefore));
}
- private Map<String, Appender<ILoggingEvent>> createConsoleAppenders(LoggerContext context, LogbackModule module) {
+ private Map<String, Appender<ILoggingEvent>> createConsoleAppenders(final LoggerContext context, final LogbackModule module) {
Map<String, Appender<ILoggingEvent>> appendersMap = new HashMap<>();
for (ConsoleAppenderTO appender : module.getConsoleAppenderTO()) {
Preconditions.checkState(appendersMap.containsKey(appender.getName()) == false,
"Duplicate appender name %s", appender.getName());
- ch.qos.logback.core.ConsoleAppender app = new ch.qos.logback.core.ConsoleAppender();
+ ch.qos.logback.core.ConsoleAppender<ILoggingEvent> app = new ch.qos.logback.core.ConsoleAppender<>();
app.setContext(context);
PatternLayoutEncoder encoder = new PatternLayoutEncoder();
encoder.setContext(context);
return appendersMap;
}
- private void createLoggers(LoggerContext context, LogbackModule module,
- Set<ch.qos.logback.classic.Logger> loggersBefore) {
+ private void createLoggers(final LoggerContext context, final LogbackModule module,
+ final Set<ch.qos.logback.classic.Logger> loggersBefore) {
Map<String, Appender<ILoggingEvent>> appendersMap = getAppenders(module, context);
}
}
- private void addNewAppenders(Map<String, Appender<ILoggingEvent>> appendersMap, LoggerTO logger,
- ch.qos.logback.classic.Logger logbackLogger, Optional<Set<Appender<ILoggingEvent>>> appendersBefore) {
+ private void addNewAppenders(final Map<String, Appender<ILoggingEvent>> appendersMap, final LoggerTO logger,
+ final ch.qos.logback.classic.Logger logbackLogger, final Optional<Set<Appender<ILoggingEvent>>> appendersBefore) {
if (logger.getAppenders() != null) {
for (String appenderName : logger.getAppenders()) {
if (appendersMap.containsKey(appenderName)) {
}
}
- private void removeBeforeAppenders(Set<ch.qos.logback.classic.Logger> loggersBefore, LoggerTO logger,
- ch.qos.logback.classic.Logger logbackLogger, Optional<Set<Appender<ILoggingEvent>>> appendersBefore) {
+ private void removeBeforeAppenders(final Set<ch.qos.logback.classic.Logger> loggersBefore, final LoggerTO logger,
+ final ch.qos.logback.classic.Logger logbackLogger, final Optional<Set<Appender<ILoggingEvent>>> appendersBefore) {
if (appendersBefore.isPresent()) {
for (Appender<ILoggingEvent> appenderBefore : appendersBefore.get()) {
logbackLogger.detachAppender(appenderBefore);
}
}
- private Optional<Set<Appender<ILoggingEvent>>> getAppendersBefore(Set<ch.qos.logback.classic.Logger> loggersBefore,
- ch.qos.logback.classic.Logger logbackLogger) {
+ private Optional<Set<Appender<ILoggingEvent>>> getAppendersBefore(final Set<ch.qos.logback.classic.Logger> loggersBefore,
+ final ch.qos.logback.classic.Logger logbackLogger) {
if (loggersBefore.contains(logbackLogger)) {
Iterator<Appender<ILoggingEvent>> appenderIt = logbackLogger.iteratorForAppenders();
Set<Appender<ILoggingEvent>> appendersBefore = Sets.newHashSet();
}
- private Map<String, Appender<ILoggingEvent>> getAppenders(LogbackModule module, LoggerContext context) {
+ private Map<String, Appender<ILoggingEvent>> getAppenders(final LogbackModule module, final LoggerContext context) {
Map<String, Appender<ILoggingEvent>> appendersMap = new HashMap<>();
addAllAppenders(appendersMap, createRollingAppenders(context, module));
addAllAppenders(appendersMap, createFileAppenders(context, module));
return appendersMap;
}
- private void addAllAppenders(Map<String, Appender<ILoggingEvent>> allAppenders,
- Map<String, Appender<ILoggingEvent>> appendersToAdd) {
+ private void addAllAppenders(final Map<String, Appender<ILoggingEvent>> allAppenders,
+ final Map<String, Appender<ILoggingEvent>> appendersToAdd) {
for (String appenderName : appendersToAdd.keySet()) {
Preconditions.checkState(allAppenders.containsKey(appenderName) == false, "Duplicate appender name %s",
appenderName);
}
}
- private Map<String, Appender<ILoggingEvent>> createFileAppenders(LoggerContext context, LogbackModule module) {
+ private Map<String, Appender<ILoggingEvent>> createFileAppenders(final LoggerContext context, final LogbackModule module) {
Map<String, Appender<ILoggingEvent>> appendersMap = new HashMap<>();
for (FileAppenderTO appender : module.getFileAppenderTO()) {
Preconditions.checkState(appendersMap.containsKey(appender.getName()) == false,
"Duplicate appender name %s", appender.getName());
- ch.qos.logback.core.FileAppender app = new ch.qos.logback.core.FileAppender<>();
+ ch.qos.logback.core.FileAppender<ILoggingEvent> app = new ch.qos.logback.core.FileAppender<>();
app.setAppend(appender.getAppend());
app.setContext(context);
PatternLayoutEncoder encoder = new PatternLayoutEncoder();
return appendersMap;
}
- private Map<String, Appender<ILoggingEvent>> createRollingAppenders(LoggerContext context, LogbackModule module) {
+ private Map<String, Appender<ILoggingEvent>> createRollingAppenders(final LoggerContext context, final LogbackModule module) {
Map<String, Appender<ILoggingEvent>> appendersMap = new HashMap<>();
for (RollingFileAppenderTO appender : module.getRollingFileAppenderTO()) {
Preconditions.checkState(appendersMap.containsKey(appender.getName()) == false,
"Duplicate appender name %s", appender.getName());
- ch.qos.logback.core.rolling.RollingFileAppender app = new ch.qos.logback.core.rolling.RollingFileAppender<>();
+ ch.qos.logback.core.rolling.RollingFileAppender<ILoggingEvent> app = new ch.qos.logback.core.rolling.RollingFileAppender<>();
app.setAppend(appender.getAppend());
app.setContext(context);
PatternLayoutEncoder encoder = new PatternLayoutEncoder();
policy.start();
app.setRollingPolicy(policy);
} else if (appender.getRollingPolicyType().equals("TimeBasedRollingPolicy")) {
- TimeBasedRollingPolicy policy = new TimeBasedRollingPolicy();
+ TimeBasedRollingPolicy<ILoggingEvent> policy = new TimeBasedRollingPolicy<>();
policy.setContext(context);
policy.setMaxHistory(appender.getMaxHistory());
if (appender.getCleanHistoryOnStart() != null) {
policy.start();
app.setRollingPolicy(policy);
}
- SizeBasedTriggeringPolicy triggeringPolicy = new SizeBasedTriggeringPolicy();
+ SizeBasedTriggeringPolicy<ILoggingEvent> triggeringPolicy = new SizeBasedTriggeringPolicy<>();
triggeringPolicy.setContext(context);
triggeringPolicy.setMaxFileSize(appender.getMaxFileSize());
triggeringPolicy.start();
*/
package org.opendaylight.controller.config.yang.logback.config;
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.classic.spi.LoggerComparator;
+import ch.qos.logback.core.Appender;
+import ch.qos.logback.core.rolling.FixedWindowRollingPolicy;
+import ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy;
+import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
-
import org.apache.commons.lang3.StringUtils;
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DependencyResolverFactory;
import org.osgi.framework.BundleContext;
import org.slf4j.LoggerFactory;
-import ch.qos.logback.classic.Logger;
-import ch.qos.logback.classic.LoggerContext;
-import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
-import ch.qos.logback.classic.spi.ILoggingEvent;
-import ch.qos.logback.classic.spi.LoggerComparator;
-import ch.qos.logback.core.Appender;
-import ch.qos.logback.core.rolling.FixedWindowRollingPolicy;
-import ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy;
-import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
/**
*
*/
private Map<String, FileAppenderTO> fileDTOs;
@Override
- public LogbackModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
- BundleContext bundleContext) {
+ public LogbackModule instantiateModule(final String instanceName, final DependencyResolver dependencyResolver,
+ final BundleContext bundleContext) {
Preconditions.checkArgument(instanceName.equals(INSTANCE_NAME),
"There should be just one instance of logback, named " + INSTANCE_NAME);
prepareDTOs();
}
@Override
- public LogbackModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
- LogbackModule oldModule, AutoCloseable oldInstance, BundleContext bundleContext) {
+ public LogbackModule instantiateModule(final String instanceName, final DependencyResolver dependencyResolver,
+ final LogbackModule oldModule, final AutoCloseable oldInstance, final BundleContext bundleContext) {
Preconditions.checkArgument(instanceName.equals(INSTANCE_NAME),
"There should be just one instance of logback, named " + INSTANCE_NAME);
prepareDTOs();
prepareAppendersDTOs(context);
}
- private void prepareAppendersDTOs(LoggerContext context) {
+ private void prepareAppendersDTOs(final LoggerContext context) {
this.rollingDTOs = new HashMap<>();
this.consoleDTOs = new HashMap<>();
this.fileDTOs = new HashMap<>();
app.setFileNamePattern(rollingPolicy.getFileNamePattern());
app.setRollingPolicyType("FixedWindowRollingPolicy");
} else if (rollingApp.getRollingPolicy() instanceof TimeBasedRollingPolicy<?>) {
- TimeBasedRollingPolicy rollingPolicy = (TimeBasedRollingPolicy) rollingApp.getRollingPolicy();
+ TimeBasedRollingPolicy<ILoggingEvent> rollingPolicy = (TimeBasedRollingPolicy<ILoggingEvent>) rollingApp.getRollingPolicy();
app.setRollingPolicyType("TimeBasedRollingPolicy");
app.setFileNamePattern(rollingPolicy.getFileNamePattern());
app.setMaxHistory(rollingPolicy.getMaxHistory());
}
}
- private Map<String, LoggerTO> prepareLoggersDTOs(LoggerContext context) {
+ private Map<String, LoggerTO> prepareLoggersDTOs(final LoggerContext context) {
Map<String, LoggerTO> DTOs = new HashMap<>();
List<String> appenders = new ArrayList<>();
List<org.slf4j.Logger> loggersToBeAdd = removeUnusableLoggers(context.getLoggerList(),
return DTOs;
}
- private List<org.slf4j.Logger> removeUnusableLoggers(List<Logger> loggerList, Logger rootLogger) {
+ private List<org.slf4j.Logger> removeUnusableLoggers(final List<Logger> loggerList, final Logger rootLogger) {
Collections.sort(loggerList, new LoggerComparator());
Map<String, org.slf4j.Logger> loggersToReturn = new HashMap<>();
}
@Override
- public Set<LogbackModule> getDefaultModules(DependencyResolverFactory dependencyResolverFactory,
- BundleContext bundleContext) {
+ public Set<LogbackModule> getDefaultModules(final DependencyResolverFactory dependencyResolverFactory,
+ final BundleContext bundleContext) {
DependencyResolver resolver = dependencyResolverFactory.createDependencyResolver(new ModuleIdentifier(
getImplementationName(), INSTANCE_NAME));
LogbackModule defaultLogback = instantiateModule(INSTANCE_NAME, resolver, bundleContext);
*/
package org.opendaylight.controller.config.yang.logback.config;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import java.util.ArrayList;
import java.util.Arrays;
/**
*
*/
-public final class NetconfClientDispatcherModule extends org.opendaylight.controller.config.yang.config.netconf.client.dispatcher.AbstractNetconfClientDispatcherModule
- {
+public final class NetconfClientDispatcherModule extends org.opendaylight.controller.config.yang.config.netconf.client.dispatcher.AbstractNetconfClientDispatcherModule {
public NetconfClientDispatcherModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
*/
package org.opendaylight.controller.config.yang.netty.timer;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
-
-import junit.framework.Assert;
-
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.config.api.ConflictingVersionException;
try {
createInstance(transaction, instanceName, 0L, 10, true);
transaction.validateConfig();
- Assert.fail();
+ fail();
} catch (ValidationException e) {
- Assert.assertTrue(e.getMessage().contains("TickDuration value must be greater than 0"));
+ assertTrue(e.getMessage().contains("TickDuration value must be greater than 0"));
}
}
try {
createInstance(transaction, instanceName, 500L, 0, true);
transaction.validateConfig();
- Assert.fail();
+ fail();
} catch (ValidationException e) {
- Assert.assertTrue(e.getMessage().contains("TicksPerWheel value must be greater than 0"));
+ assertTrue(e.getMessage().contains("TicksPerWheel value must be greater than 0"));
}
}
assertStatus(status, 0, 1, 1);
}
- private ObjectName createInstance(ConfigTransactionJMXClient transaction, String instanceName,
+ private ObjectName createInstance(final ConfigTransactionJMXClient transaction, final String instanceName,
final Long tickDuration, final Integer ticksPerWheel, final boolean hasThreadfactory)
throws InstanceAlreadyExistsException {
ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), instanceName);
return nameCreated;
}
- private ObjectName createThreadfactoryInstance(ConfigTransactionJMXClient transaction, String instanceName,
+ private ObjectName createThreadfactoryInstance(final ConfigTransactionJMXClient transaction, final String instanceName,
final String namePrefix) throws InstanceAlreadyExistsException {
ObjectName nameCreated = transaction.createModule(threadFactory.getImplementationName(), instanceName);
NamingThreadFactoryModuleMXBean mxBean = transaction.newMBeanProxy(nameCreated,
<module>netconf-config-dispatcher</module>
<module>config-module-archetype</module>
<module>config-netty-config</module>
+
+ <module>config-artifacts</module>
</modules>
<dependencies>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-/**
- * Generated file
-
- * Generated from: yang module name: shutdown-impl yang module local name: shutdown
- * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
- * Generated at: Wed Dec 18 14:02:06 CET 2013
- *
- * Do not modify this file unless it is present under src/main directory
- */
package org.opendaylight.controller.config.yang.shutdown.impl;
+import java.util.Arrays;
+import java.util.Set;
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DependencyResolverFactory;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
-import java.util.Arrays;
-import java.util.Set;
-
public class ShutdownModuleFactory extends AbstractShutdownModuleFactory {
public ShutdownModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
package org.opendaylight.controller.config.yang.shutdown.impl;
import com.google.common.base.Optional;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
import org.opendaylight.controller.config.shutdown.ShutdownService;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-
public class ShutdownServiceImpl implements ShutdownService, AutoCloseable {
private final ShutdownService impl;
private final ShutdownRuntimeRegistration registration;
}
class Impl implements ShutdownService {
- private static final Logger logger = LoggerFactory.getLogger(Impl.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Impl.class);
private final String secret;
private final Bundle systemBundle;
@Override
public void shutdown(String inputSecret, Long maxWaitTime, Optional<String> reason) {
- logger.warn("Shutdown issued with secret {} and reason {}", inputSecret, reason);
+ LOG.warn("Shutdown issued with secret {} and reason {}", inputSecret, reason);
try {
Thread.sleep(1000); // prevent brute force attack
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.warn("Shutdown process interrupted", e);
+ LOG.warn("Shutdown process interrupted", e);
}
if (this.secret.equals(inputSecret)) {
- logger.info("Server is shutting down");
+ LOG.info("Server is shutting down");
// actual work:
Thread stopSystemBundleThread = new StopSystemBundleThread(systemBundle);
stopSystemBundleThread.start();
if (maxWaitTime != null && maxWaitTime > 0) {
Thread systemExitThread = new CallSystemExitThread(maxWaitTime);
- logger.debug("Scheduling {}", systemExitThread);
+ LOG.debug("Scheduling {}", systemExitThread);
systemExitThread.start();
}
// end
} else {
- logger.warn("Unauthorized attempt to shut down server");
+ LOG.warn("Unauthorized attempt to shut down server");
throw new IllegalArgumentException("Invalid secret");
}
}
}
class StopSystemBundleThread extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(StopSystemBundleThread.class);
+ private static final Logger LOG = LoggerFactory.getLogger(StopSystemBundleThread.class);
private final Bundle systemBundle;
StopSystemBundleThread(Bundle systemBundle) {
try {
// wait so that JMX response is received
Thread.sleep(1000);
- logger.debug("Stopping system bundle");
+ LOG.debug("Stopping system bundle");
systemBundle.stop();
} catch (BundleException e) {
- logger.warn("Can not stop OSGi server", e);
+ LOG.warn("Can not stop OSGi server", e);
} catch (InterruptedException e) {
- logger.warn("Shutdown process interrupted", e);
+ LOG.warn("Shutdown process interrupted", e);
}
}
}
class CallSystemExitThread extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(CallSystemExitThread.class);
+ private static final Logger LOG = LoggerFactory.getLogger(CallSystemExitThread.class);
private final long maxWaitTime;
CallSystemExitThread(long maxWaitTime) {
super("call-system-exit-daemon");
try {
// wait specified time
Thread.sleep(maxWaitTime);
- logger.error("Since some threads are still running, server is going to shut down via System.exit(1) !");
+ LOG.error("Since some threads are still running, server is going to shut down via System.exit(1) !");
// do a thread dump
ThreadInfo[] threads = ManagementFactory.getThreadMXBean().dumpAllThreads(true, true);
StringBuffer sb = new StringBuffer();
sb.append(info);
sb.append("\n");
}
- logger.warn("Thread dump:{}", sb);
+ LOG.warn("Thread dump:{}", sb);
System.exit(1);
} catch (InterruptedException e) {
- logger.warn("Interrupted, not going to call System.exit(1)");
+ LOG.warn("Interrupted, not going to call System.exit(1)");
}
}
}
*/
package org.opendaylight.controller.config.threadpool.async;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import javax.management.InstanceAlreadyExistsException;
import javax.management.ObjectName;
import javax.management.InstanceAlreadyExistsException;
import javax.management.ObjectName;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
public class SyncEventBusConfigBeanTest extends AbstractConfigTest {
*/
package org.opendaylight.controller.config.threadpool.fixed;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import javax.management.InstanceNotFoundException;
import javax.management.ObjectName;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
public class FlexibleThreadPoolConfigBeanTest extends AbstractConfigTest {
*/
package org.opendaylight.controller.config.threadpool.naming;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
*/
package org.opendaylight.controller.config.threadpool.scheduled;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
String varName = BindingGeneratorUtil
.parseToValidParamName(attrEntry.getKey());
- {
- ModuleField field;
+ ModuleField field;
if (isIdentity) {
String identityBaseClass = getInnerTypeFromIdentity(((TypedAttribute) attributeIfc).getType());
IdentityRefModuleField identityField = new IdentityRefModuleField(type, varName,
nullableDefaultWrapped, isDependency, dependency, isListOfDependencies, needsDepResolver);
}
moduleFields.add(field);
- }
+
+
String getterName = "get"
+ attributeIfc.getUpperCaseCammelCase();
MethodDefinition getter = new MethodDefinition(type,
.append("}\n");
} else {
result.append(format(
- "private %s %sDependency;\n"+
- "protected final %s get%sDependency(){\n"+
- "return %sDependency;\n"+
- "}",
- osgi, moduleField.getName(), osgi, moduleField.getAttributeName(), moduleField.getName()));
+ "private %s %sDependency;\n"+
+ "protected final %s get%sDependency(){\n"+
+ "return %sDependency;\n"+
+ "}",
+ osgi, moduleField.getName(), osgi, moduleField.getAttributeName(), moduleField.getName()));
}
}
}
*/
package org.opendaylight.controller.config.yangjmxgenerator.plugin;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import org.junit.Test;
import org.opendaylight.controller.config.yangjmxgenerator.ConfigConstants;
*/
package org.opendaylight.controller.config.yangjmxgenerator.plugin;
+import static org.junit.Assert.assertEquals;
import java.io.File;
-
-import junit.framework.Assert;
-
import org.junit.Test;
public class JMXGeneratorGeneratedFilesTrackerTest {
JMXGenerator.GeneratedFilesTracker tracker = new JMXGenerator.GeneratedFilesTracker();
tracker.addFile(new File("./a/b/c"));
- Assert.assertEquals(1, tracker.getFiles().size());
+ assertEquals(1, tracker.getFiles().size());
tracker.addFile(new File("./a/b/c"));
}
}
*/
package org.opendaylight.controller.config.yangjmxgenerator.plugin;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import org.opendaylight.controller.config.yangjmxgenerator.ServiceInterfaceEntry;
public class Dependency {
- private final ServiceInterfaceEntry sie;
- private final boolean mandatory;
+ private final ServiceInterfaceEntry sie;
+ private final boolean mandatory;
- public Dependency(ServiceInterfaceEntry sie, boolean mandatory) {
- this.sie = sie;
- this.mandatory = mandatory;
- }
+ public Dependency(ServiceInterfaceEntry sie, boolean mandatory) {
+ this.sie = sie;
+ this.mandatory = mandatory;
+ }
- public ServiceInterfaceEntry getSie() {
- return sie;
- }
+ public ServiceInterfaceEntry getSie() {
+ return sie;
+ }
- public boolean isMandatory() {
- return mandatory;
- }
+ public boolean isMandatory() {
+ return mandatory;
+ }
- @Override
- public boolean equals(Object o) {
- if (this == o)
- return true;
- if (o == null || getClass() != o.getClass())
- return false;
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
- Dependency that = (Dependency) o;
+ Dependency that = (Dependency) o;
- if (mandatory != that.mandatory)
- return false;
- if (!sie.equals(that.sie))
- return false;
+ if (mandatory != that.mandatory)
+ return false;
+ if (!sie.equals(that.sie))
+ return false;
- return true;
- }
-
- @Override
- public int hashCode() {
- int result = sie.hashCode();
- result = 31 * result + (mandatory ? 1 : 0);
- return result;
- }
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = sie.hashCode();
+ result = 31 * result + (mandatory ? 1 : 0);
+ return result;
}
+}
*/
package org.opendaylight.controller.config.yangjmxgenerator;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.format;
+import static org.junit.Assert.assertNotNull;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
QName qName = identitySchemaNode.getQName();
Preconditions.checkArgument(
result.containsKey(qName) == false,
- format("Two identities of %s contain same " + "qname %s",
- module, qName));
+ "Two identities of %s contain same qname %s",
+ module, qName);
result.put(qName, identitySchemaNode);
}
return result;
*/
package org.opendaylight.controller.config.yangjmxgenerator;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNull;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
}
}
assertEquals("Expected identities not found " + copyOfExpectedNames,
- Collections.EMPTY_MAP, copyOfExpectedNames);
+ Collections.emptyMap(), copyOfExpectedNames);
}
@Test
import com.google.common.collect.Lists;
-import junit.framework.Assert;
-
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.config.api.IdentityAttributeRef;
List<ObjectName> testingDeps = proxy.getTestingDeps();
ObjectName testingDep = proxy.getTestingDep();
- Assert.assertEquals(TESTING_DEP_PREFIX, ObjectNameUtil.getInstanceName(testingDep));
+ assertEquals(TESTING_DEP_PREFIX, ObjectNameUtil.getInstanceName(testingDep));
assertTestingDeps(testingDeps, 4);
transaction.abortConfig();
}
private void assertTestingDeps(List<ObjectName> testingDeps, int i) {
- Assert.assertEquals(i, testingDeps.size());
+ assertEquals(i, testingDeps.size());
int c = 1;
for (ObjectName testingDep : testingDeps) {
- Assert.assertEquals(TESTING_DEP_PREFIX + Integer.toString(c++), ObjectNameUtil.getInstanceName(testingDep));
+ assertEquals(TESTING_DEP_PREFIX + Integer.toString(c++), ObjectNameUtil.getInstanceName(testingDep));
}
}
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
<configuration>
<excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/configuration\/initial\/</excludes>
</configuration>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
<configuration>
<excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/configuration\/initial\/</excludes>
</configuration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.jdt.launching.localJavaApplication">
-<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
-<listEntry value="/sal"/>
-</listAttribute>
-<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
-<listEntry value="4"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.debug.core.source_locator_id" value="org.eclipse.jdt.launching.sourceLocator.JavaSourceLookupDirector"/>
-<stringAttribute key="org.eclipse.debug.core.source_locator_memento" value="<?xml version="1.0" encoding="UTF-8" standalone="no"?> <sourceLookupDirector> <sourceContainers duplicates="false"> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;switchmanager.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;commons.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;configuration&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;configuration.implementation&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;containermanager&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;flowprogrammer.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;forwarding.staticrouting.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; 
encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;hosttracker&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;hosttracker.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;protocol_plugins.openflow&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;statistics.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;subnets.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;topology.northbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;containermanager.implementation&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;devices.web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; 
standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;flows.web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;samples.simpleforwarding&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;topology.web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;troubleshoot.web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;clustering.services&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;mactracker&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;sal&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;slicemanager&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;switchmanager&quot;/&gt;&#10;" 
typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;topologymanager&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;arphandler&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;clustering.services-implementation&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;clustering.test&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;configuration.web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;forwarding.ipswitch&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;forwarding.staticrouting&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;forwardingrulesmanager.implementation&quot;/&gt;&#10;" 
typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;forwardingrulesmanager&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;home.web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;monitor&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;northboundtest&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;onep.topology.southbound&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;openflowj&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;protocol_plugin.openflow&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;routing.dijkstra_implementation&quot;/&gt;&#10;" 
typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;sal.implementation&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;slicemanager.implementation&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;statisticsmanager&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;tifmgr&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;usermanager&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> <container memento="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#10;&lt;javaProject name=&quot;web&quot;/&gt;&#10;" typeId="org.eclipse.jdt.launching.sourceContainer.javaProject"/> </sourceContainers> </sourceLookupDirector> "/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<listAttribute key="org.eclipse.jdt.launching.CLASSPATH">
-<listEntry value="<?xml version="1.0" encoding="UTF-8" standalone="no"?> <runtimeClasspathEntry containerPath="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6" path="1" type="4"/> "/>
-<listEntry value="<?xml version="1.0" encoding="UTF-8" standalone="no"?> <runtimeClasspathEntry internalArchive="/distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight/lib/org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar" path="3" type="2"/> "/>
-<listEntry value="<?xml version="1.0" encoding="UTF-8" standalone="no"?> <runtimeClasspathEntry internalArchive="/distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar" path="3" type="2"/> "/>
-<listEntry value="<?xml version="1.0" encoding="UTF-8" standalone="no"?> <runtimeClasspathEntry internalArchive="/distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight/lib/org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar" path="3" type="2"/> "/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.CLASSPATH_PROVIDER" value="org.eclipse.m2e.launchconfig.classpathProvider"/>
-<booleanAttribute key="org.eclipse.jdt.launching.DEFAULT_CLASSPATH" value="false"/>
-<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.eclipse.equinox.launcher.Main"/>
-<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="-console -consoleLog"/>
-<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="sal"/>
-<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.m2e.launchconfig.sourcepathProvider"/>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Djava.io.tmpdir=${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}/work/tmp -Dosgi.install.area=${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage} -Dosgi.configuration.area=${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}/configuration -Dosgi.frameworkClassPath=file:${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar,file:${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}/lib/org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar,file:${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}/lib/org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar -Dosgi.framework=file:${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="clean install"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value="fastreassembly"/>
-<listAttribute key="M2_PROPERTIES">
-<listEntry value="fastreassembly.directory=${workspace_loc:/distribution.opendaylight/target/distribution.opendaylight-osgipackage/opendaylight/plugins}"/>
-</listAttribute>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${selected_resource_loc}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="clean install"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value="docs,integrationtests"/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/releasepom}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="install"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="true"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/releasepom}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="clean install"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES">
-<listEntry value="skipTests=true"/>
-<listEntry value="skipIT=true"/>
-<listEntry value="enunciate.skip=true"/>
-</listAttribute>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/releasepom}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="clean install sonar:sonar"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES">
-<listEntry value="maven.test.skip=true"/>
-</listAttribute>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/releasepom}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="clean install"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/releasepom}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?pde version="3.8"?><target name="opendaylight-local" sequenceNumber="6">
-<locations>
-<location path="${workspace_loc:/distribution.opendaylight/}/../p2site/target/repository" type="Directory"/>
-</locations>
-</target>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.pde.ui.EquinoxLauncher">
-<booleanAttribute key="append.args" value="true"/>
-<booleanAttribute key="automaticAdd" value="true"/>
-<booleanAttribute key="automaticValidate" value="false"/>
-<stringAttribute key="bad_container_name" value="/distribution.opendaylight/opendaylight-osgi-launche"/>
-<stringAttribute key="bootstrap" value=""/>
-<stringAttribute key="checked" value="[NONE]"/>
-<booleanAttribute key="clearConfig" value="true"/>
-<stringAttribute key="configLocation" value="${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/opendaylight-osgi-launcher-local"/>
-<booleanAttribute key="default" value="true"/>
-<booleanAttribute key="default_auto_start" value="true"/>
-<intAttribute key="default_start_level" value="4"/>
-<stringAttribute key="deselected_workspace_plugins" value="org.opendaylight.controller.clustering.stub,org.opendaylight.controller.protocol_plugins.stub"/>
-<booleanAttribute key="includeOptional" value="false"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
-<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="-os ${target.os} -ws ${target.ws} -arch ${target.arch} -nl ${target.nl} -consoleLog -console"/>
-<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.pde.ui.workbenchClasspathProvider"/>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Declipse.ignoreApp=true -Dosgi.noShutdown=true -Dorg.osgi.framework.system.packages.extra=sun.reflect,sun.reflect.misc,sun.misc -Dosgi.hook.configurators.include=org.eclipse.virgo.kernel.equinox.extensions.hooks.ExtensionsHookConfigurator -Dlogback.configurationFile=${project_loc:/distribution.opendaylight}/src/main/resources/configuration/logback.xml -Dorg.eclipse.gemini.web.tomcat.config.path=${project_loc:/distribution.opendaylight}/src/main/resources/configuration/tomcat-server.xml -Dosgi.frameworkClassPath=file:${project_loc:/distribution.opendaylight}/../p2site/target/repository/plugins/org.eclipse.equinox.launcher_1.3.0.v20120522-1813.jar,file:${project_loc:/distribution.opendaylight}/../p2site/target/repository/plugins/org.eclipse.virgo.kernel.equinox.extensions_3.6.0.RELEASE.jar,file:${project_loc:/distribution.opendaylight}/../p2site/target/repository/plugins/org.eclipse.osgi_3.8.1.v20120830-144521.jar"/>
-<stringAttribute key="pde.version" value="3.3"/>
-<booleanAttribute key="show_selected_only" value="false"/>
-<stringAttribute key="target_bundles" value="ch.qos.logback.classic@default:default,ch.qos.logback.core@default:default,javax.activation@default:default,javax.annotation@default:default,javax.el@default:default,javax.persistence@default:default,javax.servlet.jsp@default:default,javax.servlet@default:default,javax.xml.rpc@default:default,org.apache.commons.io@default:default,org.apache.felix.gogo.command@default:default,org.apache.felix.gogo.runtime@default:default,org.apache.felix.gogo.shell@default:default,org.eclipse.equinox.console@default:default,org.eclipse.equinox.ds@1:true,org.eclipse.equinox.launcher@default:default,org.eclipse.equinox.util@default:default,org.eclipse.osgi.services@default:default,org.eclipse.osgi@-1:true"/>
-<booleanAttribute key="tracing" value="false"/>
-<booleanAttribute key="useCustomFeatures" value="false"/>
-<booleanAttribute key="useDefaultConfigArea" value="true"/>
-<stringAttribute key="workspace_bundles" value="com.cisco.csdn.debugtools.osgidbg@default:default,org.opendaylight.controller.appauth@default:default,org.opendaylight.controller.arphandler@default:default,org.opendaylight.controller.bundlescanner.implementation@default:default,org.opendaylight.controller.bundlescanner@default:default,org.opendaylight.controller.clustering.services-implementation@default:default,org.opendaylight.controller.clustering.services@default:default,org.opendaylight.controller.clustering.test@default:default,org.opendaylight.controller.commons.northbound@default:default,org.opendaylight.controller.concepts@default:default,org.opendaylight.controller.configuration.implementation@default:default,org.opendaylight.controller.configuration@default:default,org.opendaylight.controller.connectionmanager.implementation@default:default,org.opendaylight.controller.connectionmanager@default:default,org.opendaylight.controller.containermanager.implementation@default:default,org.opendaylight.controller.containermanager.it.implementation@default:default,org.opendaylight.controller.containermanager.northbound@default:default,org.opendaylight.controller.containermanager@default:default,org.opendaylight.controller.devices.web@default:default,org.opendaylight.controller.flowprogrammer.northbound@default:default,org.opendaylight.controller.flows.web@default:default,org.opendaylight.controller.forwarding.staticrouting.northbound@default:default,org.opendaylight.controller.forwarding.staticrouting@default:default,org.opendaylight.controller.forwardingrulesmanager.implementation@default:default,org.opendaylight.controller.forwardingrulesmanager@default:default,org.opendaylight.controller.hosttracker.implementation@default:default,org.opendaylight.controller.hosttracker.northbound@default:default,org.opendaylight.controller.hosttracker@default:default,org.opendaylight.controller.hosttracker_new.implementation@default:default,org.opendaylight.controller.hosttracker
_new@default:default,org.opendaylight.controller.logging.bridge@default:default,org.opendaylight.controller.model.flow-base@default:default,org.opendaylight.controller.model.flow-service@default:default,org.opendaylight.controller.model.flow-statistics@default:default,org.opendaylight.controller.model.inventory@default:default,org.opendaylight.controller.networkconfig.bridgedomain.northbound@default:default,org.opendaylight.controller.protocol_plugins.openflow@default:default,org.opendaylight.controller.routing.dijkstra_implementation@default:default,org.opendaylight.controller.sal-binding-api@default:default,org.opendaylight.controller.sal-common-util@default:default,org.opendaylight.controller.sal-common@default:default,org.opendaylight.controller.sal.connection.implementation@default:default,org.opendaylight.controller.sal.connection@default:default,org.opendaylight.controller.sal.implementation@default:default,org.opendaylight.controller.sal.networkconfiguration.implementation@default:default,org.opendaylight.controller.sal.networkconfiguration@default:default,org.opendaylight.controller.sal@default:default,org.opendaylight.controller.samples.loadbalancer.northbound@default:default,org.opendaylight.controller.samples.loadbalancer@default:default,org.opendaylight.controller.samples.sample-toaster-consumer@default:default,org.opendaylight.controller.samples.sample-toaster-provider@default:default,org.opendaylight.controller.samples.sample-toaster@default:default,org.opendaylight.controller.samples.simpleforwarding@default:default,org.opendaylight.controller.security@default:false,org.opendaylight.controller.statistics.northbound@default:default,org.opendaylight.controller.statisticsmanager.implementation@default:default,org.opendaylight.controller.statisticsmanager@default:default,org.opendaylight.controller.subnets.northbound@default:default,org.opendaylight.controller.switchmanager.implementation@default:default,org.opendaylight.controller.switchmanager.northbou
nd@default:default,org.opendaylight.controller.switchmanager@default:default,org.opendaylight.controller.thirdparty.com.sun.jersey.jersey-servlet@default:default,org.opendaylight.controller.thirdparty.net.sf.jung2@default:default,org.opendaylight.controller.thirdparty.org.apache.catalina.filters.CorsFilter@default:false,org.opendaylight.controller.thirdparty.org.openflow.openflowj@default:default,org.opendaylight.controller.topology.northbound@default:default,org.opendaylight.controller.topology.web@default:default,org.opendaylight.controller.topologymanager@default:default,org.opendaylight.controller.troubleshoot.web@default:default,org.opendaylight.controller.usermanager.implementation@default:default,org.opendaylight.controller.usermanager@default:default,org.opendaylight.controller.web@default:default"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.pde.ui.EquinoxLauncher">
-<booleanAttribute key="append.args" value="true"/>
-<booleanAttribute key="automaticAdd" value="true"/>
-<booleanAttribute key="automaticValidate" value="false"/>
-<stringAttribute key="bad_container_name" value="/distribution.opendaylight/opendaylight-osgi-launche"/>
-<stringAttribute key="bootstrap" value=""/>
-<stringAttribute key="checked" value="[NONE]"/>
-<booleanAttribute key="clearConfig" value="true"/>
-<stringAttribute key="configLocation" value="${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/opendaylight-osgi-launcher"/>
-<booleanAttribute key="default" value="true"/>
-<booleanAttribute key="default_auto_start" value="true"/>
-<intAttribute key="default_start_level" value="4"/>
-<stringAttribute key="deselected_workspace_plugins" value="org.opendaylight.controller.clustering.stub,org.opendaylight.controller.protocol_plugins.stub"/>
-<booleanAttribute key="includeOptional" value="false"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.JRE_CONTAINER" value="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
-<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="-os ${target.os} -ws ${target.ws} -arch ${target.arch} -nl ${target.nl} -consoleLog -console"/>
-<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.pde.ui.workbenchClasspathProvider"/>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Declipse.ignoreApp=true -Dosgi.noShutdown=true -Dorg.osgi.framework.system.packages.extra=sun.reflect,sun.reflect.misc,sun.misc -Dosgi.hook.configurators.include=org.eclipse.virgo.kernel.equinox.extensions.hooks.ExtensionsHookConfigurator -Dlogback.configurationFile=${project_loc:/distribution.opendaylight}/src/main/resources/configuration/logback.xml -Dorg.eclipse.gemini.web.tomcat.config.path=${project_loc:/distribution.opendaylight}/src/main/resources/configuration/tomcat-server.xml -Dosgi.frameworkClassPath=file:${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/.bundle_pool/plugins/org.eclipse.equinox.launcher_1.3.0.v20120522-1813.jar,file:${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/.bundle_pool/plugins/org.eclipse.virgo.kernel.equinox.extensions_3.6.0.RELEASE.jar,file:${workspace_loc}/.metadata/.plugins/org.eclipse.pde.core/.bundle_pool/plugins/org.eclipse.osgi_3.8.1.v20120830-144521.jar"/>
-<stringAttribute key="pde.version" value="3.3"/>
-<booleanAttribute key="show_selected_only" value="false"/>
-<stringAttribute key="target_bundles" value="ch.qos.logback.classic@default:default,ch.qos.logback.core@default:default,javax.activation@default:default,javax.annotation@default:default,javax.el@default:default,javax.persistence@default:default,javax.servlet.jsp@default:default,javax.servlet@default:default,javax.xml.rpc@default:default,org.apache.commons.io@default:default,org.apache.felix.gogo.command@default:default,org.apache.felix.gogo.runtime@default:default,org.apache.felix.gogo.shell@default:default,org.eclipse.equinox.console@default:default,org.eclipse.equinox.ds@1:true,org.eclipse.equinox.launcher@default:default,org.eclipse.equinox.util@default:default,org.eclipse.osgi.services@default:default,org.eclipse.osgi@-1:true"/>
-<booleanAttribute key="tracing" value="false"/>
-<booleanAttribute key="useCustomFeatures" value="false"/>
-<booleanAttribute key="useDefaultConfigArea" value="true"/>
-<stringAttribute key="workspace_bundles" value="com.cisco.csdn.debugtools.osgidbg@default:default,org.opendaylight.controller.appauth@default:default,org.opendaylight.controller.arphandler@default:default,org.opendaylight.controller.bundlescanner.implementation@default:default,org.opendaylight.controller.bundlescanner@default:default,org.opendaylight.controller.clustering.services-implementation@default:default,org.opendaylight.controller.clustering.services@default:default,org.opendaylight.controller.clustering.test@default:default,org.opendaylight.controller.commons.northbound@default:default,org.opendaylight.controller.concepts@default:default,org.opendaylight.controller.configuration.implementation@default:default,org.opendaylight.controller.configuration@default:default,org.opendaylight.controller.connectionmanager.implementation@default:default,org.opendaylight.controller.connectionmanager@default:default,org.opendaylight.controller.containermanager.implementation@default:default,org.opendaylight.controller.containermanager.it.implementation@default:default,org.opendaylight.controller.containermanager.northbound@default:default,org.opendaylight.controller.containermanager@default:default,org.opendaylight.controller.devices.web@default:default,org.opendaylight.controller.flowprogrammer.northbound@default:default,org.opendaylight.controller.flows.web@default:default,org.opendaylight.controller.forwarding.staticrouting.northbound@default:default,org.opendaylight.controller.forwarding.staticrouting@default:default,org.opendaylight.controller.forwardingrulesmanager.implementation@default:default,org.opendaylight.controller.forwardingrulesmanager@default:default,org.opendaylight.controller.hosttracker.implementation@default:default,org.opendaylight.controller.hosttracker.northbound@default:default,org.opendaylight.controller.hosttracker@default:default,org.opendaylight.controller.hosttracker_new.implementation@default:default,org.opendaylight.controller.hosttracker
_new@default:default,org.opendaylight.controller.logging.bridge@default:default,org.opendaylight.controller.model.flow-base@default:default,org.opendaylight.controller.model.flow-service@default:default,org.opendaylight.controller.model.flow-statistics@default:default,org.opendaylight.controller.model.inventory@default:default,org.opendaylight.controller.networkconfig.bridgedomain.northbound@default:default,org.opendaylight.controller.protocol_plugins.openflow@default:default,org.opendaylight.controller.routing.dijkstra_implementation@default:default,org.opendaylight.controller.sal-binding-api@default:default,org.opendaylight.controller.sal-common-util@default:default,org.opendaylight.controller.sal-common@default:default,org.opendaylight.controller.sal.connection.implementation@default:default,org.opendaylight.controller.sal.connection@default:default,org.opendaylight.controller.sal.implementation@default:default,org.opendaylight.controller.sal.networkconfiguration.implementation@default:default,org.opendaylight.controller.sal.networkconfiguration@default:default,org.opendaylight.controller.sal@default:default,org.opendaylight.controller.samples.loadbalancer.northbound@default:default,org.opendaylight.controller.samples.loadbalancer@default:default,org.opendaylight.controller.samples.sample-toaster-consumer@default:default,org.opendaylight.controller.samples.sample-toaster-provider@default:default,org.opendaylight.controller.samples.sample-toaster@default:default,org.opendaylight.controller.samples.simpleforwarding@default:default,org.opendaylight.controller.security@default:false,org.opendaylight.controller.statistics.northbound@default:default,org.opendaylight.controller.statisticsmanager.implementation@default:default,org.opendaylight.controller.statisticsmanager@default:default,org.opendaylight.controller.subnets.northbound@default:default,org.opendaylight.controller.switchmanager.implementation@default:default,org.opendaylight.controller.switchmanager.northbou
nd@default:default,org.opendaylight.controller.switchmanager@default:default,org.opendaylight.controller.thirdparty.com.sun.jersey.jersey-servlet@default:default,org.opendaylight.controller.thirdparty.net.sf.jung2@default:default,org.opendaylight.controller.thirdparty.org.apache.catalina.filters.CorsFilter@default:false,org.opendaylight.controller.thirdparty.org.openflow.openflowj@default:default,org.opendaylight.controller.topology.northbound@default:default,org.opendaylight.controller.topology.web@default:default,org.opendaylight.controller.topologymanager@default:default,org.opendaylight.controller.troubleshoot.web@default:default,org.opendaylight.controller.usermanager.implementation@default:default,org.opendaylight.controller.usermanager@default:default,org.opendaylight.controller.web@default:default"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="sonar:sonar"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${selected_resource_loc}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="sonar:sonar"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<intAttribute key="M2_THREADS" value="1"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="false"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${working_set:<?xml version="1.0" encoding="UTF-8"?> <resources> <item path="/distribution.opendaylight" type="4"/> </resources>}"/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/releasepom}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?pde version="3.8"?><target name="opendaylight" sequenceNumber="4">
-<locations>
-<location includeAllPlatforms="false" includeConfigurePhase="true" includeMode="slicer" includeSource="true" type="InstallableUnit">
-<unit id="org.apache.jasper" version="7.0.32.v201211201952"/>
-<unit id="javax.servlet.jsp.jstl.impl" version="1.2.0.v201210211230"/>
-<unit id="org.springframework.context" version="3.1.3.RELEASE"/>
-<unit id="org.eclipse.gemini.web.extender" version="2.2.0.RELEASE"/>
-<unit id="jackson-jaxrs" version="1.9.8"/>
-<unit id="com.sun.jersey.jersey-server" version="1.17.0"/>
-<unit id="org.eclipse.jdt.core.compiler.batch" version="3.8.0.I20120518-2145"/>
-<unit id="org.springframework.web" version="3.1.3.RELEASE"/>
-<unit id="com.google.gson" version="2.1.0"/>
-<unit id="org.springframework.security.config" version="3.1.3.RELEASE"/>
-<unit id="org.springframework.transaction" version="3.1.3.RELEASE"/>
-<unit id="org.eclipse.virgo.util.math" version="3.6.0.RELEASE"/>
-<unit id="org.apache.el" version="7.0.32.v201211081135"/>
-<unit id="org.springframework.web.servlet" version="3.1.3.RELEASE"/>
-<unit id="org.apache.felix.dependencymanager.shell" version="3.0.1"/>
-<unit id="log4j.over.slf4j" version="1.7.2"/>
-<unit id="com.springsource.org.aopalliance" version="1.0.0"/>
-<unit id="javax.annotation" version="1.1.0.v201209060031"/>
-<unit id="jcl.over.slf4j" version="1.7.2"/>
-<unit id="javax.mail.glassfish" version="1.4.1.v201108011116"/>
-<unit id="slf4j.api" version="1.7.2"/>
-<unit id="org.springframework.expression" version="3.1.3.RELEASE"/>
-<unit id="jackson-mapper-asl" version="1.9.8"/>
-<unit id="org.eclipse.gemini.web.tomcat" version="2.2.0.RELEASE"/>
-<unit id="org.apache.felix.gogo.command" version="0.8.0.v201108120515"/>
-<unit id="org.springframework.asm" version="3.1.3.RELEASE"/>
-<unit id="org.eclipse.equinox.ds" version="1.4.0.v20120522-1841"/>
-<unit id="org.eclipse.equinox.console" version="1.0.0.v20120522-1841"/>
-<unit id="org.apache.catalina" version="7.0.32.v201211201336"/>
-<unit id="chameleon-mbeans" version="1.0.0"/>
-<unit id="javax.servlet.jsp.jstl" version="1.2.0.v201105211821"/>
-<unit id="org.apache.felix.gogo.runtime" version="0.8.0.v201108120515"/>
-<unit id="org.apache.tomcat.util" version="7.0.32.v201211201952"/>
-<unit id="jackson-core-asl" version="1.9.8"/>
-<unit id="javax.activation" version="1.1.0.v201211130549"/>
-<unit id="org.eclipse.gemini.web.core" version="2.2.0.RELEASE"/>
-<unit id="org.eclipse.virgo.util.osgi" version="3.6.0.RELEASE"/>
-<unit id="org.apache.commons.io" version="2.3.0"/>
-<unit id="javax.servlet.jsp" version="2.2.0.v201112011158"/>
-<unit id="org.apache.tomcat.api" version="7.0.32.v201211081135"/>
-<unit id="com.sun.jersey.core" version="1.17.0"/>
-<unit id="org.springframework.security.taglibs" version="3.1.3.RELEASE"/>
-<unit id="org.springframework.security.web" version="3.1.3.RELEASE"/>
-<unit id="com.sun.jersey.client" version="1.17.0"/>
-<unit id="org.springframework.aop" version="3.1.3.RELEASE"/>
-<unit id="org.apache.coyote" version="7.0.32.v201211201952"/>
-<unit id="org.eclipse.virgo.kernel.equinox.extensions" version="3.6.0.RELEASE"/>
-<unit id="org.eclipse.osgi.services" version="3.3.100.v20120522-1822"/>
-<unit id="org.eclipse.virgo.util.common" version="3.6.0.RELEASE"/>
-<unit id="org.eclipse.equinox.util" version="1.0.400.v20120522-2049"/>
-<unit id="org.springframework.core" version="3.1.3.RELEASE"/>
-<unit id="org.apache.commons.fileupload" version="1.2.2"/>
-<unit id="org.codehaus.jettison.jettison" version="1.3.3"/>
-<unit id="org.eclipse.virgo.util.io" version="3.6.0.RELEASE"/>
-<unit id="org.apache.felix.gogo.shell" version="0.8.0.v201110170705"/>
-<unit id="org.apache.commons.lang3" version="3.1.0"/>
-<unit id="org.eclipse.equinox.cm" version="1.0.400.v20120522-1841"/>
-<unit id="org.springframework.beans" version="3.1.3.RELEASE"/>
-<unit id="javax.servlet" version="3.0.0.v201112011016"/>
-<unit id="org.eclipse.equinox.launcher" version="1.3.0.v20120522-1813"/>
-<unit id="javax.persistence" version="2.0.4.v201112161009"/>
-<unit id="org.eclipse.osgi" version="3.8.1.v20120830-144521"/>
-<unit id="ch.qos.logback.core" version="1.0.9"/>
-<unit id="javax.ejb" version="3.1.1.v201204261316"/>
-<unit id="org.apache.catalina.ha" version="7.0.32.v201211201952"/>
-<unit id="org.springframework.context.support" version="3.1.3.RELEASE"/>
-<unit id="org.eclipse.virgo.util.osgi.manifest" version="3.6.0.RELEASE"/>
-<unit id="javax.xml.rpc" version="1.1.0.v201005080400"/>
-<unit id="ch.qos.logback.classic" version="1.0.9"/>
-<unit id="org.springframework.security.core" version="3.1.3.RELEASE"/>
-<unit id="javax.el" version="2.2.0.v201108011116"/>
-<unit id="org.apache.juli.extras" version="7.0.32.v201211081135"/>
-<unit id="org.jboss.spec.javax.transaction.jboss-transaction-api_1.1_spec" version="1.0.1.Final"/>
-<unit id="org.apache.catalina.tribes" version="7.0.32.v201211201952"/>
-<unit id="org.eclipse.virgo.util.parser.manifest" version="3.6.0.RELEASE"/>
-<unit id="org.apache.felix.dependencymanager" version="3.1.0"/>
-<repository location="http://nexus.opendaylight.org/content/repositories/controllerp2site/"/>
-</location>
-</locations>
-</target>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
- <artifactId>distribution.opendaylight</artifactId>
- <version>0.2.0-SNAPSHOT</version>
- <packaging>pom</packaging>
- <prerequisites>
- <maven>3.0</maven>
- </prerequisites>
-
- <dependencies>
- <dependency>
- <groupId>ch.qos.logback</groupId>
- <artifactId>logback-classic</artifactId>
- </dependency>
- <dependency>
- <groupId>ch.qos.logback</groupId>
- <artifactId>logback-core</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.core</groupId>
- <artifactId>jackson-annotations</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.core</groupId>
- <artifactId>jackson-core</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.core</groupId>
- <artifactId>jackson-databind</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.datatype</groupId>
- <artifactId>jackson-datatype-json-org</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.jaxrs</groupId>
- <artifactId>jackson-jaxrs-base</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.jaxrs</groupId>
- <artifactId>jackson-jaxrs-json-provider</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.fasterxml.jackson.module</groupId>
- <artifactId>jackson-module-jaxb-annotations</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.google.code.gson</groupId>
- <artifactId>gson</artifactId>
- </dependency>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-client</artifactId>
- </dependency>
- <!-- Jersey for JAXRS -->
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-core</artifactId>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-codec</groupId>
- <artifactId>commons-codec</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-fileupload</groupId>
- <artifactId>commons-fileupload</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-io</groupId>
- <artifactId>commons-io</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-net</groupId>
- <artifactId>commons-net</artifactId>
- </dependency>
- <dependency>
- <groupId>eclipselink</groupId>
- <artifactId>javax.persistence</artifactId>
- </dependency>
- <dependency>
- <groupId>eclipselink</groupId>
- <artifactId>javax.resource</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>javax.servlet</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>javax.servlet.jsp</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.apache.felix.gogo.command</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.apache.felix.gogo.runtime</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.apache.felix.gogo.shell</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.equinox.cm</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.equinox.console</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.equinox.ds</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.equinox.launcher</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.equinox.util</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.osgi</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.osgi.services</artifactId>
- </dependency>
- <!-- Gemini Web -->
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.gemini.web.core</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.gemini.web.extender</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.gemini.web.tomcat</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.kernel.equinox.extensions</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.util.common</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.util.io</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.util.math</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.util.osgi</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.util.osgi.manifest</artifactId>
- </dependency>
- <dependency>
- <groupId>geminiweb</groupId>
- <artifactId>org.eclipse.virgo.util.parser.manifest</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-buffer</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-codec</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-codec-http</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-common</artifactId>
- </dependency>
-
- <!--Netty-->
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-handler</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-transport</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.activation</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.annotation</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.ejb</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.el</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.mail.glassfish</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.servlet.jsp.jstl</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.servlet.jsp.jstl.impl</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>javax.xml.rpc</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.catalina</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.catalina.ha</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.catalina.tribes</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.coyote</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.el</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.jasper</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.juli.extras</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.tomcat.api</artifactId>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.tomcat.util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.aopalliance</groupId>
- <artifactId>com.springsource.org.aopalliance</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-lang3</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.dependencymanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.dependencymanager.shell</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.fileinstall</artifactId>
- </dependency>
- <!-- felix webconsole -->
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.webconsole</artifactId>
- <classifier>all</classifier>
- </dependency>
-
- <dependency>
- <groupId>org.codehaus.jettison</groupId>
- <artifactId>jettison</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.eclipse.equinox.http</groupId>
- <artifactId>servlet</artifactId>
- </dependency>
- <dependency>
- <groupId>org.eclipse.persistence</groupId>
- <artifactId>org.eclipse.persistence.antlr</artifactId>
- </dependency>
- <dependency>
- <groupId>org.eclipse.persistence</groupId>
- <artifactId>org.eclipse.persistence.core</artifactId>
- </dependency>
- <dependency>
- <groupId>org.eclipse.persistence</groupId>
- <artifactId>org.eclipse.persistence.moxy</artifactId>
- </dependency>
- <dependency>
- <groupId>org.javassist</groupId>
- <artifactId>javassist</artifactId>
- </dependency>
- <dependency>
- <groupId>org.jboss.spec.javax.transaction</groupId>
- <artifactId>jboss-transaction-api_1.1_spec</artifactId>
- </dependency>
- <dependency>
- <groupId>org.jolokia</groupId>
- <artifactId>jolokia-osgi</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.json</groupId>
- <artifactId>json</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>appauth</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>arphandler</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>bundlescanner</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>bundlescanner.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services-implementation</artifactId>
- </dependency>
-
- <!-- testing dependencies I'm pretty sure we should trim -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.test</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.httpclient</artifactId>
- </dependency>
-
- <!-- Northbound bundles -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>connectionmanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>connectionmanager.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>connectionmanager.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>containermanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>containermanager.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>containermanager.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>controllermanager.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>devices.web</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>flowprogrammer.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>flows.web</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwarding.staticrouting</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwarding.staticrouting.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwardingrulesmanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwardingrulesmanager.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>httpservice-bridge</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>jolokia-bridge</artifactId>
- </dependency>
- <!-- Debug and logging -->
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>logging.bridge</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>networkconfig.bridgedomain.northbound</artifactId>
- </dependency>
-
- <!-- Neutron -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>networkconfig.neutron</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>networkconfig.neutron.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>networkconfig.neutron.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>osgi-brandfragment.web</artifactId>
- </dependency>
-
- <!-- Southbound bundles -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>protocol_plugins.openflow</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>routing.dijkstra_implementation</artifactId>
- </dependency>
-
- <!-- SAL bundles -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- </dependency>
-
- <!-- SAL Extension bundles -->
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal.connection</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal.connection.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal.networkconfiguration</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal.networkconfiguration.implementation</artifactId>
- </dependency>
-
- <!-- samples -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>samples.loadbalancer</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>samples.loadbalancer.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>samples.simpleforwarding</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>security</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>statistics.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>statisticsmanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>statisticsmanager.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>subnets.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topology.northbound</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topology.web</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topologymanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>troubleshoot.web</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>usermanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>usermanager.implementation</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>usermanager.northbound</artifactId>
- </dependency>
-
- <!-- Web bundles -->
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>web</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>com.sun.jersey.jersey-servlet</artifactId>
- </dependency>
-
- <!-- Third parties from opendaylight released -->
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>net.sf.jung2</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>org.openflow.openflowj</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.ow2.asm</groupId>
- <artifactId>asm-all</artifactId>
- </dependency>
- <!-- Visual VM hook -->
- <dependency>
- <groupId>org.ow2.chameleon.management</groupId>
- <artifactId>chameleon-mbeans</artifactId>
- </dependency>
-
- <!-- Third party depedencies -->
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>jcl-over-slf4j</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>log4j-over-slf4j</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.aop</artifactId>
- </dependency>
- <!-- Add Pax Exam -->
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.asm</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.beans</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.context</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.context.support</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.core</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.expression</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.transaction</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.web</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework</groupId>
- <artifactId>org.springframework.web.servlet</artifactId>
- </dependency>
- <!-- Spring security -->
- <dependency>
- <groupId>org.springframework.security</groupId>
- <artifactId>spring-security-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework.security</groupId>
- <artifactId>spring-security-core</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework.security</groupId>
- <artifactId>spring-security-taglibs</artifactId>
- </dependency>
- <dependency>
- <groupId>org.springframework.security</groupId>
- <artifactId>spring-security-web</artifactId>
- </dependency>
- <dependency>
- <groupId>virgomirror</groupId>
- <artifactId>org.eclipse.jdt.core.compiler.batch</artifactId>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-dependency-plugin</artifactId>
- <version>2.8</version>
- <executions>
- <execution>
- <id>unpack-provided-configs</id>
- <goals>
- <goal>unpack-dependencies</goal>
- </goals>
- <phase>generate-resources</phase>
- <configuration>
- <outputDirectory>${project.build.directory}/configuration</outputDirectory>
- <includeArtifactIds>sal-rest-connector-config,config-netty-config,md-sal-config,netconf-config,toaster-config,netconf-connector-config,sal-clustering-config</includeArtifactIds>
- <includes>**\/*.xml,**/*.conf</includes>
- <excludeTransitive>true</excludeTransitive>
- <ignorePermissions>false</ignorePermissions>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <artifactId>maven-assembly-plugin</artifactId>
- <version>2.3</version>
- <executions>
- <execution>
- <id>distro-assembly</id>
- <goals>
- <goal>single</goal>
- </goals>
- <phase>package</phase>
- <configuration>
- <descriptors>
- <descriptor>src/assemble/bin.xml</descriptor>
- </descriptors>
- <finalName>${project.artifactId}</finalName>
- </configuration>
- </execution>
- </executions>
- </plugin>
-
- <!--Make checkstyle ignore initial xml configuration files by overriding its configuration from parent-->
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- <version>${checkstyle.version}</version>
- <configuration>
- <excludes>**\/target\/,**\/bin\/,**\/target-ide\/,**\/configuration\/initial\/</excludes>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>buildnumber-maven-plugin</artifactId>
- <version>1.2</version>
- <configuration>
- <doCheck>false</doCheck>
- <doUpdate>false</doUpdate>
- <revisionOnScmFailure>VersionUnknown</revisionOnScmFailure>
- </configuration>
- <executions>
- <execution>
- <goals>
- <goal>create</goal>
- </goals>
- <phase>validate</phase>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- </scm>
-
- <profiles>
- <profile>
- <id>notduringrelease</id>
- <activation>
- <property>
- <name>!DOINGRELEASE</name>
- </property>
- </activation>
- <dependencies>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>ietf-netconf-monitoring</artifactId>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>ietf-netconf-monitoring-extension</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <version>2.4</version>
- </dependency>
- <dependency>
- <groupId>org.apache.servicemix.bundles</groupId>
- <artifactId>org.apache.servicemix.bundles.xerces</artifactId>
- <version>2.11.0_1</version>
- </dependency>
- <dependency>
- <groupId>org.bouncycastle</groupId>
- <artifactId>bcpkix-jdk15on</artifactId>
- </dependency>
- <dependency>
- <groupId>org.bouncycastle</groupId>
- <artifactId>bcprov-jdk15on</artifactId>
- </dependency>
- <dependency>
- <groupId>org.eclipse.birt.runtime.3_7_1</groupId>
- <artifactId>org.apache.xml.resolver</artifactId>
- <version>1.2.0</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>concepts</artifactId>
- </dependency>
-
- <!-- config-->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-manager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netconf-connector</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-directory-xml-adapter</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-file-xml-adapter</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>filter-valve</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>logback-config</artifactId>
- </dependency>
-
- <!-- Netconf -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-client</artifactId>
- </dependency>
-
- <!--Netconf config-->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-config-dispatcher</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-mapping-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-monitoring</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-netty-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-ssh</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-auth</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-usermanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-tcp</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-config-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-event-executor-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-threadgroup-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-timer-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>protocol-framework</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-broker-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-broker-impl</artifactId>
- </dependency>
- <!-- md-sal -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-compatibility</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-connector-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-core-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-core-spi</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-inmemory-datastore</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-netconf-connector</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-remote</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-rest-connector</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-rest-connector-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netty-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>md-sal-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-connector-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>toaster-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-rest-docgen</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-restconf-broker</artifactId>
- </dependency>
-
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>shutdown-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>shutdown-impl</artifactId>
- </dependency>
-
- <!-- threadpool -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>yang-jmx-generator</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.md</groupId>
- <artifactId>forwardingrules-manager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.md</groupId>
- <artifactId>inventory-manager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.md</groupId>
- <artifactId>statistics-manager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.md</groupId>
- <artifactId>topology-lldp-discovery</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>liblldp</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.md</groupId>
- <artifactId>topology-manager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-base</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-service</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-statistics</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-topology</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- </dependency>
-
- <!-- toaster example I'm pretty sure we should trim -->
- <dependency>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>sample-toaster</artifactId>
- <version>${mdsal.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>sample-toaster-consumer</artifactId>
- <version>${mdsal.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>sample-toaster-provider</artifactId>
- <version>${mdsal.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.sshd</groupId>
- <artifactId>sshd-core</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-data-codec</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-spi</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-model-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-type-provider</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>concepts</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>object-cache-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>object-cache-guava</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>restconf-client-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>restconf-client-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>util</artifactId>
- </dependency>
- <!-- yangtools dependencies I'm pretty sure we can trim -->
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-util</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-parser-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-parser-impl</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-codec-gson</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-composite-node</artifactId>
- </dependency>
- <!-- yang model dependencies -->
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-inet-types</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-restconf</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-topology</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types-20130715</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>opendaylight-l2-types</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>yang-ext</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>antlr4-runtime-osgi-nohead</artifactId>
- <version>4.0</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>xtend-lib-osgi</artifactId>
- <version>2.4.3</version>
- </dependency>
- <dependency>
- <groupId>org.openexi</groupId>
- <artifactId>nagasena</artifactId>
- </dependency>
- <dependency>
- <groupId>org.openexi</groupId>
- <artifactId>nagasena-rta</artifactId>
- </dependency>
- <dependency>
- <groupId>org.zeromq</groupId>
- <artifactId>jeromq</artifactId>
- <version>0.3.1</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-clustering-config</artifactId>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-servlets</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-client</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-continuation</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-util</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-server</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-io</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-http</artifactId>
- <version>8.1.14.v20131031</version>
- </dependency>
- </dependencies>
- </profile>
- <profile>
- <id>integrationtests</id>
- <activation>
- <activeByDefault>false</activeByDefault>
- </activation>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sanitytest</artifactId>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-dependency-plugin</artifactId>
- <version>2.8</version>
- <configuration>
- <artifactItems>
- <artifactItem>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sanitytest</artifactId>
- <type>jar</type>
- </artifactItem>
- </artifactItems>
- </configuration>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sanitytest</artifactId>
- <version>${sanitytest.version}</version>
- </dependency>
- </dependencies>
- <executions>
- <execution>
- <id>copy</id>
- <goals>
- <goal>copy</goal>
- </goals>
- <phase>package</phase>
- </execution>
- <execution>
- <id>unpack-provided-configs</id>
- <goals>
- <goal>unpack-dependencies</goal>
- </goals>
- <phase>generate-resources</phase>
- <configuration>
- <outputDirectory>${project.build.directory}/configuration</outputDirectory>
- <includeArtifactIds>sal-rest-connector-config,config-netty-config,md-sal-config,netconf-config,toaster-config,netconf-connector-config,sal-clustering-config</includeArtifactIds>
- <includes>**\/*.xml,**/*.conf</includes>
- <excludeTransitive>true</excludeTransitive>
- <ignorePermissions>false</ignorePermissions>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>exec-maven-plugin</artifactId>
- <version>1.2.1</version>
- <configuration>
- <executable>${java.home}/bin/java</executable>
- <arguments>
- <argument>-cp</argument>
- <argument>./target/dependency/*</argument>
- <argument>org.opendaylight.controller.distribution.Sanity</argument>
- </arguments>
- <environmentVariables>
- <JAVA_HOME>${java.home}</JAVA_HOME>
- </environmentVariables>
- </configuration>
- <executions>
- <execution>
- <id>sanity-test</id>
- <goals>
- <goal>exec</goal>
- </goals>
- <phase>package</phase>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- </profile>
- <profile>
- <id>docs</id>
- <activation>
- <activeByDefault>false</activeByDefault>
- </activation>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>swagger-ui</artifactId>
- <version>0.1.0-SNAPSHOT</version>
- </dependency>
- </dependencies>
- </profile>
- </profiles>
-</project>
+++ /dev/null
-rem Inject the sanitytest jar as a controller plugin
-copy .\target\dependency\sanitytest*.jar .\target\distribution.opendaylight-osgipackage\opendaylight\plugins
-
-rem Store the current working directory in a variable so that we can get back to it later
-set cwd=%cd%
-
-rem Switch to the distribution folder
-cd .\target\distribution.opendaylight-osgipackage\opendaylight
-
-rem Run the controller
-cmd.exe /c run.bat
-
-rem Store the exit value of the controller in a variable
-set success=%ERRORLEVEL%
-
-rem Switch back to the directory from which this script was invoked
-cd %cwd%
-
-rem Remove the sanitytest jar from the plugins directory
-del .\target\distribution.opendaylight-osgipackage\opendaylight\plugins\sanitytest*.jar
-
-rem Exit using the exit code that we had captured earlier after running the controller
-exit /b %SUCCESS%
\ No newline at end of file
+++ /dev/null
-# Inject the sanitytest jar as a controller plugin
-cp ./target/dependency/sanitytest*.jar ./target/distribution.opendaylight-osgipackage/opendaylight/plugins
-
-# Store the current working directory in a variable so that we can get back to it later
-cwd=`pwd`
-
-# Switch to the distribution folder
-cd ./target/distribution.opendaylight-osgipackage/opendaylight/
-
-# Run the controller
-./run.sh
-
-# Store the exit value of the controller in a variable
-success=`echo $?`
-
-# Switch back to the directory from which this script was invoked
-cd $cwd
-
-# Remove the sanitytest jar from the plugins directory
-rm ./target/distribution.opendaylight-osgipackage/opendaylight/plugins/sanitytest*.jar
-
-# Exit using the exit code that we had captured earlier after running the controller
-exit $success
-
+++ /dev/null
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
- <id>osgipackage</id>
- <formats>
- <format>dir</format>
- <format>zip</format>
- </formats>
- <includeBaseDirectory>false</includeBaseDirectory>
- <dependencySets>
- <dependencySet>
- <outputDirectory>opendaylight/plugins</outputDirectory>
- <excludes>
- <exclude>equinoxSDK381:org.eclipse.osgi</exclude>
- <exclude>equinoxSDK381:org.eclipse.equinox.console</exclude>
- <exclude>equinoxSDK381:org.eclipse.equinox.launcher</exclude>
- <exclude>equinoxSDK381:org.eclipse.equinox.ds</exclude>
- <exclude>equinoxSDK381:org.eclipse.equinox.util</exclude>
- <exclude>equinoxSDK381:org.eclipse.osgi.services</exclude>
- <exclude>virgomirror:org.eclipse.jdt.core.compiler.batch</exclude>
- <exclude>org.apache.felix:org.apache.felix.fileinstall</exclude>
- <exclude>geminiweb:org.eclipse.virgo.kernel.equinox.extensions</exclude>
- <exclude>org.slf4j:slf4j-api</exclude>
- <exclude>ch.qos.logback:logback-core</exclude>
- <exclude>ch.qos.logback:logback-classic</exclude>
- <exclude>com.sun.jersey:jersey-core</exclude>
- <exclude>com.sun.jersey:jersey-server</exclude>
- <exclude>org.opendaylight.controller:logging.bridge</exclude>
- <exclude>org.opendaylight.controller:sanitytest</exclude>
- </excludes>
- <outputFileNameMapping>
- ${artifact.groupId}.${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension}
- </outputFileNameMapping>
- <unpack>false</unpack>
- <scope>runtime</scope>
- <useTransitiveDependencies>false</useTransitiveDependencies>
- </dependencySet>
- <dependencySet>
- <outputDirectory>opendaylight/lib</outputDirectory>
- <includes>
- <include>equinoxSDK381:org.eclipse.osgi</include>
- <include>equinoxSDK381:org.eclipse.equinox.console</include>
- <include>equinoxSDK381:org.eclipse.equinox.launcher</include>
- <include>equinoxSDK381:org.eclipse.equinox.ds</include>
- <include>equinoxSDK381:org.eclipse.equinox.util</include>
- <include>equinoxSDK381:org.eclipse.osgi.services</include>
- <include>virgomirror:org.eclipse.jdt.core.compiler.batch</include>
- <include>org.apache.felix:org.apache.felix.fileinstall</include>
- <include>geminiweb:org.eclipse.virgo.kernel.equinox.extensions</include>
- <include>org.slf4j:slf4j-api</include>
- <include>ch.qos.logback:logback-core</include>
- <include>ch.qos.logback:logback-classic</include>
- <include>com.sun.jersey:jersey-core</include>
- <include>com.sun.jersey:jersey-server</include>
- <include>org.opendaylight.controller:logging.bridge</include>
- </includes>
- <useTransitiveDependencies>false</useTransitiveDependencies>
- <outputFileNameMapping>
- ${artifact.artifactId}-${artifact.version}${dashClassifier?}.${artifact.extension}
- </outputFileNameMapping>
- <unpack>false</unpack>
- <scope>runtime</scope>
- </dependencySet>
- </dependencySets>
- <fileSets>
- <fileSet>
- <directory>
- src/main/resources/
- </directory>
- <excludes>
- <exclude>version.properties</exclude>
- <exclude>configuration/config.ini</exclude>
- </excludes>
- <outputDirectory>
- opendaylight/
- </outputDirectory>
- </fileSet>
- <fileSet>
- <directory>${project.build.directory}/configuration/initial</directory>
- <outputDirectory>/opendaylight/configuration/initial</outputDirectory>
- <excludes>
- <exclude>**/META-INF/**</exclude>
- </excludes>
- </fileSet>
- </fileSets>
- <files>
- <file>
- <source>src/main/resources/version.properties</source>
- <outputDirectory>opendaylight</outputDirectory>
- <filtered>true</filtered>
- </file>
- <file>
- <source>src/main/resources/configuration/config.ini</source>
- <outputDirectory>opendaylight/configuration</outputDirectory>
- <filtered>true</filtered>
- </file>
- </files>
-</assembly>
+++ /dev/null
-osgi.bundles=\
- reference\:file\:../lib/org.apache.felix.fileinstall-3.1.6.jar@1:start,\
- reference\:file\:../lib/org.eclipse.jdt.core.compiler.batch-3.8.0.I20120518-2145.jar@1:start,\
- reference\:file\:../lib/org.eclipse.equinox.ds-1.4.0.v20120522-1841.jar@2:start,\
- reference\:file\:../lib/org.eclipse.equinox.util-1.0.400.v20120522-2049.jar@2:start,\
- reference\:file\:../lib/org.eclipse.osgi.services-3.3.100.v20120522-1822@2:start,\
- reference\:file\:../lib/org.eclipse.equinox.console-1.0.0.v20120522-1841.jar@start,\
- reference\:file\:../lib/slf4j-api-1.7.2.jar@1:start,\
- reference\:file\:../lib/logback-classic-1.0.9.jar@1:start,\
- reference\:file\:../lib/logback-core-1.0.9.jar@1:start,\
- reference\:file\:../lib/logging.bridge-${logging.bridge.version}@1:start,\
- reference\:file\:../lib/jersey-core-1.17.jar@2:start,\
- reference\:file\:../lib/jersey-server-1.17.jar@2:start
-
-# Netconf startup configuration
-
-# Netconf tcp address:port is optional
-#netconf.tcp.address=127.0.0.1
-#netconf.tcp.port=8383
-
-# Netconf tcp address:port is optional
-netconf.ssh.address=0.0.0.0
-netconf.ssh.port=1830
-netconf.ssh.pk.path = ./configuration/RSA.pk
-netconf.ssh.default.user = netconf
-netconf.ssh.default.password = netconf
-
-
-netconf.config.persister.active=1,2
-# read startup configuration
-netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.directory.xml.XmlDirectoryStorageAdapter
-netconf.config.persister.1.properties.directoryStorage=configuration/initial/
-# include only xml files, files with other extensions will be skipped, multiple extensions are permitted e.g. netconf.config.persister.1.properties.includeExtensions=xml,cfg,config
-netconf.config.persister.1.properties.includeExtensions=xml
-netconf.config.persister.1.readonly=true
-
-netconf.config.persister.2.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter
-netconf.config.persister.2.properties.fileStorage=configuration/current/controller.currentconfig.xml
-netconf.config.persister.2.properties.numberOfBackups=1
-
-# Set Default start level for framework
-osgi.bundles.defaultStartLevel=4
-# Extra packages to import from the boot class loader
-org.osgi.framework.system.packages.extra=sun.reflect,sun.reflect.misc,sun.misc,sun.nio.ch
-# This is not Eclipse App
-eclipse.ignoreApp=true
-# Don't shutdown equinox if the eclipse App has ended,
-# which is our case because we are not running any eclipse application
-osgi.noShutdown=true
-# Clean any cached data on restart of the framework
-osgi.clean=true
-
-# https://bugs.eclipse.org/bugs/show_bug.cgi?id=325578
-# Extend the framework to avoid the resources to be presented with
-# a URL of type bundleresource: but to be presented as file:
-osgi.hook.configurators.include=org.eclipse.virgo.kernel.equinox.extensions.hooks.ExtensionsHookConfigurator
-
-# Directory from where the fileinstall will monitor for new bundles
-felix.fileinstall.dir=./plugins
-# Immediately learn new bundles at startup
-felix.fileinstall.noInitialDelay=true
-# Auto start the bundles at level 4
-felix.fileinstall.start.level=4
-# Avoid to auto-install following bundles, that means those need
-# to be started manually or in other way like osgi.bundles
-felix.fileinstall.filter=^(?!org.apache.felix.fileinstall).*
-
-# logback configuration
-logback.configurationFile=configuration/logback.xml
-
-# Container configuration
-container.profile = Container
-
-# Connection manager configuration
-connection.scheme = ANY_CONTROLLER_ONE_MASTER
-
-# Embedded Tomcat configuration File
-org.eclipse.gemini.web.tomcat.config.path=configuration/tomcat-server.xml
-org.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true
-
-# Open Flow related system parameters
-# TCP port on which the controller is listening (default 6633)
-# of.listenPort=6633
-# IP address of the controller (default: wild card)
-# of.address = 127.0.0.1
-# The time (in milliseconds) the controller will wait for a response after sending a Barrier Request or a Statistic Request message (default 2000 msec)
-# of.messageResponseTimer=2000
-# The switch liveness timeout value (default 60500 msec)
-# of.switchLivenessTimeout=60500
-# The size of the queue holding pending statistics requests (default 64). For large networks of n switches, it is recommended to set the queue size to n
-# of.statsQueueSize = 64
-# The flow statistics polling interval in second (default 10 sec)
-# of.flowStatsPollInterval=10
-# The port statistics polling interval in second (default 5 sec)
-# of.portStatsPollInterval=5
-# The description statistics polling interval in second (default 60 sec)
-# of.descStatsPollInterval=60
-# The table statistics polling interval in second (default 10 sec)
-# of.tableStatsPollInterval=10
-# The maximum number of asynchronous messages can be sent before sending a Barrier Request (default 100)
-# of.barrierMessagePriorCount=100
-# The interval which determines how often the discovery packets should be sent (default 300 sec)
-# of.discoveryInterval=300
-# The timeout multiple of discovery interval
-# of.discoveryTimeoutMultiple=2
-# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 30 sec)
-# of.discoveryThreshold=30
-# The maximum number of ports handled in one discovery batch (default 512)
-# of.discoveryBatchMaxPorts=512
-
-# OVSDB configuration
-# ovsdb plugin supports both active and passive connections. It listens on port 6640 by default for Active connections.
-ovsdb.listenPort=6640
-
-# ovsdb creates Openflow nodes/bridges. This configuration configures the bridge's Openflow version.
-# default Openflow version = 1.3, we also support 1.0.
-ovsdb.of.version=1.3
-
-# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is
-# disabled by default.
-# ovsdb.l3.fwd.enabled=yes
-
-# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
-# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
-# specific arp/neighDiscovery lookup.
-# ovsdb.l3gateway.mac=00:00:5E:00:02:01
-
-# TLS configuration
-# To enable TLS, set secureChannelEnabled=true and specify the location of controller Java KeyStore and TrustStore files.
-# The Java KeyStore contains controller's private key and certificate. The Java TrustStore contains the trusted certificate
-# entries, including switches' Certification Authority (CA) certificates. For example,
-# secureChannelEnabled=true
-# controllerKeyStore=./configuration/ctlKeyStore
-# controllerKeyStorePassword=xxxxxxxx (this password should match the password used for KeyStore generation and at least 6 characters)
-# controllerTrustStore=./configuration/ctlTrustStore
-# controllerTrustStorePassword=xxxxxxxx (this password should match the password used for TrustStore generation and at least 6 characters)
-
-secureChannelEnabled=false
-controllerKeyStore=
-controllerKeyStorePassword=
-controllerTrustStore=
-controllerTrustStorePassword=
-
-# User Manager configurations
-enableStrongPasswordCheck = false
-
-#Jolokia configurations
-org.jolokia.listenForHttpService=false
-
-# Logging configuration for Tomcat-JUL logging
-java.util.logging.config.file=configuration/tomcat-logging.properties
-
-#Hosttracker hostsdb key scheme setting
-hosttracker.keyscheme=IP
-
-# LISP Flow Mapping configuration
-# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
-lisp.mappingOverwrite = true
-# Enable the Solicit-Map-Request (SMR) mechanism
-lisp.smr = false
-
-#RESTConf websocket listen port (default is 8181)
-restconf.websocket.port=8181
+++ /dev/null
-<Context crossContext="true" sessionCookiePath="/" useHttpOnly="false"/>
+++ /dev/null
-<!--
- ~ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- ~
- ~ This program and the accompanying materials are made available under the
- ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
- ~ and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-
-<Host>
- <!-- Filters are allowed here, only serving as a template -->
- <filter-template>
- <filter-name>CorsFilter</filter-name>
- <filter-class>org.apache.catalina.filters.CorsFilter</filter-class>
- <init-param>
- <param-name>cors.allowed.origins</param-name>
- <param-value>*</param-value>
- </init-param>
- <init-param>
- <param-name>cors.allowed.methods</param-name>
- <param-value>GET,POST,HEAD,OPTIONS,PUT,DELETE</param-value>
- </init-param>
- <init-param>
- <param-name>cors.allowed.headers</param-name>
- <param-value>Content-Type,X-Requested-With,accept,authorization,
- origin,Origin,Access-Control-Request-Method,Access-Control-Request-Headers
- </param-value>
- </init-param>
- <init-param>
- <param-name>cors.exposed.headers</param-name>
- <param-value>Access-Control-Allow-Origin,Access-Control-Allow-Credentials</param-value>
- </init-param>
- <init-param>
- <param-name>cors.support.credentials</param-name>
- <param-value>true</param-value>
- </init-param>
- <init-param>
- <param-name>cors.preflight.maxage</param-name>
- <param-value>10</param-value>
- </init-param>
- </filter-template>
-
- <Context path="/restconf">
- <filter>
- <filter-name>CorsFilter</filter-name>
- <!-- init params can be added/overriden if template is used -->
- </filter>
- <!-- references to templates without <filter> declaration are not allowed -->
- <filter-mapping>
- <filter-name>CorsFilter</filter-name>
- <url-pattern>/*</url-pattern>
- </filter-mapping>
- </Context>
-
- <Context path="/apidoc">
- <filter>
- <filter-name>CorsFilter</filter-name>
- <!-- init params can be added/overriden if template is used -->
- </filter>
- <!-- references to templates without <filter> declaration are not allowed -->
- <filter-mapping>
- <filter-name>CorsFilter</filter-name>
- <url-pattern>/*</url-pattern>
- </filter-mapping>
- </Context>
-
-
-</Host>
+++ /dev/null
- <configuration scan="true">
-
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
- <encoder>
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} %X{akkaSource} - %msg%n</pattern>
- </encoder>
- </appender>
- <appender name="opendaylight.log" class="ch.qos.logback.core.rolling.RollingFileAppender">
- <file>logs/opendaylight.log</file>
-
- <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
- <fileNamePattern>logs/opendaylight.%d.log.zip</fileNamePattern>
- <maxHistory>1</maxHistory>
- </rollingPolicy>
-
- <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
- <maxFileSize>10MB</maxFileSize>
- </triggeringPolicy>
-
- <encoder>
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{35} - %msg%n</pattern>
- </encoder>
- </appender>
- <appender name="audit-file" class="ch.qos.logback.core.FileAppender">
- <file>logs/audit.log</file>
- <append>true</append>
- <encoder>
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} %msg %n</pattern>
- </encoder>
- </appender>
- <root level="error">
- <appender-ref ref="STDOUT" />
- <appender-ref ref="opendaylight.log" />
- </root>
-
- <!-- Base log level -->
- <logger name="org.opendaylight" level="INFO"/>
-
-
- <!-- Controller log level -->
- <logger name="org.opendaylight.controller" level="INFO"/>
-
- <!-- OSGi logging bridge -->
- <logger name="org.opendaylight.controller.logging.bridge" level="WARN"/>
- <logger name="org.opendaylight.controller.logging.bridge.internal" level="WARN"/>
-
- <!-- Netty -->
- <logger name="io.netty" level="WARN"/>
-
- <!-- Openflow Protocol Plugin -->
- <logger name="org.opendaylight.controller.protocol_plugin.openflow" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.internal.DiscoveryService" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.internal.InventoryService" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.internal.InventoryServiceShim" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.internal.TopologyServices" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.internal.TopologyServiceShim" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.core.internal.Controller" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.core.internal.SwitchHandler" level="INFO"/>
- <logger name="org.opendaylight.controller.protocol_plugin.openflow.core.internal.SwitchIOSecureService" level="INFO"/>
- <!-- SAL -->
- <logger name="org.opendaylight.controller.sal" level="INFO"/>
- <logger name="org.opendaylight.controller.sal.implementation" level="INFO"/>
- <logger name="org.opendaylight.controller.sal.implementation.internal.Inventory" level="INFO"/>
- <logger name="org.opendaylight.controller.sal.implementation.internal.Topology" level="INFO"/>
- <!-- remoterpc router and remoterpc routing table -->
- <logger name="org.opendaylight.controller.sal.connector.remoterpc" level="INFO" />
- <!-- Functional Modules -->
- <logger name="org.opendaylight.controller.arphandler" level="INFO"/>
- <logger name="org.opendaylight.controller.hosttracker" level="INFO"/>
- <logger name="org.opendaylight.controller.routing" level="INFO"/>
- <logger name="org.opendaylight.controller.forwardingrulesmanager" level="INFO"/>
- <logger name="org.opendaylight.controller.forwarding.ipswitch" level="INFO"/>
- <logger name="org.opendaylight.controller.switchmanager" level="INFO"/>
- <logger name="org.opendaylight.controller.topologymanager" level="INFO"/>
- <logger name="org.opendaylight.controller.usermanager" level="INFO"/>
- <!-- Web modules -->
- <logger name="org.opendaylight.controller.web" level="INFO"/>
-
- <!-- Clustering -->
- <logger name="org.opendaylight.controller.cluster" level="INFO"/>
- <logger name="org.opendaylight.controller.cluster.datastore.node" level="INFO"/>
-
- <!--
- Unsynchronized controller startup causes models to crop up in random
- order, which results in temporary inability to fully resolve a model,
- which is usually resolved. Do not flood console, but keep the messages,
- as they may be indicating and error.
- -->
- <logger name="org.opendaylight.yangtools.yang.parser.util.ModuleDependencySort" level="INFO" additivity="false">
- <appender-ref ref="opendaylight.log"/>
- </logger>
-
- <!-- BGPCEP plugin -->
- <logger name="org.opendaylight.protocol" level="INFO"/>
- <logger name="org.opendaylight.bgpcep" level="INFO"/>
-
- <!-- To debug MD-SAL schema loading issues, uncomment this -->
- <!--logger name="org.opendaylight.yangtools.yang.parser.impl.util.URLSchemaContextResolver" level="DEBUG"/>
- <logger name="org.opendaylight.yangtools.sal.binding.generator.impl.RuntimeGeneratedMappingServiceImpl" level="TRACE"/-->
-
- <!-- additivity=false ensures analytics data only goes to the analytics log -->
- <logger name="audit" level="INFO" additivity="false">
- <appender-ref ref="audit-file"/>
- </logger>
-</configuration>
+++ /dev/null
-Directory where the opendaylight controller modules store their configuration files
+++ /dev/null
-############################################################
-# Configuration file for tomcat logging
-############################################################
-# Handlers:
-# "handlers" specifies a comma separated list of log Handler
-# classes. These handlers will be installed during VM startup.
-# Note that these classes must be on the system classpath.
-# Following line configures a ConsoleHandler and a FileHandler
-
-handlers= java.util.logging.FileHandler,java.util.logging.ConsoleHandler
-
-############################################################
-# Handler specific properties
-# Describes specific configuration info for Handlers
-# JUL does not support rolling file handler based on date
-# For now we will keep count of files to 5 with rolling size of 10MB
-############################################################
-
-java.util.logging.FileHandler.pattern = logs/tomcat%g.log
-java.util.logging.FileHandler.limit = 104857600
-java.util.logging.FileHandler.count = 5
-java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter
-java.util.logging.FileHandler.append = true
-java.util.logging.FileHandler.level = INFO
-
-# Limit the message that are printed on the console to SEVERE and above.
-java.util.logging.ConsoleHandler.level = WARNING
-java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
-
-# SimpleFormatter output format to print one-line log message like this:
-# <YYYY>-<MM>-<DD> <HH>:<MM>:<SS> <TimeZone> [<SOURCE>] ><LOG_LEVEL> <LOGGER> <MESSAGE> <THROWABLE>
-#
-java.util.logging.SimpleFormatter.format=%1$tF %1$tT %1$tZ [%3$s] %4$s %2$s %5$s%6$s%n
-
-############################################################
-# Facility specific properties.
-# Provides extra control for each logger.
-############################################################
-
-# For example, set the com.xyz.foo logger to only log SEVERE
-# messages
-#org.apache.catalina = SEVERE
+++ /dev/null
-<?xml version='1.0' encoding='utf-8'?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<Server>
- <!--APR library loader. Documentation at /docs/apr.html -->
- <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
- <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
- <Listener className="org.apache.catalina.core.JasperListener" />
- <!-- Prevent memory leaks due to use of particular java/javax APIs-->
- <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
- <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
- <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
-
- <Service name="Catalina">
- <Connector port="8080" protocol="HTTP/1.1"
- connectionTimeout="20000"
- redirectPort="8443" />
-
-<!--
- Please remove the comments around the following Connector tag to enable HTTPS Authentication support.
- Remember to add a valid keystore in the configuration folder.
- More info : http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
--->
-
- <!--
- <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
- maxThreads="150" scheme="https" secure="true"
- clientAuth="false" sslProtocol="TLS"
- keystoreFile="configuration/keystore"
- keystorePass="changeit"/>
- -->
-
- <Engine name="Catalina" defaultHost="localhost">
- <Host name="localhost" appBase=""
- unpackWARs="false" autoDeploy="false"
- deployOnStartup="false" createDirs="false">
- <Realm className="org.opendaylight.controller.security.ControllerCustomRealm" />
- <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-
- <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
- prefix="web_access_log_" suffix=".txt" resolveHosts="false"
- rotatable="true" fileDateFormat="yyyy-MM"
- pattern="%{yyyy-MM-dd HH:mm:ss.SSS z}t - [%a] - %r"/>
-
- <Valve className="org.opendaylight.controller.filtervalve.cors.FilterValve"
- configurationFile="configuration/cors-config.xml"
- />
- </Host>
- </Engine>
- </Service>
-</Server>
+++ /dev/null
-#!/bin/bash
-
-# Function harvestHelp searches in run.sh part for line starting with "##<name command>".
-# Next lines starting with "#<text>" will be printed without first char # (=help content).
-# Help content has to end with "##" on new line.
-# Example:
-##foo
-# Foo is world wide used synnonym for bar.
-##
-function harvestHelp() {
- key="$1"
- if [ -z "${key}" ]; then
- key='HELP'
- fi
- echo
- sed -rn "/^##${key}$/,/^##/ p" $0 | sed -r '1 d; $ d; s/^#/ /'
- grep "##${key}" $0 > /dev/null
-}
+++ /dev/null
-@ECHO OFF
-SETLOCAL ENABLEDELAYEDEXPANSION
-
-IF NOT EXIST "%JAVA_HOME%" (
- ECHO JAVA_HOME environment variable is not set
- EXIT /B 2
-)
-
-SET basedir=%~dp0
-SET debugport=8000
-SET consoleport=2400
-SET jmxport=1088
-SET jvmMaxMemory=
-SET extraJVMOpts=
-SET consoleOpts=-console -consoleLog
-SET PID=
-SET JAVA_H=%JAVA_HOME%\bin\jps.exe
-
-:LOOP
-IF "%~1" NEQ "" (
- SET CARG=%~1
- IF "!CARG!"=="-debug" (
- SET debugEnabled=true
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-debugsuspend" (
- SET debugEnabled=true
- SET debugSuspended=true
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-debugport" (
- SET debugEnabled=true
- SET debugport=%~2
- SHIFT & SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-jmx" (
- SET jmxEnabled=true
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-jmxport" (
- SET jmxEnabled=true
- SET jmxport=%~2
- SHIFT & SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-start" (
- SET startEnabled=true
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-consoleport" (
- SET consoleport=%~2
- SHIFT & SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-console" (
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-status" (
- for /F "TOKENS=1" %%G in ('""!JAVA_H!" -lvV ^| find /I "opendaylight""') do (
- set PID=%%G
- )
- if "!PID!" NEQ "" (
- ECHO Controller is running with PID !PID!
- ) else (
- ECHO Controller is not running.
- )
- GOTO :EOF
- )
- IF "!CARG!"=="-stop" (
- for /F "TOKENS=1" %%G in ('""!JAVA_H!" -lvV ^| find /I "opendaylight""') do (
- set PID=%%G
- )
- if "!PID!" NEQ "" (
- ECHO Stopping controller PID !PID!
- TASKKILL /F /PID !PID!
- ) else (
- ECHO Controller is not running.
- )
- GOTO :EOF
- )
- IF "!CARG:~0,4!"=="-Xmx" (
- SET jvmMaxMemory=!CARG!
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG:~0,2!"=="-D" (
- SET extraJVMOpts=!extraJVMOpts! !CARG!
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG:~0,2!"=="-X" (
- SET extraJVMOpts=!extraJVMOpts! !CARG!
- SHIFT
- GOTO :LOOP
- )
- IF "!CARG!"=="-help" (
- SHIFT
- SET CARG=%2
- IF "!CARG!" NEQ "" (
- CALL:!CARG!
- ) ELSE (
- CALL:helper
- )
- GOTO :EOF
- )
-
- ECHO "Unknown option: !CARG!"
- EXIT /B 1
-)
-
-IF "%debugEnabled%" NEQ "" (
- REM ECHO "DEBUG enabled"
- SET extraJVMOpts=!extraJVMOpts! -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=%debugport%
-)
-
-IF "%debugSuspended%" NEQ "" (
- REM ECHO "DEBUG enabled suspended"
- SET extraJVMOpts=!extraJVMOpts! -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=%debugport%
-)
-
-IF "%jvmMaxMemory%"=="" (
- SET jvmMaxMemory=-Xmx1G
- ECHO Setting maximum memory to 1G.
-)
-
-SET extraJVMOpts=!extraJVMOpts! %jvmMaxMemory%
-
-IF "%jmxEnabled%" NEQ "" (
- REM ECHO "JMX enabled "
- SET extraJVMOpts=!extraJVMOpts! -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=%jmxport% -Dcom.sun.management.jmxremote
-)
-IF "%startEnabled%" NEQ "" (
- REM ECHO "START enabled "
- SET consoleOpts=-console %consoleport% -consoleLog
-)
-
-REM Check if controller is already running
-for /F "TOKENS=1" %%G in ('""!JAVA_H!" -lvV ^| find /I "opendaylight""') do (
- SET PID=%%G
-)
-if "!PID!" NEQ "" (
- ECHO Controller is already running with PID !PID!
- EXIT /B 1
-)
-
-
-REM Now set the classpath:
-SET cp="%basedir%lib\org.eclipse.osgi-3.8.1.v20120830-144521.jar;%basedir%lib\org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar;%basedir%lib\org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar"
-
-REM Now set framework classpath
-SET fwcp="file:\%basedir%lib\org.eclipse.osgi-3.8.1.v20120830-144521.jar,file:\%basedir%lib\org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar,file:\%basedir%lib\org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar"
-
-SET RUN_CMD="%JAVA_HOME%\bin\java.exe" -Dopendaylight.controller !extraJVMOpts! -Djava.io.tmpdir="%basedir%work\tmp" -Djava.awt.headless=true -Dosgi.install.area=%basedir% -Dosgi.configuration.area="%basedir%configuration" -Dosgi.frameworkClassPath=%fwcp% -Dosgi.framework="file:\%basedir%lib\org.eclipse.osgi-3.8.1.v20120830-144521.jar" -classpath %cp% org.eclipse.equinox.launcher.Main %consoleOpts%
-
-ECHO !RUN_CMD!
-
-if "%startEnabled%" NEQ "" (
- START /B cmd /C CALL !RUN_CMD! > %basedir%\logs\controller.out 2>&1
- ECHO Running controller in the background.
- EXIT /B 1
-) else (
- !RUN_CMD!
- EXIT /B %ERRORLEVEL%
-)
-
-:helper
-echo. For more information on a specific command, type -help command-name.
-echo.
-echo jmx ^[-jmx^]
-echo jmxport ^[-jmxport ^<num^>^] - DEFAULT is 1088
-echo debug ^[-debug^]
-echo debugsuspend ^[-debugsuspend^]
-echo debugport ^[-debugport ^<num^>^] - DEFAULT is 8000
-echo start ^[-start ^[^<console port^>^]^] - DEFAULT port is 2400
-echo stop ^[-stop^]
-echo status ^[-status^]
-echo console ^[-console^]
-echo agentpath ^[-agentpath:^<path to lib^>^]
-exit/B 1
-
-:debugsuspend
-ECHO.
-ECHO. debugsuspend ^[-debugsuspend^]
-ECHO.
-ECHO. This command sets suspend on true in runjdwp in extra JVM options. If its true, VMStartEvent has a suspendPolicy of SUSPEND_ALL. If its false, VMStartEvent has a suspendPolicy of SUSPEND_NONE.
-ECHO.
-EXIT /B 1
-
-:debugport
-ECHO.
-ECHO. debugport ^[-debugport ^<num^>^] - DEFAULT is 8000
-ECHO.
-ECHO. Set address for settings in runjdwp in extra JVM options.
-ECHO. The address is transport address for the connection.
-ECHO. The address has to be in the range ^[1024,65535^]. If the option was not call, port will be set to default value.
-ECHO.
-EXIT /B 1
-
-:jmxport
-ECHO.
-ECHO. jmxport ^[-jmxport ^<num^>^] - DEFAULT is 1088
-ECHO.
-ECHO. Set jmx port for com.sun.management.jmxremote.port in JMX support. Port has to be in the range ^[1024,65535^]. If this option was not call, port will be set to default value.
-ECHO.
-EXIT /B 1
-
-:debug
-ECHO.
-ECHO. debug [-debug]
-ECHO.
-ECHO. Run ODL controller with -Xdebug and -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=^$^{debugport^}
-ECHO.
-ECHO. -Xdebug enables debugging capabilities in the JVM which are used by the Java Virtual Machine Tools Interface (JVMTI). JVMTI is a low-level debugging interface used by debuggers and profiling tools.
-ECHO.
-ECHO. -Xrunjdwp option loads the JPDA reference implementation of JDWP. This library resides in the target VM and uses JVMDI and JNI to interact with it. It uses a transport and the JDWP protocol to communicate with a separate debugger application.
-ECHO.
-ECHO. settings for -Xrunjdwp:
-ECHO. transport - name of the transport to use in connecting to debugger application
-ECHO. server - if 'y', listen for a debugger application to attach; otherwise, attach to the debugger application at the specified address
-ECHO. - if 'y' and no address is specified, choose a transport address at which to listen for a debugger application, and print the address to the standard output stream
-ECHO. suspend - if 'y', VMStartEvent has a suspend Policy of SUSPEND_ALL
-ECHO. - if 'n', VMStartEvent has a suspend policy of SUSPEND_NONE
-ECHO. address - transport address for the connection
-ECHO. - if server=n, attempt to attach to debugger application at this address
-ECHO. - if server=y, listen for a connection at this address
-ECHO.
-EXIT /B 1
-
-:jmx
-ECHO.
-ECHO. jmx [-jmx]
-ECHO.
-ECHO. Add JMX support. With settings for extra JVM options: -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=^$^{jmxport^} -Dcom.sun.management.jmxremote
-ECHO. jmxport can by set with option -jmxport ^<num^>. Default num for the option is 1088.
-ECHO.
-EXIT /B 1
-
-:stop
-ECHO.
-ECHO. stop ^[-stop^]
-ECHO.
-ECHO. If a controller is running, the command stop controller. Pid will be clean.
-ECHO.
-EXIT /B 1
-
-:status
-ECHO.
-ECHO. status ^[-status^]
-ECHO.
-ECHO. Find out whether a controller is running and print it.
-ECHO.
-EXIT /B 1
-
-:start
-ECHO.
-ECHO. start ^[-start ^[^<console port^>^]^]
-ECHO.
-ECHO. If controller is not running, the command with argument^(for set port, where controller has start^) will start new controller on a port. The port has to be in the range ^[1024,65535^]. If this option was not call, port will be set to default value. Pid will be create.
-EXIT /B 1
-
-:console
-ECHO.
-ECHO. console [-console]
-ECHO. Default option.
-EXIT /B 1
-
-:agentpath
-ECHO.
-ECHO. agentpath ^[-agentpath:^<path to lib^>^]
-ECHO.
-ECHO. Agentpath option passes path to agent to jvm in order to load native agent library, e.g. yourkit profiler agent.
-EXIT /B 1
-
-
+++ /dev/null
-#!/bin/bash
-
-##HELP
-# For more information on a specific command, type -help command-name.
-#
-# jmx [-jmx]
-# jmxport [-jmxport <num>] - DEFAULT is 1088
-# debug [-debug]
-# debugsuspend [-debugsuspend]
-# debugport [-debugport <num>] - DEFAULT is 8000
-# start [-start [<console port>]] - DEFAULT port is 2400
-# stop [-stop]
-# status [-status]
-# console [-console]
-# agentpath [-agentpath:<path to lib>]
-##
-
-platform='unknown'
-unamestr=`uname`
-if [[ "$unamestr" == 'Linux' ]]; then
- platform='linux'
-elif [[ "$unamestr" == 'Darwin' ]]; then
- platform='osx'
-fi
-
-if [[ $platform == 'linux' ]]; then
- fullpath=`readlink -f $0`
-
- if [[ -z ${JAVA_HOME} ]]; then
- # Find the actual location of the Java launcher:
- java_launcher=`command -v java`
- java_launcher=`readlink -f "${java_launcher}"`
-
- # Compute the Java home from the location of the Java launcher:
- export JAVA_HOME="${java_launcher%/bin/java}"
- fi
-elif [[ $platform == 'osx' ]]; then
- TARGET_FILE=$0
- cd `dirname "$TARGET_FILE"`
- TARGET_FILE=`basename $TARGET_FILE`
-
- # Iterate down a (possible) chain of symlinks
- while [ -L "$TARGET_FILE" ]
- do
- TARGET_FILE=`readlink "$TARGET_FILE"`
- cd `dirname "$TARGET_FILE"`
- TARGET_FILE=`basename "$TARGET_FILE"`
- done
-
- # Compute the canonicalized name by finding the physical path
- # for the directory we're in and appending the target file.
- PHYS_DIR=`pwd -P`
- RESULT=$PHYS_DIR/$TARGET_FILE
- fullpath=$RESULT
-
- [[ -z ${JAVA_HOME} ]] && [[ -x "/usr/libexec/java_home" ]] && export JAVA_HOME=`/usr/libexec/java_home -v 1.7`;
-
-fi
-
-[[ -z ${JAVA_HOME} ]] && echo "Need to set JAVA_HOME environment variable" && exit -1;
-[[ ! -x ${JAVA_HOME}/bin/java ]] && echo "Cannot find an executable \
-JVM at path ${JAVA_HOME}/bin/java check your JAVA_HOME" && exit -1;
-
-if [ -z ${ODL_BASEDIR} ]; then
- basedir=`dirname "${fullpath}"`
-else
- basedir=${ODL_BASEDIR}
-fi
-
-if [ -z ${ODL_DATADIR} ]; then
- datadir=`dirname "${fullpath}"`
-else
- datadir=${ODL_DATADIR}
-fi
-
-if [ -z ${TMP} ]; then
- pidfile="/tmp/opendaylight.PID"
-else
- pidfile="${TMP}/opendaylight.PID"
-fi
-debug=0
-debugsuspend=0
-debugport=8000
-debugportread=""
-startdaemon=0
-daemonport=2400
-daemonportread=""
-jmxport=1088
-jmxportread=""
-startjmx=0
-stopdaemon=0
-statusdaemon=0
-consolestart=1
-dohelp=0
-jvmMaxMemory="-Xmx1G"
-extraJVMOpts=""
-agentPath=""
-unknown_option=0
-helper=""
-while true ; do
- case "$1" in
- -debug) debug=1; shift ;;
- -help) dohelp=1; shift; helper=$1; break ;;
- -jmx) startjmx=1; shift ;;
- -debugsuspend) debugsuspend=1; shift ;;
- -debugport) shift; debugportread="$1"; if [[ "${debugportread}" =~ ^[0-9]+$ ]] ; then debugport=${debugportread}; shift; else echo "-debugport expects a number but was not found"; exit -1; fi;;
- -jmxport) shift; jmxportread="$1"; if [[ "${jmxportread}" =~ ^[0-9]+$ ]] ; then jmxport=${jmxportread}; shift; else echo "-jmxport expects a number but was not found"; exit -1; fi;;
- -start) startdaemon=1; shift; daemonportread="$1"; if [[ "${daemonportread}" =~ ^[0-9]+$ ]] ; then daemonport=${daemonportread}; shift; fi;;
- -stop) stopdaemon=1; shift ;;
- -status) statusdaemon=1; shift ;;
- -console) shift ;;
- -Xmx*) jvmMaxMemory="$1"; shift;;
- -D*) extraJVMOpts="${extraJVMOpts} $1"; shift;;
- -X*) extraJVMOpts="${extraJVMOpts} $1"; shift;;
- -J*) extraJVMOpts="${extraJVMOpts} -$(echo "$1" | cut -d'J' -f2)"; shift;;
- -agentpath:*) agentPath="$1"; shift;;
- "") break ;;
- *) echo "Unknown option $1"; unknown_option=1; break ;;
- esac
-done
-
-
-
-if [ "${unknown_option}" -eq 1 ]; then
- echo "Use -help for more information."
- exit 1
-fi
-
-
-if [ "${dohelp}" -eq 1 ]; then
- . ${basedir}/functions.sh
- harvestHelp ${helper}
- echo -e '\nFor other information type -help.\n'
- exit 1
-fi
-
-extraJVMOpts="${extraJVMOpts} ${jvmMaxMemory}"
-
-##debugport
-#debugport [-debugport <num>] - DEFAULT is 8000
-#
-# Set address for settings in runjdwp in extra JVM options.
-# The address is transport address for the connection.
-# The address has to be in the range [1024,65535]. If this option was not call, port will be set to default value.
-##
-# Validate debug port
-if [[ "${debugport}" -lt 1024 ]] || [[ "${debugport}" -gt 65535 ]]; then
- echo "Debug Port not in the range [1024,65535] ${debugport}"
- exit -1
-fi
-
-# Validate daemon console port
-if [[ "${daemonport}" -lt 1024 ]] || [[ "${daemonport}" -gt 65535 ]]; then
- echo "Daemon console Port not in the range [1024,65535] value is ${daemonport}"
- exit -1
-fi
-
-##jmxport
-#jmxport [-jmxport <num>] - DEFAULT is 1088
-#
-# Set jmx port for com.sun.management.jmxremote.port in JMX support. Port has to be in the range [1024,65535]. If this option was not call, port will be set to default value.
-##
-# Validate jmx port
-if [[ "${jmxport}" -lt 1024 ]] || [[ "${jmxport}" -gt 65535 ]]; then
- echo "JMX Port not in the range [1024,65535] value is ${jmxport}"
- exit -1
-fi
-##debug
-#debug [-debug]
-#
-#Run ODL controller with -Xdebug and -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=\${debugport}
-#-Xdebug enables debugging capabilities in the JVM which are used by the Java Virtual Machine Tools Interface (JVMTI). JVMTI is a low-level debugging interface used by debuggers and profiling tools.
-#-Xrunjdwp option loads the JPDA reference implementation of JDWP. This library resides in the target VM and uses JVMDI and JNI to interact with it. It uses a transport and the JDWP protocol to
-#communicate with a separate debugger application.
-#settings for -Xrunjdwp:
-# transport - name of the transport to use in connecting to debugger application
-# server - if “y”, listen for a debugger application to attach; otherwise, attach to the debugger application at the specified address
-# - if “y” and no address is specified, choose a transport address at which to listen for a debugger application, and print the address to the standard output stream
-# suspend - if “y”, VMStartEvent has a suspend Policy of SUSPEND_ALL
-# - if “n”, VMStartEvent has a suspend policy of SUSPEND_NONE
-# address - transport address for the connection
-# - if server=n, attempt to attach to debugger application at this address
-# - if server=y, listen for a connection at this address
-##
-
-##debugsuspend
-#debugsuspend [-debugsuspend]
-#
-#This command sets suspend on true in runjdwp in extra JVM options. If its true, VMStartEvent has a suspendPolicy of SUSPEND_ALL. If its false, VMStartEvent has a suspendPolicy of SUSPEND_NONE.
-##
-# Debug options
-if [ "${debugsuspend}" -eq 1 ]; then
- extraJVMOpts="${extraJVMOpts} -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=${debugport}"
-elif [ "${debug}" -eq 1 ]; then
- extraJVMOpts="${extraJVMOpts} -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=${debugport}"
-fi
-##jmx
-#jmx [-jmx]
-#
-#Add JMX support. With settings for extra JVM options: -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=\${jmxport} -Dcom.sun.management.jmxremote
-#jmxport can by set with command -jmxport <num>. Default num for the option is 1088.
-##
-# Add JMX support
-if [ "${startjmx}" -eq 1 ]; then
- extraJVMOpts="${extraJVMOpts} -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=${jmxport} -Dcom.sun.management.jmxremote"
-fi
-
-########################################
-# Now add to classpath the OSGi JAR
-########################################
-CLASSPATH=${CLASSPATH}:${basedir}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar
-FWCLASSPATH=file:"${basedir}"/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar
-
-########################################
-# Now add the extensions
-########################################
-
-# Extension 1: this is used to be able to convert all the
-# bundleresouce: URL in file: so packages that are not OSGi ready can
-# still work. Notably this is the case for spring classes
-CLASSPATH=${CLASSPATH}:${basedir}/lib/org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar
-FWCLASSPATH=${FWCLASSPATH},file:${basedir}/lib/org.eclipse.virgo.kernel.equinox.extensions-3.6.0.RELEASE.jar
-
-########################################
-# Now add the launcher
-########################################
-CLASSPATH=${CLASSPATH}:${basedir}/lib/org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar
-FWCLASSPATH=${FWCLASSPATH},file:${basedir}/lib/org.eclipse.equinox.launcher-1.3.0.v20120522-1813.jar
-
-cd $basedir
-
-##stop
-#stop [-stop]
-#
-#If a controller is running, the command stop controller. Pid will be clean.
-##
-if [ "${stopdaemon}" -eq 1 ]; then
- if [ -e "${pidfile}" ]; then
- daemonpid=`cat "${pidfile}"`
- kill "${daemonpid}"
- rm -f "${pidfile}"
- echo "Controller with PID: ${daemonpid} -- Stopped!"
- exit 0
- else
- echo "Doesn't seem any Controller daemon is currently running"
- exit -1
- fi
-fi
-
-##status
-#status [-status]
-#
-#Find out whether a controller is running and print it.
-##
-if [ "${statusdaemon}" -eq 1 ]; then
- if [ -e "${pidfile}" ]; then
- daemonpid=`cat "${pidfile}"`
- ps -p ${daemonpid} > /dev/null
- daemonexists=$?
- if [ "${daemonexists}" -eq 0 ]; then
- echo "Controller with PID: ${daemonpid} -- Running!"
- exit 0
- else
- echo "Controller with PID: ${daemonpid} -- Doesn't seem to exist"
- rm -f "${pidfile}"
- exit 1
- fi
- else
- echo "Doesn't seem any Controller daemon is currently running, at least no PID file has been found"
- exit -1
- fi
-fi
-
-iotmpdir=`echo "${datadir}" | sed 's/ /\\ /g'`
-bdir=`echo "${basedir}" | sed 's/ /\\ /g'`
-confarea=`echo "${datadir}" | sed 's/ /\\ /g'`
-fwclasspath=`echo "${FWCLASSPATH}" | sed 's/ /\\ /g'`
-
-##start
-#start [-start [<console port>]]
-#
-# If controller is not running, the command with argument(for set port, where controller has start) will start new controller on a port. The port has to be in the range [1024,65535]. If this option was not call, port will be set to default value. Pid will be create.
-##
-##console
-#console [-console]
-#
-# Default option.
-##
-##agentpath
-#agentpath [-agentpath:<path to lib>]
-#
-# Agentpath option passes path to agent to jvm in order to load native agent library, e.g. yourkit profiler agent.
-##
-echo "JVM maximum memory was set to ${jvmMaxMemory}."
-if [ "${startdaemon}" -eq 1 ]; then
- if [ -e "${pidfile}" ]; then
- echo "Another instance of controller running, check with $0 -status"
- exit -1
- fi
- $JAVA_HOME/bin/java ${extraJVMOpts} \
- ${agentPath} \
- -Djava.io.tmpdir="${iotmpdir}/work/tmp" \
- -Dosgi.install.area="${bdir}" \
- -Dosgi.configuration.area="${confarea}/configuration" \
- -Dosgi.frameworkClassPath="${fwclasspath}" \
- -Dosgi.framework=file:"${bdir}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar" \
- -Djava.awt.headless=true \
- -classpath "${CLASSPATH}" \
- org.eclipse.equinox.launcher.Main \
- -console ${daemonport} \
- -consoleLog &
- daemonpid=$!
- echo ${daemonpid} > ${pidfile}
-elif [ "${consolestart}" -eq 1 ]; then
- if [ -e "${pidfile}" ]; then
- echo "Another instance of controller running, check with $0 -status"
- exit -1
- fi
- $JAVA_HOME/bin/java ${extraJVMOpts} \
- ${agentPath} \
- -Djava.io.tmpdir="${iotmpdir}/work/tmp" \
- -Dosgi.install.area="${bdir}" \
- -Dosgi.configuration.area="${confarea}/configuration" \
- -Dosgi.frameworkClassPath="${fwclasspath}" \
- -Dosgi.framework=file:"${bdir}/lib/org.eclipse.osgi-3.8.1.v20120830-144521.jar" \
- -Djava.awt.headless=true \
- -classpath "${CLASSPATH}" \
- org.eclipse.equinox.launcher.Main \
- -console \
- -consoleLog
-fi
+++ /dev/null
-org.opendaylight.controller.version = 0.1
-org.opendaylight.controller.build.scm.version = ${buildNumber}
-org.opendaylight.controller.build.user = ${env.USER}
-org.opendaylight.controller.build.workspace = **********
-org.opendaylight.controller.build.timestamp = ${timestamp}
-org.opendaylight.controller.build.machine = **********
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<launchConfiguration type="org.eclipse.m2e.Maven2LaunchConfigurationType">
-<booleanAttribute key="M2_DEBUG_OUTPUT" value="false"/>
-<stringAttribute key="M2_GOALS" value="clean org.reficio:p2-maven-plugin:site"/>
-<booleanAttribute key="M2_NON_RECURSIVE" value="false"/>
-<booleanAttribute key="M2_OFFLINE" value="false"/>
-<stringAttribute key="M2_PROFILES" value=""/>
-<listAttribute key="M2_PROPERTIES"/>
-<stringAttribute key="M2_RUNTIME" value="EMBEDDED"/>
-<booleanAttribute key="M2_SKIP_TESTS" value="false"/>
-<booleanAttribute key="M2_UPDATE_SNAPSHOTS" value="false"/>
-<booleanAttribute key="M2_WORKSPACE_RESOLUTION" value="true"/>
-<stringAttribute key="org.eclipse.debug.core.ATTR_REFRESH_SCOPE" value="${project}"/>
-<stringAttribute key="org.eclipse.debug.core.source_locator_id" value="org.eclipse.m2e.launching.MavenSourceLocator"/>
-<stringAttribute key="org.eclipse.debug.core.source_locator_memento" value="<?xml version="1.0" encoding="UTF-8" standalone="no"?> <sourceLookupDirector> <sourceContainers duplicates="false"/> </sourceLookupDirector> "/>
-<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
-<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
-<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
-</listAttribute>
-<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xmx768m -XX:MaxPermSize=256m"/>
-<stringAttribute key="org.eclipse.jdt.launching.WORKING_DIRECTORY" value="${workspace_loc:/distribution.p2site}"/>
-</launchConfiguration>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
- <modelVersion>4.0.0</modelVersion>
- <prerequisites>
- <maven>3.0</maven>
- </prerequisites>
-
- <!-- p2site distribution is created with the intention of providing a -->
- <!-- target platform that could be use for PDE to create an OSGi -->
- <!-- launcher, this should allow to enable the hot code replacement on -->
- <!-- the controller. -->
-
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- </scm>
-
- <pluginRepositories>
- <pluginRepository>
- <id>reficio</id>
- <url>${nexusproxy}/repositories/reficioplugin/</url>
- </pluginRepository>
- </pluginRepositories>
-
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>distribution.p2site</artifactId>
- <version>0.2.0-SNAPSHOT</version>
- <packaging>pom</packaging>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.reficio</groupId>
- <artifactId>p2-maven-plugin</artifactId>
- <version>1.0.0</version>
- <executions>
- <execution>
- <id>default-cli</id>
- <configuration>
- <pedantic>true</pedantic>
- <artifacts>
- <artfiact>
- <id>org.slf4j:jcl-over-slf4j:${slf4j.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.slf4j:slf4j-api:${slf4j.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.slf4j:log4j-over-slf4j:${slf4j.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>ch.qos.logback:logback-core:${logback.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>ch.qos.logback:logback-classic:${logback.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.codehaus.jackson:jackson-mapper-asl:${jackson.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.codehaus.jackson:jackson-core-asl:${jackson.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.codehaus.jackson:jackson-jaxrs:${jackson.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.codehaus.jettison:jettison:1.3.3</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>commons-io:commons-io:2.4</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>commons-fileupload:commons-fileupload:1.2.2</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:javax.servlet:3.0.0.v201112011016</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:javax.servlet.jsp:2.2.0.v201112011158</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.equinox.ds:1.4.0.v20120522-1841</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.equinox.util:1.0.400.v20120522-2049</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.osgi.services:3.3.100.v20120522-1822</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.osgi:3.8.1.v20120830-144521</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.apache.felix.gogo.command:0.8.0.v201108120515</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.apache.felix.gogo.runtime:0.8.0.v201108120515</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.apache.felix.gogo.shell:0.8.0.v201110170705</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.equinox.cm:1.0.400.v20120522-1841</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.equinox.console:1.0.0.v20120522-1841</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>equinoxSDK381:org.eclipse.equinox.launcher:1.3.0.v20120522-1813</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.gemini.web.core:${geminiweb.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.gemini.web.extender:${geminiweb.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.gemini.web.tomcat:${geminiweb.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.kernel.equinox.extensions:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.util.common:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.util.io:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.util.math:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.util.osgi:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.util.osgi.manifest:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>geminiweb:org.eclipse.virgo.util.parser.manifest:${virgo.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.apache.felix:org.apache.felix.dependencymanager:3.1.0</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.apache.felix:org.apache.felix.dependencymanager.shell:3.0.1</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>com.google.code.gson:gson:2.1</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.jboss.spec.javax.transaction:jboss-transaction-api_1.1_spec:1.0.1.Final</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.apache.commons:commons-lang3:3.1</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>virgomirror:org.eclipse.jdt.core.compiler.batch:3.8.0.I20120518-2145</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>eclipselink:javax.persistence:2.0.4.v201112161009</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.activation:1.1.0.v201211130549</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.annotation:1.1.0.v201209060031</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.ejb:3.1.1.v201204261316</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.el:2.2.0.v201108011116</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.mail.glassfish:1.4.1.v201108011116</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.xml.rpc:1.1.0.v201005080400</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.catalina:7.0.32.v201211201336</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.catalina.ha:7.0.32.v201211201952</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.catalina.tribes:7.0.32.v201211201952</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.coyote:7.0.32.v201211201952</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.el:7.0.32.v201211081135</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.jasper:7.0.32.v201211201952</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.juli.extras:7.0.32.v201211081135</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.tomcat.api:7.0.32.v201211081135</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:org.apache.tomcat.util:7.0.32.v201211201952</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.servlet.jsp.jstl:1.2.0.v201105211821</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>orbit:javax.servlet.jsp.jstl.impl:1.2.0.v201210211230</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.asm:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.aop:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.context:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.context.support:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.core:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.beans:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.expression:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.web:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.aopalliance:com.springsource.org.aopalliance:1.0.0</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.web.servlet:${spring.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework.security:spring-security-config:${spring-security.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework.security:spring-security-core:${spring-security.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework.security:spring-security-web:${spring-security.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework.security:spring-security-taglibs:${spring-security.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.springframework:org.springframework.transaction:${spring-security.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>org.ow2.chameleon.management:chameleon-mbeans:1.0.0</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>com.sun.jersey:jersey-core:${jersey.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>com.sun.jersey:jersey-server:${jersey.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>com.sun.jersey:jersey-client:${jersey.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- <artfiact>
- <id>com.sun.jersey:jersey-json:${jersey.version}</id>
- <transitive>false</transitive>
- <override>false</override>
- </artfiact>
- </artifacts>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.mortbay.jetty</groupId>
- <artifactId>jetty-maven-plugin</artifactId>
- <version>8.1.5.v20120716</version>
- <configuration>
- <scanIntervalSeconds>10</scanIntervalSeconds>
- <webAppSourceDirectory>${basedir}/target/repository/</webAppSourceDirectory>
- <webApp>
- <contextPath>/site</contextPath>
- </webApp>
- </configuration>
- </plugin>
- </plugins>
- </build>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.5.0-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
-
- <artifactId>sanitytest</artifactId>
- <version>0.5.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <dependencies>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- <scope>provided</scope>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>org.opendaylight.controller.sanitytest</Export-Package>
- <Import-Package>javax.xml.bind.annotation,
- org.osgi.service.component,
- org.slf4j,
- org.eclipse.osgi.framework.console,
- org.osgi.framework,
- org.eclipse.osgi.baseadaptor,
- org.eclipse.osgi.framework.adaptor,
- org.osgi.framework.wiring</Import-Package>
- <Bundle-Activator>org.opendaylight.controller.sanitytest.internal.Activator</Bundle-Activator>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- </scm>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.distribution;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class Sanity {
-
- static void copy(InputStream in, OutputStream out) throws IOException {
- while (true) {
- int c = in.read();
- if (c == -1) break;
- out.write((char)c);
- }
- }
-
- public static void main(String[] args) throws IOException, InterruptedException {
- String cwd = System.getProperty("user.dir");
-
- System.out.println("Current working directory = " + cwd);
-
- String os = System.getProperty("os.name").toLowerCase();
- List<String> script = new ArrayList<String>();
-
- if(os.contains("windows")){
- script.add("cmd.exe");
- script.add("/c");
- script.add("runsanity.bat");
- } else {
- script.add("./runsanity.sh");
- }
-
- ProcessBuilder processBuilder = new ProcessBuilder();
- processBuilder.inheritIO().command(script);
- Process p = processBuilder.start();
-
- copy(p.getInputStream(), System.out);
-
- p.waitFor();
-
- System.out.println("Test exited with exitValue = " + p.exitValue());
-
- System.exit(p.exitValue());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sanitytest.internal;
-
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleActivator;
-import org.osgi.framework.BundleContext;
-import org.osgi.framework.wiring.BundleRevision;
-
-public class Activator implements BundleActivator {
- //10 Second initial, 1 second subsequent
- private static final int INITIAL_DELAY = 10000;
- private static final int SUBSEQUENT_DELAY = 1000;
- private static final int MAX_ATTEMPTS = 120;
-
-
- private String stateToString(int state) {
- switch (state) {
- case Bundle.ACTIVE:
- return "ACTIVE";
- case Bundle.INSTALLED:
- return "INSTALLED";
- case Bundle.RESOLVED:
- return "RESOLVED";
- case Bundle.UNINSTALLED:
- return "UNINSTALLED";
- case Bundle.STARTING:
- return "STARTING";
- default:
- return "Not CONVERTED: state value is " + state;
- }
- }
-
- public void start(final BundleContext bundleContext) throws Exception {
- Timer monitorTimer = new Timer("monitor timer", true);
- monitorTimer.schedule(new TimerTask() {
- @Override
- public void run() {
- int countup = 0;
- boolean failed = false;
- boolean resolved = false;
- while (!resolved) {
- resolved = true;
- failed = false;
- for(Bundle bundle : bundleContext.getBundles()){
- /*
- * A bundle should be ACTIVE, unless it a fragment, in which case it should be RESOLVED
- */
- int state = bundle.getState();
- if ((bundle.adapt(BundleRevision.class).getTypes() & BundleRevision.TYPE_FRAGMENT) != 0) {
- //fragment
- if (state != Bundle.RESOLVED) {
- System.out.println("------ Failed to activate/resolve fragment = " + bundle.getSymbolicName() + " state = " + stateToString(bundle.getState()));
- failed = true;
- if (state == Bundle.STARTING)
- resolved = false;
- }
- } else {
- if(state != Bundle.ACTIVE) {
- System.out.println("------ Failed to activate/resolve bundle = " + bundle.getSymbolicName() + " state = " + stateToString(bundle.getState()));
- failed = true;
- if (state == Bundle.STARTING)
- resolved = false;
- }
- }
- }
- if (!resolved) {
- countup++;
- if (countup < MAX_ATTEMPTS) {
- System.out.println("all bundles haven't finished starting, will repeat");
- try {
- Thread.sleep(SUBSEQUENT_DELAY);
- } catch (Exception e) {
- System.out.println("Thread.sleep interuptted.");
- break;
- }
- } else
- resolved = true;
- }
- }
-
- if(failed){
- System.out.flush();
- System.out.println("exiting with 1 as failed");
- System.out.close();
- Runtime.getRuntime().exit(1);
- } else {
- System.out.flush();
- System.out.println("exiting with 0 as succeeded");
- System.out.close();
- Runtime.getRuntime().exit(0);
- }
- }
- }, INITIAL_DELAY);
- }
-
- public void stop(BundleContext bundleContext) throws Exception {
-
- }
-}
<version>0.2.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
</parent>
- <groupId>org.opendaylight.controller</groupId>
<artifactId>karaf.branding</artifactId>
<version>1.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
</parent>
<modelVersion>4.0.0</modelVersion>
- <groupId>org.opendaylight.controller</groupId>
<artifactId>benchmark-data-store</artifactId>
<dependencies>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-inmemory-datastore</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-broker-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ </dependency>
</dependencies>
<build>
</plugin>
</plugins>
</build>
-</project>
\ No newline at end of file
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Warmup;
+
+/**
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public abstract class AbstractInMemoryBrokerWriteTransactionBenchmark extends AbstractInMemoryWriteTransactionBenchmark {
+
+ protected DOMDataBrokerImpl domBroker;
+
+ protected void initTestNode() throws Exception {
+ final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
+ .build();
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, testPath, provideOuterListNode());
+
+ writeTx.submit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
+
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+ }
+
+ writeTx.submit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+
+ writeTx.submit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ }
+
+ writeTx.submit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+ writeTx.submit().get();
+ }
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ }
+ writeTx.submit().get();
+ }
+
+ @Benchmark
+ @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+ public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
+ for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+ writeTx.submit().get();
+ }
+ }
+}
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Warmup;
/**
* @author Lukas Sedlak <lsedlak@cisco.com>
*/
-public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark {
+public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark extends AbstractInMemoryWriteTransactionBenchmark {
- private static final int WARMUP_ITERATIONS = 20;
- private static final int MEASUREMENT_ITERATIONS = 20;
-
- private static final int OUTER_LIST_100K = 100000;
- private static final int OUTER_LIST_50K = 50000;
- private static final int OUTER_LIST_10K = 10000;
-
- private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
- private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
- private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
-
- private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
- final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
-
- for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
- paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
- .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
- .build();
- }
- return paths;
- }
-
- private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
- private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
- private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
-
- private static MapNode initInnerListItems(final int count) {
- final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
- .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
-
- for (int i = 1; i <= count; ++i) {
- mapEntryBuilder
- .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
- }
- return mapEntryBuilder.build();
- }
-
- private static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
- private static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
- private static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
-
- private static NormalizedNode<?,?>[] initOuterListItems(int outerListItemsCount, MapNode innerList) {
- final NormalizedNode<?,?>[] outerListItems = new NormalizedNode[outerListItemsCount];
-
- for (int i = 0; i < outerListItemsCount; ++i) {
- int outerListKey = i;
- outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
- .withChild(innerList).build();
- }
- return outerListItems;
- }
-
- protected SchemaContext schemaContext;
protected InMemoryDOMDataStore domStore;
- abstract public void setUp() throws Exception;
-
- abstract public void tearDown();
-
protected void initTestNode() throws Exception {
final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
.build();
cohort.commit().get();
}
- private DataContainerChild<?, ?> provideOuterListNode() {
- return ImmutableContainerNodeBuilder
- .create()
- .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
- .withChild(
- ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
- .build()).build();
- }
-
@Benchmark
@Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public abstract class AbstractInMemoryWriteTransactionBenchmark {
+ protected static final int OUTER_LIST_100K = 100000;
+ protected static final int OUTER_LIST_50K = 50000;
+ protected static final int OUTER_LIST_10K = 10000;
+
+ protected static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
+ protected static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
+ protected static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
+
+ private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
+ final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
+
+ for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
+ paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
+ .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .build();
+ }
+ return paths;
+ }
+
+ protected static final int WARMUP_ITERATIONS = 20;
+ protected static final int MEASUREMENT_ITERATIONS = 20;
+
+ protected static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
+ protected static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
+ protected static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
+
+ private static MapNode initInnerListItems(final int count) {
+ final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
+ .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
+
+ for (int i = 1; i <= count; ++i) {
+ mapEntryBuilder
+ .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
+ }
+ return mapEntryBuilder.build();
+ }
+
+ protected static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
+ protected static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
+ protected static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
+
+ private static NormalizedNode<?,?>[] initOuterListItems(final int outerListItemsCount, final MapNode innerList) {
+ final NormalizedNode<?,?>[] outerListItems = new NormalizedNode[outerListItemsCount];
+
+ for (int i = 0; i < outerListItemsCount; ++i) {
+ int outerListKey = i;
+ outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+ .withChild(innerList).build();
+ }
+ return outerListItems;
+ }
+
+ protected SchemaContext schemaContext;
+ abstract public void setUp() throws Exception;
+ abstract public void tearDown();
+
+ protected static DataContainerChild<?, ?> provideOuterListNode() {
+ return ImmutableContainerNodeBuilder
+ .create()
+ .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
+ .withChild(
+ ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
+ .build()).build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryBrokerWriteTransactionBenchmark extends AbstractInMemoryBrokerWriteTransactionBenchmark {
+ private ListeningExecutorService executor;
+
+ @Setup(Level.Trial)
+ @Override
+ public void setUp() throws Exception {
+ ListeningExecutorService dsExec = MoreExecutors.sameThreadExecutor();
+ executor = MoreExecutors.listeningDecorator(
+ MoreExecutors.getExitingExecutorService((ThreadPoolExecutor)Executors.newFixedThreadPool(1), 1L, TimeUnit.SECONDS));
+
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", dsExec,
+ MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", dsExec,
+ MoreExecutors.sameThreadExecutor());
+ Map<LogicalDatastoreType, DOMStore> datastores = ImmutableMap.of(
+ LogicalDatastoreType.OPERATIONAL, (DOMStore)operStore,
+ LogicalDatastoreType.CONFIGURATION, configStore);
+
+ domBroker = new DOMDataBrokerImpl(datastores, executor);
+ schemaContext = BenchmarkModel.createTestContext();
+ configStore.onGlobalContextUpdated(schemaContext);
+ operStore.onGlobalContextUpdated(schemaContext);
+ initTestNode();
+ }
+
+ @Override
+ public void tearDown() {
+ domBroker.close();
+ executor.shutdown();
+ }
+}
final Inet4Address inetDestAddress) {
String inetSourceAddressStr = InetAddresses
.toAddrString(inetSourceAddress);
- Ipv4Prefix ipv4SourcePrefix = new Ipv4Prefix(inetSourceAddressStr);
+ Ipv4Prefix ipv4SourcePrefix = new Ipv4Prefix(inetSourceAddressStr + "/32");
String inetDestAddressValue = InetAddresses
.toAddrString(inetDestAddress);
- Ipv4Prefix ipv4DestPrefix = new Ipv4Prefix(inetDestAddressValue);
+ Ipv4Prefix ipv4DestPrefix = new Ipv4Prefix(inetDestAddressValue + "/32");
ArpMatchBuilder arpMatchBuilder = new ArpMatchBuilder();
if(inetSourceAddress != null) {
String inetSrcAddressString = InetAddresses
.toAddrString(inetSourceAddress);
- layer4MatchBuild.setIpv4Source(new Ipv4Prefix(inetSrcAddressString));
+ layer4MatchBuild.setIpv4Source(new Ipv4Prefix(inetSrcAddressString + "/32"));
}
if(inetDestAddress != null) {
String inetDstAddressString = InetAddresses
.toAddrString(inetDestAddress);
layer4MatchBuild
- .setIpv4Destination(new Ipv4Prefix(inetDstAddressString));
+ .setIpv4Destination(new Ipv4Prefix(inetDstAddressString + "/32"));
}
return layer4MatchBuild.build();
if(inetSourceAddress != null) {
String inetSrcAddressString = InetAddresses
.toAddrString(inetSourceAddress);
- layer6MatchBuild.setIpv6Source(new Ipv6Prefix(inetSrcAddressString));
+ layer6MatchBuild.setIpv6Source(new Ipv6Prefix(inetSrcAddressString + "/128"));
}
if(inetDestAddress != null) {
String inetDstAddressString = InetAddresses
.toAddrString(inetDestAddress);
layer6MatchBuild
- .setIpv6Destination(new Ipv6Prefix(inetDstAddressString));
+ .setIpv6Destination(new Ipv6Prefix(inetDstAddressString + "/128"));
}
return layer6MatchBuild.build();
}
*/
package org.opendaylight.controller.sal.compatibility;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArrayList;
+import com.google.common.base.Optional;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.Iterables;
import org.opendaylight.controller.md.sal.binding.util.TypeSafeDataReader;
import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatistics;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.FlowTopologyDiscoveryService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.Link;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.Iterables;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInInventoryService, OpendaylightFlowStatisticsListener, OpendaylightFlowTableStatisticsListener, OpendaylightPortStatisticsListener {
private static final Logger LOG = LoggerFactory.getLogger(InventoryAndReadAdapter.class);
private static final short OPENFLOWV10_TABLE_ID = 0;
+ private static final int SLEEP_FOR_NOTIFICATIONS_MILLIS = 500;
private final InventoryNotificationProvider inventoryNotificationProvider = new InventoryNotificationProvider();
private final Map<PathArgument,List<PathArgument>> nodeToNodeConnectorsMap = new ConcurrentHashMap<>();
private List<IPluginOutInventoryService> inventoryPublisher = new CopyOnWriteArrayList<>();
private List<IPluginOutReadService> statisticsPublisher = new CopyOnWriteArrayList<>();
+ private Cache<String, TransactionNotificationList<? extends TransactionAware>> txCache;
private OpendaylightFlowTableStatisticsService flowTableStatisticsService;
private OpendaylightPortStatisticsService nodeConnectorStatisticsService;
public void startAdapter() {
inventoryNotificationProvider.setDataProviderService(getDataProviderService());
inventoryNotificationProvider.setInventoryPublisher(getInventoryPublisher());
+ txCache = CacheBuilder.newBuilder().expireAfterWrite(60L, TimeUnit.SECONDS).maximumSize(10000).build();
// inventoryNotificationProvider.start();
}
@Override
public List<FlowOnNode> readAllFlow(final Node node, final boolean cached) {
- final ArrayList<FlowOnNode> output = new ArrayList<>();
- final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
- if (table != null) {
- final List<Flow> flows = table.getFlow();
- LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
+ final ArrayList<FlowOnNode> ret= new ArrayList<>();
+ if (cached) {
+ final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
+ if (table != null) {
+ final List<Flow> flows = table.getFlow();
+ LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
+
+ for (final Flow flow : flows) {
+ final FlowStatisticsData statsFromDataStore = flow.getAugmentation(FlowStatisticsData.class);
+ if (statsFromDataStore != null) {
+ final FlowOnNode it = new FlowOnNode(ToSalConversionsUtils.toFlow(flow, node));
+ ret.add(addFlowStats(it, statsFromDataStore.getFlowStatistics()));
+ }
+ }
+ }
+ } else {
+ LOG.debug("readAllFlow cached:{}", cached);
+ GetAllFlowStatisticsFromFlowTableInput input =
+ new GetAllFlowStatisticsFromFlowTableInputBuilder()
+ .setNode(NodeMapping.toNodeRef(node))
+ .setTableId(new TableId(OPENFLOWV10_TABLE_ID))
+ .build();
+
+ Future<RpcResult<GetAllFlowStatisticsFromFlowTableOutput>> future =
+ getFlowStatisticsService().getAllFlowStatisticsFromFlowTable(input);
- for (final Flow flow : flows) {
- final FlowStatisticsData statsFromDataStore = flow.getAugmentation(FlowStatisticsData.class);
- if (statsFromDataStore != null) {
- final FlowOnNode it = new FlowOnNode(ToSalConversionsUtils.toFlow(flow, node));
- output.add(addFlowStats(it, statsFromDataStore.getFlowStatistics()));
+ RpcResult<GetAllFlowStatisticsFromFlowTableOutput> result = null;
+ try {
+ // having a blocking call is fine here, as we need to join
+ // the notifications and return the result
+ result = future.get();
+ } catch (Exception e) {
+ LOG.error("Exception in getAllFlowStatisticsFromFlowTable ", e);
+ return ret;
+ }
+
+ GetAllFlowStatisticsFromFlowTableOutput output = result.getResult();
+ if (output == null) {
+ return ret;
+ }
+
+ TransactionId transactionId = output.getTransactionId();
+ String cacheKey = buildCacheKey(transactionId, NodeMapping.toNodeId(node));
+ LOG.info("readAllFlow transactionId:{} cacheKey:{}", transactionId, cacheKey);
+
+ // insert an entry in tempcache, will get updated when notification is received
+ txCache.put(cacheKey, new TransactionNotificationList<FlowsStatisticsUpdate>(
+ transactionId, node.getNodeIDString()));
+
+ TransactionNotificationList<FlowsStatisticsUpdate> txnList =
+ (TransactionNotificationList<FlowsStatisticsUpdate>) txCache.getIfPresent(cacheKey);
+
+ // this loop would not be infinite as the cache will remove an entry
+ // after a defined time if not written to
+ while (txnList != null && !txnList.areAllNotificationsGathered()) {
+ LOG.debug("readAllFlow waiting for notification...");
+ waitForNotification();
+ txnList = (TransactionNotificationList<FlowsStatisticsUpdate>) txCache.getIfPresent(cacheKey);
+ }
+
+ if (txnList == null) {
+ return ret;
+ }
+
+ List<FlowsStatisticsUpdate> notifications = txnList.getNotifications();
+ for (FlowsStatisticsUpdate flowsStatisticsUpdate : notifications) {
+ List<FlowAndStatisticsMapList> flowAndStatisticsMapList = flowsStatisticsUpdate.getFlowAndStatisticsMapList();
+ if (flowAndStatisticsMapList != null) {
+ for (FlowAndStatisticsMapList flowAndStatistics : flowAndStatisticsMapList) {
+ final FlowOnNode it = new FlowOnNode(ToSalConversionsUtils.toFlow(flowAndStatistics, node));
+ ret.add(addFlowStats(it, flowAndStatistics));
+ }
}
}
}
+ return ret;
+ }
+
+ private String buildCacheKey(final TransactionId id, final NodeId nodeId) {
+ return String.valueOf(id.getValue()) + "-" + nodeId.getValue();
+ }
- return output;
+ private void waitForNotification() {
+ try {
+ // going for a simple sleep approach, as wait-notify on a monitor would require
+ // us to maintain monitors per txn-node combo
+ Thread.sleep(SLEEP_FOR_NOTIFICATIONS_MILLIS);
+ LOG.trace("statCollector is waking up from a wait stat Response sleep");
+ } catch (final InterruptedException e) {
+ LOG.warn("statCollector has been interrupted waiting stat Response sleep", e);
+ }
}
@Override
for (final IPluginOutReadService statsPublisher : getStatisticsPublisher()) {
statsPublisher.nodeFlowStatisticsUpdated(aDNode, adsalFlowsStatistics);
}
+
+ updateTransactionCache(notification, notification.getId(), !notification.isMoreReplies());
}
/**
private List<PathArgument> removeNodeConnectors(final InstanceIdentifier<? extends Object> nodeIdentifier) {
return this.nodeToNodeConnectorsMap.remove(Iterables.get(nodeIdentifier.getPathArguments(), 1));
}
+
+ private <T extends TransactionAware> void updateTransactionCache(T notification, NodeId nodeId, boolean lastNotification) {
+
+ String cacheKey = buildCacheKey(notification.getTransactionId(), nodeId);
+ TransactionNotificationList<T> txnList = (TransactionNotificationList<T>) txCache.getIfPresent(cacheKey);
+ final Optional<TransactionNotificationList<T>> optional = Optional.<TransactionNotificationList<T>>fromNullable(txnList);
+ if (optional.isPresent()) {
+ LOG.info("updateTransactionCache cacheKey:{}, lastNotification:{}, txnList-present:{}", cacheKey, lastNotification, optional.isPresent());
+ TransactionNotificationList<T> txn = optional.get();
+ txn.addNotification(notification);
+ txn.setAllNotificationsGathered(lastNotification);
+ }
+ }
+
+ private class TransactionNotificationList<T extends TransactionAware> {
+ private TransactionId id;
+ private String nId;
+ private List<T> notifications;
+ private boolean allNotificationsGathered;
+
+ public TransactionNotificationList(TransactionId id, String nId) {
+ this.nId = nId;
+ this.id = id;
+ notifications = new ArrayList<T>();
+ }
+
+ public void addNotification(T notification) {
+ notifications.add(notification);
+ }
+
+ public void setAllNotificationsGathered(boolean allNotificationsGathered) {
+ this.allNotificationsGathered = allNotificationsGathered;
+ }
+
+ public boolean areAllNotificationsGathered() {
+ return allNotificationsGathered;
+ }
+
+ public List<T> getNotifications() {
+ return notifications;
+ }
+
+ }
+
}
public static Address toInetAddress(final InetAddress address) {
if (address instanceof Inet4Address) {
return new Ipv4Builder()
- .setIpv4Address(new Ipv4Prefix(InetAddresses.toAddrString(address)))
+ .setIpv4Address(new Ipv4Prefix(InetAddresses.toAddrString(address) + "/32"))
.build();
}
if (address instanceof Inet6Address) {
return new Ipv6Builder()
- .setIpv6Address(new Ipv6Prefix(InetAddresses.toAddrString(address)))
+ .setIpv6Address(new Ipv6Prefix(InetAddresses.toAddrString(address) + "/128"))
.build();
}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
-import java.math.BigInteger;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.regex.Pattern;
import org.opendaylight.controller.sal.common.util.Arguments;
import org.opendaylight.controller.sal.core.AdvertisedBandwidth;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.math.BigInteger;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.List;
+import java.util.regex.Pattern;
+
public final class NodeMapping {
private static final Logger LOG = LoggerFactory
* @param aDNode
* @return
*/
- private static NodeId toNodeId(org.opendaylight.controller.sal.core.Node aDNode) {
+ public static NodeId toNodeId(org.opendaylight.controller.sal.core.Node aDNode) {
String targetPrefix = null;
if (NodeIDType.OPENFLOW.equals(aDNode.getType())) {
targetPrefix = OPENFLOW_ID_PREFIX;
import org.opendaylight.controller.sal.action.SwPath;
import org.opendaylight.controller.sal.core.ConstructionException;
import org.opendaylight.controller.sal.core.Node;
+import org.opendaylight.controller.sal.core.Node.NodeIDType;
import org.opendaylight.controller.sal.core.NodeConnector;
import org.opendaylight.controller.sal.flowprogrammer.Flow;
import org.opendaylight.controller.sal.match.Match;
Uri nodeConnector = ((OutputActionCase) sourceAction).getOutputAction().getOutputNodeConnector();
if (nodeConnector != null) {
- //for (Uri uri : nodeConnectors) {
- Uri fullNodeConnector = new Uri(node.getType()+":"+node.getID()+":"+nodeConnector.getValue());
+ // TODO: We should really have a bi-directional map from AD-SAL node types to
+ // MD-SAL node types, but let's fix that later.
+ String type = node.getType();
+ if( type.equals(NodeIDType.OPENFLOW) ){
+ type = NodeMapping.OPENFLOW_ID_PREFIX;
+ }else{
+ type = type + ":";
+ }
+ Uri fullNodeConnector = new Uri(type+node.getID()+":"+nodeConnector.getValue());
targetAction.add(new Output(fromNodeConnectorRef(fullNodeConnector, node)));
- //}
}
} else if (sourceAction instanceof PopMplsActionCase) {
// TODO: define maping
*/
package org.opendaylight.controller.sal.compatibility.test;
-import junit.framework.Assert;
-
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.action.Action;
import org.opendaylight.controller.sal.action.PushVlan;
Layer3Match layer3Match = match.getLayer3Match();
boolean arpFound = false;
if (layer3Match instanceof ArpMatch) {
- assertEquals("Source IP address is wrong.", "192.168.100.100", ((ArpMatch) layer3Match)
+ assertEquals("Source IP address is wrong.", "192.168.100.100/32", ((ArpMatch) layer3Match)
.getArpSourceTransportAddress().getValue());
- assertEquals("Destination IP address is wrong.", "192.168.100.101", ((ArpMatch) layer3Match)
+ assertEquals("Destination IP address is wrong.", "192.168.100.101/32", ((ArpMatch) layer3Match)
.getArpTargetTransportAddress().getValue());
assertEquals("Source MAC address is wrong.", "ff:ee:dd:cc:bb:aa", ((ArpMatch) layer3Match)
.getArpSourceHardwareAddress().getAddress().getValue());
boolean ipv4Found = false;
layer3Match = match.getLayer3Match();
if (layer3Match instanceof Ipv4Match) {
- assertEquals("Source IP address is wrong.", "192.168.100.102", ((Ipv4Match) layer3Match)
+ assertEquals("Source IP address is wrong.", "192.168.100.102/32", ((Ipv4Match) layer3Match)
.getIpv4Source().getValue());
- assertEquals("Destination IP address is wrong.", "192.168.100.103", ((Ipv4Match) layer3Match)
+ assertEquals("Destination IP address is wrong.", "192.168.100.103/32", ((Ipv4Match) layer3Match)
.getIpv4Destination().getValue());
}
assertNotNull("Ipv4 wasn't found", ipv4Found);
boolean ipv6Found = false;
layer3Match = match.getLayer3Match();
if (layer3Match instanceof Ipv6Match) {
- assertEquals("Source IP address is wrong.", "2001:db8:85a3::8a2e:370:7335", ((Ipv6Match) layer3Match)
+ assertEquals("Source IP address is wrong.", "2001:db8:85a3::8a2e:370:7335/128", ((Ipv6Match) layer3Match)
.getIpv6Source().getValue());
- assertEquals("Destination IP address is wrong.", "2001:db8:85a3::8a2e:370:7336",
+ assertEquals("Destination IP address is wrong.", "2001:db8:85a3::8a2e:370:7336/128",
((Ipv6Match) layer3Match).getIpv6Destination().getValue());
}
assertNotNull("Ipv6 wasn't found", ipv6Found);
boolean ipv4AddressFound = false;
if (address instanceof Ipv4) {
ipv4AddressFound = true;
- assertEquals("Wrong IP address type in SetNextHopAction.", "192.168.100.100", ((Ipv4) address)
+ assertEquals("Wrong IP address type in SetNextHopAction.", "192.168.100.100/32", ((Ipv4) address)
.getIpv4Address().getValue());
}
assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
boolean ipv4AddressFound = false;
if (address instanceof Ipv4) {
ipv4AddressFound = true;
- assertEquals("Wrong IP address type in SetNwDstAction.", "192.168.100.101", ((Ipv4) address)
+ assertEquals("Wrong IP address type in SetNwDstAction.", "192.168.100.101/32", ((Ipv4) address)
.getIpv4Address().getValue());
}
assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
boolean ipv4AddressFound = false;
if (address instanceof Ipv4) {
ipv4AddressFound = true;
- assertEquals("Wrong IP address type in SetNwSrcAction.", "192.168.100.102", ((Ipv4) address)
+ assertEquals("Wrong IP address type in SetNwSrcAction.", "192.168.100.102/32", ((Ipv4) address)
.getIpv4Address().getValue());
}
assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
package org.opendaylight.controller.sal.compatibility.test;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.opendaylight.controller.sal.compatibility.ProtocolConstants.CRUDP;
import static org.opendaylight.controller.sal.compatibility.ProtocolConstants.ETHERNET_ARP;
import java.util.Collections;
import java.util.List;
-import junit.framework.Assert;
-
import org.junit.Test;
import org.opendaylight.controller.sal.action.Flood;
import org.opendaylight.controller.sal.action.FloodAll;
public void testFromNodeConnectorRef() throws ConstructionException {
Node node = new Node(NodeIDType.OPENFLOW, 42L);
NodeConnector nodeConnector = ToSalConversionsUtils.fromNodeConnectorRef(new Uri("1"), node);
- Assert.assertEquals("OF|1@OF|00:00:00:00:00:00:00:2a", nodeConnector.toString());
+ assertEquals("OF|1@OF|00:00:00:00:00:00:00:2a", nodeConnector.toString());
+ }
+
+ @Test
+ public void testActionFrom() throws ConstructionException {
+ // Bug 2021: Convert AD-SAL notation into MD-SAL notation before calling NodeConnector
+ Node node = new Node(NodeIDType.OPENFLOW, 42L);
+ List<Action> odActions = new ArrayList<>();
+
+ OutputActionBuilder outputActionBuilder = new OutputActionBuilder();
+ outputActionBuilder.setOutputNodeConnector(new Uri("CONTROLLER"));
+ OutputActionCaseBuilder outputActionCaseBuilder = new OutputActionCaseBuilder();
+ outputActionCaseBuilder.setOutputAction(outputActionBuilder.build());
+ odActions.add(new ActionBuilder().setAction(outputActionCaseBuilder.build()).build());
+
+ List<org.opendaylight.controller.sal.action.Action> targetAction =
+ ToSalConversionsUtils.actionFrom(odActions, node);
+ assertNotNull(targetAction);
+ assertTrue( Output.class.isInstance(targetAction.get(0)) );
+ Output targetActionOutput = (Output) targetAction.get(0);
+ NodeConnector port = targetActionOutput.getPort();
+ assertNotNull(port);
+ assertEquals(port.getType(), NodeConnectorIDType.CONTROLLER);
+ assertEquals(port.getID(), org.opendaylight.controller.sal.core.NodeConnector.SPECIALNODECONNECTORID);
}
private void checkSalMatch(org.opendaylight.controller.sal.match.Match match, MtchType mt) throws ConstructionException {
private Address prapareIpv4Address(String ipv4Address) {
Ipv4Builder ipv4Builder = new Ipv4Builder();
- ipv4Builder.setIpv4Address(new Ipv4Prefix(ipv4Address));
+ ipv4Builder.setIpv4Address(new Ipv4Prefix(ipv4Address + "/32"));
return ipv4Builder.build();
}
private Layer3Match prepLayer3MatchIpv4() {
Ipv4MatchBuilder ipv4MatchBuilder = new Ipv4MatchBuilder();
- ipv4MatchBuilder.setIpv4Source(new Ipv4Prefix("192.168.1.104"));
- ipv4MatchBuilder.setIpv4Destination(new Ipv4Prefix("192.168.1.105"));
+ ipv4MatchBuilder.setIpv4Source(new Ipv4Prefix("192.168.1.104/32"));
+ ipv4MatchBuilder.setIpv4Destination(new Ipv4Prefix("192.168.1.105/32"));
return ipv4MatchBuilder.build();
}
private Layer3Match prepLayer3MatchArp() {
ArpMatchBuilder arpMatchBuilder = new ArpMatchBuilder();
- arpMatchBuilder.setArpSourceTransportAddress(new Ipv4Prefix("192.168.1.101"));
- arpMatchBuilder.setArpTargetTransportAddress(new Ipv4Prefix("192.168.1.102"));
+ arpMatchBuilder.setArpSourceTransportAddress(new Ipv4Prefix("192.168.1.101/32"));
+ arpMatchBuilder.setArpTargetTransportAddress(new Ipv4Prefix("192.168.1.102/32"));
ArpSourceHardwareAddressBuilder arpSourAddressBuild = new ArpSourceHardwareAddressBuilder();
arpSourAddressBuild.setAddress(new MacAddress("22:44:66:88:AA:CC"));
*/
package org.opendaylight.controller.sal.compatibility.topology.test;
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.opendaylight.controller.sal.compatibility.topology.TopologyMapping;
NodeId nodeId = new NodeId("openflow:1");
String observedNodeId = TopologyMapping.toADNodeId(nodeId);
- Assert.assertEquals("1", observedNodeId);
+ assertEquals("1", observedNodeId);
}
/**
TpId source = new TpId("foo:2");
NodeConnector observedNodeConnector = TopologyMapping.toADNodeConnector(source, nodeId);
- Assert.assertEquals("OF|2@OF|00:00:00:00:00:00:00:01", observedNodeConnector.toString());
+ assertEquals("OF|2@OF|00:00:00:00:00:00:00:01", observedNodeConnector.toString());
}
/**
TpId source = new TpId("foo:2");
String observedNodeConnectorId = TopologyMapping.toADNodeConnectorId(source);
- Assert.assertEquals("2", observedNodeConnectorId);
+ assertEquals("2", observedNodeConnectorId);
}
/**
NodeId nodeId = new NodeId("openflow:1");
Node observedNode = TopologyMapping.toADNode(nodeId);
- Assert.assertEquals("OF|00:00:00:00:00:00:00:01", observedNode.toString());
+ assertEquals("OF|00:00:00:00:00:00:00:01", observedNode.toString());
}
/**
TpId source = new TpId("192.168.0.1");
NodeConnector observedNodeConnector = TopologyMapping.toADNodeConnector(source, nodeId);
- Assert.assertEquals("MD_SAL_DEPRECATED|192.168.0.1@MD_SAL_DEPRECATED|some_unknown_node", observedNodeConnector.toString());
+ assertEquals("MD_SAL_DEPRECATED|192.168.0.1@MD_SAL_DEPRECATED|some_unknown_node", observedNodeConnector.toString());
}
}
-/**ab
+/**
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
builder.setFlowRef(new FlowRef(identifier));
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
- builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
+
+ // This method is called only when a given flow object has been
+ // removed from datastore. So FRM always needs to set strict flag
+ // into remove-flow input so that only a flow entry associated with
+ // a given flow object is removed.
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId())).
+ setStrict(Boolean.TRUE);
provider.getSalFlowService().removeFlow(builder.build());
}
}
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setFlowRef(new FlowRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
- builder.setOriginalFlow((new OriginalFlowBuilder(original)).build());
+
+ // This method is called only when a given flow object in datastore
+ // has been updated. So FRM always needs to set strict flag into
+ // update-flow input so that only a flow entry associated with
+ // a given flow object is updated.
+ builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).setStrict(Boolean.TRUE).build());
+ builder.setOriginalFlow((new OriginalFlowBuilder(original)).setStrict(Boolean.TRUE).build());
provider.getSalFlowService().updateFlow(builder.build());
}
assertEquals(1, updateFlowCalls.size());
assertEquals("DOM-1", updateFlowCalls.get(0).getTransactionUri().getValue());
assertEquals(flowII, updateFlowCalls.get(0).getFlowRef().getValue());
+ assertEquals(Boolean.TRUE, updateFlowCalls.get(0).getOriginalFlow().isStrict());
+ assertEquals(Boolean.TRUE, updateFlowCalls.get(0).getUpdatedFlow().isStrict());
forwardingRulesManager.close();
}
assertEquals(1, removeFlowCalls.size());
assertEquals("DOM-1", removeFlowCalls.get(0).getTransactionUri().getValue());
assertEquals(flowII, removeFlowCalls.get(0).getFlowRef().getValue());
+ assertEquals(Boolean.TRUE, removeFlowCalls.get(0).isStrict());
forwardingRulesManager.close();
}
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.InstanceIdentifierBuilder;
import org.slf4j.Logger;
InstanceIdentifierBuilder<Node> builder = ((InstanceIdentifier<Node>) ref.getValue()).builder();
InstanceIdentifierBuilder<FlowCapableNode> augmentation = builder.augmentation(FlowCapableNode.class);
final InstanceIdentifier<FlowCapableNode> path = augmentation.build();
- CheckedFuture readFuture = tx.read(LogicalDatastoreType.OPERATIONAL, path);
- Futures.addCallback(readFuture, new FutureCallback<Optional<? extends DataObject>>() {
+ CheckedFuture<Optional<FlowCapableNode>, ?> readFuture = tx.read(LogicalDatastoreType.OPERATIONAL, path);
+ Futures.addCallback(readFuture, new FutureCallback<Optional<FlowCapableNode>>() {
@Override
- public void onSuccess(Optional<? extends DataObject> optional) {
+ public void onSuccess(Optional<FlowCapableNode> optional) {
enqueueWriteNodeDataTx(node, flowNode, path);
if (!optional.isPresent()) {
enqueuePutTable0Tx(ref);
type uint32;
}
- leaf perc_level {
+ leaf prec_level {
description "Number of drop precedence level to add";
type uint8;
}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-commons</artifactId>
- <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
public static Props props(final ActorRef target){
return Props.create(new Creator<ClientActor>(){
+ private static final long serialVersionUID = 1L;
@Override public ClientActor create() throws Exception {
return new ClientActor(target);
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.japi.Creator;
-
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
-
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
import org.opendaylight.controller.cluster.example.messages.PrintState;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.util.HashMap;
-import java.util.Map;
-
/**
* A sample actor showing how the RaftActor is to be extended
*/
public class ExampleActor extends RaftActor {
- private final Map<String, String> state = new HashMap();
+ private final Map<String, String> state = new HashMap<>();
+ private final DataPersistenceProvider dataPersistenceProvider;
private long persistIdentifier = 1;
- public ExampleActor(String id, Map<String, String> peerAddresses,
- Optional<ConfigParams> configParams) {
+ public ExampleActor(final String id, final Map<String, String> peerAddresses,
+ final Optional<ConfigParams> configParams) {
super(id, peerAddresses, configParams);
+ this.dataPersistenceProvider = new PersistentDataProvider();
}
public static Props props(final String id, final Map<String, String> peerAddresses,
final Optional<ConfigParams> configParams){
return Props.create(new Creator<ExampleActor>(){
+ private static final long serialVersionUID = 1L;
@Override public ExampleActor create() throws Exception {
return new ExampleActor(id, peerAddresses, configParams);
});
}
- @Override public void onReceiveCommand(Object message){
+ @Override public void onReceiveCommand(final Object message) throws Exception{
if(message instanceof KeyValue){
if(isLeader()) {
String persistId = Long.toString(persistIdentifier++);
} else if (message instanceof PrintRole) {
if(LOG.isDebugEnabled()) {
- LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers());
+ String followers = "";
+ if (getRaftState() == RaftState.Leader || getRaftState() == RaftState.IsolatedLeader) {
+ followers = ((Leader)this.getCurrentBehavior()).printFollowerStates();
+ LOG.debug("{} = {}, Peers={}, followers={}", getId(), getRaftState(), getPeers(), followers);
+ } else {
+ LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers());
+ }
+
+
}
} else {
}
}
- @Override protected void applyState(ActorRef clientActor, String identifier,
- Object data) {
+ @Override protected void applyState(final ActorRef clientActor, final String identifier,
+ final Object data) {
if(data instanceof KeyValue){
KeyValue kv = (KeyValue) data;
state.put(kv.getKey(), kv.getValue());
getSelf().tell(new CaptureSnapshotReply(bs), null);
}
- @Override protected void applySnapshot(ByteString snapshot) {
+ @Override protected void applySnapshot(final ByteString snapshot) {
state.clear();
try {
- state.putAll((HashMap) toObject(snapshot));
+ state.putAll((Map<String, String>) toObject(snapshot));
} catch (Exception e) {
LOG.error(e, "Exception in applying snapshot");
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
+ LOG.debug("Snapshot applied to state : {}", ((Map<?, ?>) state).size());
}
}
- private ByteString fromObject(Object snapshot) throws Exception {
+ private ByteString fromObject(final Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
try {
}
}
- private Object toObject(ByteString bs) throws ClassNotFoundException, IOException {
+ private Object toObject(final ByteString bs) throws ClassNotFoundException, IOException {
Object obj = null;
ByteArrayInputStream bis = null;
ObjectInputStream ois = null;
}
- @Override public void onReceiveRecover(Object message) {
+ @Override
+ protected DataPersistenceProvider persistence() {
+ return dataPersistenceProvider;
+ }
+
+ @Override public void onReceiveRecover(final Object message)throws Exception {
super.onReceiveRecover(message);
}
}
@Override
- protected void startLogRecoveryBatch(int maxBatchSize) {
+ protected void startLogRecoveryBatch(final int maxBatchSize) {
}
@Override
- protected void appendRecoveredLogEntry(Payload data) {
+ protected void appendRecoveredLogEntry(final Payload data) {
}
@Override
}
@Override
- protected void applyRecoverySnapshot(ByteString snapshot) {
+ protected void applyRecoverySnapshot(final ByteString snapshot) {
}
}
public class ExampleConfigParamsImpl extends DefaultConfigParamsImpl {
@Override
public long getSnapshotBatchCount() {
- return 50;
+ return 25;
}
@Override
import org.opendaylight.controller.cluster.example.messages.PrintRole;
import org.opendaylight.controller.cluster.example.messages.PrintState;
import org.opendaylight.controller.cluster.raft.ConfigParams;
-import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
import java.io.BufferedReader;
import java.io.InputStreamReader;
* Commands
* bye
* createNodes:{num}
- * addNodes:{num}
* stopNode:{nodeName}
* reinstateNode:{nodeName}
* addClients:{num}
* stopLoggingForClient:{nodeName}
* printNodes
* printState
+ *
+ * Note: when run in an IDE at debug log level, the debug logs in
+ * AbstractUntypedActor and AbstractUntypedPersistentActor would need to be commented out.
+ * The same applies to the debug log in RaftActor's handleCommand(), which prints for every command other than AE/AER.
+ *
* @param args
* @throws Exception
*/
int n = Integer.parseInt(arr[1]);
td.createNodes(n);
- } else if (command.startsWith("addNodes")) {
- String[] arr = command.split(":");
- int n = Integer.parseInt(arr[1]);
- td.addNodes(n);
-
} else if (command.startsWith("addClients")) {
String[] arr = command.split(":");
int n = Integer.parseInt(arr[1]);
}
}
- // add new nodes , pass in the count
- public void addNodes(int num) {
- Map<String, String> newPeers = new HashMap<>();
- for (int i=0; i < num; i++) {
- nameCounter = nameCounter + 1;
- newPeers.put("example-"+nameCounter, "akka://default/user/example-"+nameCounter);
- allPeers.put("example-"+nameCounter, "akka://default/user/example-"+nameCounter);
-
- }
- Map<String, ActorRef> newActorRefs = new HashMap<String, ActorRef>(num);
- for (Map.Entry<String, String> entry : newPeers.entrySet()) {
- ActorRef exampleActor = createExampleActor(entry.getKey());
- newActorRefs.put(entry.getKey(), exampleActor);
-
- //now also add these new nodes as peers from the previous nodes
- for (ActorRef actor : actorRefs.values()) {
- actor.tell(new AddRaftPeer(entry.getKey(), entry.getValue()), null);
- }
-
- System.out.println("Added node:" + entry);
- }
-
- actorRefs.putAll(newActorRefs);
- }
-
-
// add num clients to all nodes in the system
public void addClients(int num) {
for(Map.Entry<String,ActorRef> actorRefEntry : actorRefs.entrySet()) {
import java.util.Map;
public class KeyValue extends Payload implements Serializable {
+ private static final long serialVersionUID = 1L;
private String key;
private String value;
import java.io.Serializable;
public class KeyValueSaved implements Serializable {
+ private static final long serialVersionUID = 1L;
}
import java.io.Serializable;
public class PrintRole implements Serializable {
+ private static final long serialVersionUID = 1L;
}
import java.io.Serializable;
public class PrintState implements Serializable {
+ private static final long serialVersionUID = 1L;
}
*/
package org.opendaylight.controller.cluster.raft;
-import com.google.protobuf.ByteString;
-
import java.util.ArrayList;
import java.util.List;
// We define this as ArrayList so we can use ensureCapacity.
protected ArrayList<ReplicatedLogEntry> journal;
- protected ByteString snapshot;
+
protected long snapshotIndex = -1;
protected long snapshotTerm = -1;
// to be used for rollback during save snapshot failure
protected ArrayList<ReplicatedLogEntry> snapshottedJournal;
- protected ByteString previousSnapshot;
protected long previousSnapshotIndex = -1;
protected long previousSnapshotTerm = -1;
- public AbstractReplicatedLogImpl(ByteString state, long snapshotIndex,
+ public AbstractReplicatedLogImpl(long snapshotIndex,
long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries) {
- this.snapshot = state;
this.snapshotIndex = snapshotIndex;
this.snapshotTerm = snapshotTerm;
this.journal = new ArrayList<>(unAppliedEntries);
}
-
public AbstractReplicatedLogImpl() {
- this.snapshot = null;
this.journal = new ArrayList<>();
}
return logEntryIndex <= snapshotIndex && snapshotIndex != -1;
}
- @Override
- public ByteString getSnapshot() {
- return snapshot;
- }
-
@Override
public long getSnapshotIndex() {
return snapshotIndex;
this.snapshotTerm = snapshotTerm;
}
- @Override
- public void setSnapshot(ByteString snapshot) {
- this.snapshot = snapshot;
- }
-
@Override
public void clear(int startIndex, int endIndex) {
journal.subList(startIndex, endIndex).clear();
}
@Override
- public void snapshotPreCommit(ByteString snapshot, long snapshotCapturedIndex, long snapshotCapturedTerm) {
+ public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm) {
snapshottedJournal = new ArrayList<>(journal.size());
snapshottedJournal.addAll(journal.subList(0, (int)(snapshotCapturedIndex - snapshotIndex)));
previousSnapshotTerm = snapshotTerm;
setSnapshotTerm(snapshotCapturedTerm);
-
- previousSnapshot = getSnapshot();
- setSnapshot(snapshot);
}
@Override
snapshottedJournal = null;
previousSnapshotIndex = -1;
previousSnapshotTerm = -1;
- previousSnapshot = null;
}
@Override
snapshotTerm = previousSnapshotTerm;
previousSnapshotTerm = -1;
-
- snapshot = previousSnapshot;
- previousSnapshot = null;
-
}
}
* The number of journal log entries to batch on recovery before applying.
*/
int getJournalRecoveryLogBatchSize();
+
+ /**
+ * The interval at which the leader should check whether it has become isolated.
+ * @return FiniteDuration
+ */
+ FiniteDuration getIsolatedCheckInterval();
}
private FiniteDuration heartBeatInterval = HEART_BEAT_INTERVAL;
private long snapshotBatchCount = SNAPSHOT_BATCH_COUNT;
private int journalRecoveryLogBatchSize = JOURNAL_RECOVERY_LOG_BATCH_SIZE;
+ private FiniteDuration isolatedLeaderCheckInterval =
+ new FiniteDuration(HEART_BEAT_INTERVAL.length() * 1000, HEART_BEAT_INTERVAL.unit());
public void setHeartBeatInterval(FiniteDuration heartBeatInterval) {
this.heartBeatInterval = heartBeatInterval;
this.journalRecoveryLogBatchSize = journalRecoveryLogBatchSize;
}
+ public void setIsolatedLeaderCheckInterval(FiniteDuration isolatedLeaderCheckInterval) {
+ this.isolatedLeaderCheckInterval = isolatedLeaderCheckInterval;
+ }
+
@Override
public long getSnapshotBatchCount() {
return snapshotBatchCount;
public int getJournalRecoveryLogBatchSize() {
return journalRecoveryLogBatchSize;
}
+
+ @Override
+ public FiniteDuration getIsolatedCheckInterval() {
+ return isolatedLeaderCheckInterval;
+ }
}
*/
public AtomicLong getMatchIndex();
+ /**
+ * Checks whether the follower is active by comparing the time elapsed since its last update against the allowed timeout duration.
+ * @return boolean
+ */
+ public boolean isFollowerActive();
+
+ /**
+ * Restarts the timeout clock of the follower.
+ */
+ public void markFollowerActive();
+
}
package org.opendaylight.controller.cluster.raft;
+import com.google.common.base.Stopwatch;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
public class FollowerLogInformationImpl implements FollowerLogInformation{
private final AtomicLong matchIndex;
+ private final Stopwatch stopwatch;
+
+ private final long followerTimeoutMillis;
+
public FollowerLogInformationImpl(String id, AtomicLong nextIndex,
- AtomicLong matchIndex) {
+ AtomicLong matchIndex, FiniteDuration followerTimeoutDuration) {
this.id = id;
this.nextIndex = nextIndex;
this.matchIndex = matchIndex;
+ this.stopwatch = new Stopwatch();
+ this.followerTimeoutMillis = followerTimeoutDuration.toMillis();
}
public long incrNextIndex(){
return matchIndex;
}
+ @Override
+ public boolean isFollowerActive() {
+ long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ return (stopwatch.isRunning()) && (elapsed <= followerTimeoutMillis);
+ }
+
+ @Override
+ public void markFollowerActive() {
+ if (stopwatch.isRunning()) {
+ stopwatch.reset();
+ }
+ stopwatch.start();
+ }
}
import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
-import akka.persistence.UntypedPersistentActor;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Stopwatch;
import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
-import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
+
import java.io.Serializable;
import java.util.Map;
* <li> when a snapshot should be saved </li>
* </ul>
*/
-public abstract class RaftActor extends UntypedPersistentActor {
+public abstract class RaftActor extends AbstractUntypedPersistentActor {
protected final LoggingAdapter LOG =
Logging.getLogger(getContext().system(), this);
public void preStart() throws Exception {
LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
context.getConfigParams().getJournalRecoveryLogBatchSize());
+
super.preStart();
}
@Override
- public void onReceiveRecover(Object message) {
- if (message instanceof SnapshotOffer) {
- onRecoveredSnapshot((SnapshotOffer)message);
- } else if (message instanceof ReplicatedLogEntry) {
- onRecoveredJournalLogEntry((ReplicatedLogEntry)message);
- } else if (message instanceof ApplyLogEntries) {
- onRecoveredApplyLogEntries((ApplyLogEntries)message);
- } else if (message instanceof DeleteEntries) {
- replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
- } else if (message instanceof RecoveryCompleted) {
- onRecoveryCompletedMessage();
+ public void handleRecover(Object message) {
+ if(persistence().isRecoveryApplicable()) {
+ if (message instanceof SnapshotOffer) {
+ onRecoveredSnapshot((SnapshotOffer) message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
+ } else if (message instanceof ApplyLogEntries) {
+ onRecoveredApplyLogEntries((ApplyLogEntries) message);
+ } else if (message instanceof DeleteEntries) {
+ replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ }
+ } else {
+ if (message instanceof RecoveryCompleted) {
+ // Delete all the messages from the akka journal so that we do not end up with consistency issues
+ // Note I am not using the dataPersistenceProvider and directly using the akka api here
+ deleteMessages(lastSequenceNr());
+
+ // Delete all the akka snapshots as they will not be needed
+ deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), scala.Long.MaxValue()));
+
+ onRecoveryComplete();
+ currentBehavior = new Follower(context);
+ onStateChanged();
+ }
}
}
onStateChanged();
}
- @Override public void onReceiveCommand(Object message) {
+ @Override public void handleCommand(Object message) {
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
if(LOG.isDebugEnabled()) {
LOG.debug("Persisting ApplyLogEntries with index={}", ale.getToIndex());
}
- persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
+ persistence().persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
@Override
public void apply(ApplyLogEntries param) throws Exception {
}
SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
LOG.info("SaveSnapshotSuccess received for snapshot");
- context.getReplicatedLog().snapshotCommit();
+ long sequenceNumber = success.metadata().sequenceNr();
- // TODO: Not sure if we want to be this aggressive with trimming stuff
- trimPersistentData(success.metadata().sequenceNr());
+ commitSnapshot(sequenceNumber);
} else if (message instanceof SaveSnapshotFailure) {
SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
context.getReplicatedLog().getSnapshotTerm(),
context.getReplicatedLog().size());
- } else if (message instanceof AddRaftPeer){
-
- // FIXME : Do not add raft peers like this.
- // When adding a new Peer we have to ensure that the a majority of
- // the peers know about the new Peer. Doing it this way may cause
- // a situation where multiple Leaders may emerge
- AddRaftPeer arp = (AddRaftPeer)message;
- context.addToPeers(arp.getName(), arp.getAddress());
-
- } else if (message instanceof RemoveRaftPeer){
-
- RemoveRaftPeer rrp = (RemoveRaftPeer)message;
- context.removePeer(rrp.getName());
-
} else if (message instanceof CaptureSnapshot) {
LOG.info("CaptureSnapshot received by actor");
CaptureSnapshot cs = (CaptureSnapshot)message;
}
public java.util.Set<String> getPeers() {
+
return context.getPeerAddresses().keySet();
}
context.setPeerAddress(peerId, peerAddress);
}
+ protected void commitSnapshot(long sequenceNumber) {
+ context.getReplicatedLog().snapshotCommit();
+ // TODO: Not sure if we want to be this aggressive with trimming stuff
+ trimPersistentData(sequenceNumber);
+ }
/**
* The applyState method will be called by the RaftActor when some data
/**
* This method is called during recovery to append state data to the current batch. This method
- * is called 1 or more times after {@link #startRecoveryStateBatch}.
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
*
* @param data the state data
*/
/**
* This method is called during recovery at the end of a batch to apply the current batched
- * log entries. This method is called after {@link #appendRecoveryLogEntry}.
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
*/
protected abstract void applyCurrentLogRecoveryBatch();
*/
protected abstract void onStateChanged();
+ protected abstract DataPersistenceProvider persistence();
+
protected void onLeaderChanged(String oldLeader, String newLeader){};
private void trimPersistentData(long sequenceNumber) {
// Trim akka snapshots
// FIXME : Not sure how exactly the SnapshotSelectionCriteria is applied
// For now guessing that it is ANDed.
- deleteSnapshots(new SnapshotSelectionCriteria(
+ persistence().deleteSnapshots(new SnapshotSelectionCriteria(
sequenceNumber - context.getConfigParams().getSnapshotBatchCount(), 43200000));
// Trim akka journal
- deleteMessages(sequenceNumber);
+ persistence().deleteMessages(sequenceNumber);
}
private String getLeaderAddress(){
captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
- saveSnapshot(sn);
+ persistence().saveSnapshot(sn);
LOG.info("Persisting of snapshot done:{}", sn.getLogMessage());
//be greedy and remove entries from in-mem journal which are in the snapshot
// and update snapshotIndex and snapshotTerm without waiting for the success,
- context.getReplicatedLog().snapshotPreCommit(stateInBytes,
+ context.getReplicatedLog().snapshotPreCommit(
captureSnapshot.getLastAppliedIndex(),
captureSnapshot.getLastAppliedTerm());
"and term:{}", captureSnapshot.getLastAppliedIndex(),
captureSnapshot.getLastAppliedTerm());
+ if (isLeader() && captureSnapshot.isInstallSnapshotInitiated()) {
+ // this is a direct call into the leader behavior and does not go through serialization
+ currentBehavior.handleMessage(getSelf(), new SendInstallSnapshot(stateInBytes));
+ }
+
captureSnapshot = null;
hasSnapshotCaptureInitiated = false;
}
-
private class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
public ReplicatedLogImpl(Snapshot snapshot) {
- super(ByteString.copyFrom(snapshot.getState()),
- snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
+ super(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
snapshot.getUnAppliedEntries());
}
// FIXME: Maybe this should be done after the command is saved
journal.subList(adjustedIndex , journal.size()).clear();
- persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
+ persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
@Override public void apply(DeleteEntries param)
throws Exception {
// persist call and the execution(s) of the associated event
// handler. This also holds for multiple persist calls in context
// of a single command.
- persist(replicatedLogEntry,
+ persistence().persist(replicatedLogEntry,
new Procedure<ReplicatedLogEntry>() {
@Override
public void apply(ReplicatedLogEntry evt) throws Exception {
}
- private static class DeleteEntries implements Serializable {
+ static class DeleteEntries implements Serializable {
+ private static final long serialVersionUID = 1L;
private final int fromIndex;
-
public DeleteEntries(int fromIndex) {
this.fromIndex = fromIndex;
}
public void updateAndPersist(long currentTerm, String votedFor){
update(currentTerm, votedFor);
// FIXME : Maybe first persist then update the state
- persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
+ persistence().persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
@Override public void apply(UpdateElectionTerm param)
throws Exception {
}
}
- private static class UpdateElectionTerm implements Serializable {
+ static class UpdateElectionTerm implements Serializable {
+ private static final long serialVersionUID = 1L;
private final long currentTerm;
private final String votedFor;
}
}
+ protected class NonPersistentRaftDataProvider extends NonPersistentDataProvider {
+
+ public NonPersistentRaftDataProvider(){
+
+ }
+
+ /**
+ * The way snapshotting works is,
+ * <ol>
+ * <li> RaftActor calls createSnapshot on the Shard
+ * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
+ * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save the snapshot.
+ * The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the RaftActor gets SaveSnapshot
+ * success it commits the snapshot to the in-memory journal. This commitSnapshot is mimicking what is done
+ * in SaveSnapshotSuccess.
+ * </ol>
+ * @param o the snapshot object to save (ignored here; the snapshot is committed directly)
+ */
+ @Override
+ public void saveSnapshot(Object o) {
+ // Make saving Snapshot successful
+ commitSnapshot(-1L);
+ }
+ }
+
+ @VisibleForTesting
+ void setCurrentBehavior(AbstractRaftActorBehavior behavior) {
+ currentBehavior = behavior;
+ }
+
+ protected RaftActorBehavior getCurrentBehavior() {
+ return currentBehavior;
+ }
}
public enum RaftState {
Candidate,
Follower,
- Leader
+ Leader,
+ // NOTE(review): presumably a Leader that has lost contact with a majority of its
+ // peers (see IsolatedLeaderCheck) — confirm intended semantics.
+ IsolatedLeader;
}
package org.opendaylight.controller.cluster.raft;
-import com.google.protobuf.ByteString;
-
import java.util.List;
/**
*/
boolean isInSnapshot(long index);
- /**
- * Get the snapshot
- *
- * @return an object representing the snapshot if it exists. null otherwise
- */
- ByteString getSnapshot();
-
/**
* Get the index of the snapshot
*
*/
public void setSnapshotTerm(long snapshotTerm);
- /**
- * sets the snapshot in bytes
- * @param snapshot
- */
- public void setSnapshot(ByteString snapshot);
-
/**
* Clears the journal entries with startIndex(inclusive) and endIndex (exclusive)
* @param startIndex
/**
* Handles all the bookkeeping in order to perform a rollback in the
* event of SaveSnapshotFailure
- * @param snapshot
* @param snapshotCapturedIndex
* @param snapshotCapturedTerm
*/
- public void snapshotPreCommit(ByteString snapshot,
- long snapshotCapturedIndex, long snapshotCapturedTerm);
+ public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
/**
* Sets the Replicated log to state after snapshot success.
public class ReplicatedLogImplEntry implements ReplicatedLogEntry,
Serializable {
+ private static final long serialVersionUID = 1L;
private final long index;
private final long term;
public class Snapshot implements Serializable {
+ private static final long serialVersionUID = 1L;
private final byte[] state;
private final List<ReplicatedLogEntry> unAppliedEntries;
private final long lastIndex;
*
*/
public class ApplyLogEntries implements Serializable {
+ private static final long serialVersionUID = 1L;
private final int toIndex;
public ApplyLogEntries(int toIndex) {
* Internal message, issued by follower to its actor
*/
public class ApplySnapshot implements Serializable {
+ private static final long serialVersionUID = 1L;
private final Snapshot snapshot;
public ApplySnapshot(Snapshot snapshot) {
import java.io.Serializable;
public class ApplyState implements Serializable {
+ private static final long serialVersionUID = 1L;
private final ActorRef clientActor;
private final String identifier;
private final ReplicatedLogEntry replicatedLogEntry;
private long lastAppliedTerm;
private long lastIndex;
private long lastTerm;
+ private boolean installSnapshotInitiated;
public CaptureSnapshot(long lastIndex, long lastTerm,
long lastAppliedIndex, long lastAppliedTerm) {
+ this(lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm, false);
+ }
+
+ public CaptureSnapshot(long lastIndex, long lastTerm,long lastAppliedIndex,
+ long lastAppliedTerm, boolean installSnapshotInitiated) {
this.lastIndex = lastIndex;
this.lastTerm = lastTerm;
this.lastAppliedIndex = lastAppliedIndex;
this.lastAppliedTerm = lastAppliedTerm;
+ this.installSnapshotInitiated = installSnapshotInitiated;
}
public long getLastAppliedIndex() {
public long getLastTerm() {
return lastTerm;
}
+
+ public boolean isInstallSnapshotInitiated() {
+ return installSnapshotInitiated;
+ }
}
* Message sent to commit an entry to the log
*/
public class CommitEntry implements Serializable {
+ private static final long serialVersionUID = 1L;
}
import java.io.Serializable;
public class ElectionTimeout implements Serializable {
+ private static final long serialVersionUID = 1L;
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+/**
+ * Internal message sent by the Leader to itself to trigger a check for
+ * followers whose next index has fallen behind into the snapshot, so an
+ * install snapshot can be initiated for them.
+ */
+public class InitiateInstallSnapshot {
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+/**
+ * Message sent by the IsolatedLeaderCheck scheduler in the Leader to itself
+ * in order to check whether it is isolated from its peers.
+ */
+public class IsolatedLeaderCheck {
+}
* Message sent to Persist an entry into the transaction journal
*/
public class PersistEntry implements Serializable {
+ private static final long serialVersionUID = 1L;
}
import java.io.Serializable;
public class Replicate implements Serializable {
+ private static final long serialVersionUID = 1L;
private final ActorRef clientActor;
private final String identifier;
private final ReplicatedLogEntry replicatedLogEntry;
* it and use it to save it's state
*/
public class SaveSnapshot implements Serializable {
+ private static final long serialVersionUID = 1L;
}
* Typically the Leader to itself on a schedule
*/
public class SendHeartBeat implements Serializable {
+ private static final long serialVersionUID = 1L;
}
package org.opendaylight.controller.cluster.raft.base.messages;
-import java.io.Serializable;
+import com.google.protobuf.ByteString;
-public class SendInstallSnapshot implements Serializable {
+/**
+ * Internal message carrying the captured snapshot bytes from the RaftActor
+ * to the leader behavior, which chunks it and sends it to lagging followers.
+ */
+public class SendInstallSnapshot {
+ // final: a message object must be immutable once constructed
+ private final ByteString snapshot;
+
+ public SendInstallSnapshot(ByteString snapshot) {
+ this.snapshot = snapshot;
+ }
+
+ public ByteString getSnapshot() {
+ return snapshot;
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Cancellable;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
+import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
+import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
+import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * The behavior of a RaftActor when it is in the Leader state
+ * <p/>
+ * Leaders:
+ * <ul>
+ * <li> Upon election: send initial empty AppendEntries RPCs
+ * (heartbeat) to each server; repeat during idle periods to
+ * prevent election timeouts (§5.2)
+ * <li> If command received from client: append entry to local log,
+ * respond after entry applied to state machine (§5.3)
+ * <li> If last log index ≥ nextIndex for a follower: send
+ * AppendEntries RPC with log entries starting at nextIndex
+ * <ul>
+ * <li> If successful: update nextIndex and matchIndex for
+ * follower (§5.3)
+ * <li> If AppendEntries fails because of log inconsistency:
+ * decrement nextIndex and retry (§5.3)
+ * </ul>
+ * <li> If there exists an N such that N > commitIndex, a majority
+ * of matchIndex[i] ≥ N, and log[N].term == currentTerm:
+ * set commitIndex = N (§5.3, §5.4).
+ */
+public abstract class AbstractLeader extends AbstractRaftActorBehavior {
+ protected final Map<String, FollowerLogInformation> followerToLog = new HashMap<>();
+ protected final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();
+
+ protected final Set<String> followers;
+
+ private Cancellable heartbeatSchedule = null;
+
+ private List<ClientRequestTracker> trackerList = new ArrayList<>();
+
+ protected final int minReplicationCount;
+
+ protected final int minIsolatedLeaderPeerCount;
+
+ private Optional<ByteString> snapshot;
+
+ public AbstractLeader(RaftActorContext context) {
+ super(context);
+
+ followers = context.getPeerAddresses().keySet();
+
+ for (String followerId : followers) {
+ FollowerLogInformation followerLogInformation =
+ new FollowerLogInformationImpl(followerId,
+ new AtomicLong(context.getCommitIndex()),
+ new AtomicLong(-1),
+ context.getConfigParams().getElectionTimeOutInterval());
+
+ followerToLog.put(followerId, followerLogInformation);
+ }
+
+ leaderId = context.getId();
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Election:Leader has following peers: {}", followers);
+ }
+
+ minReplicationCount = getMajorityVoteCount(followers.size());
+
+ // the isolated Leader peer count will be 1 less than the majority vote count.
+ // this is because the vote count has the self vote counted in it
+ // for e.g
+ // 0 peers = 1 votesRequired , minIsolatedLeaderPeerCount = 0
+ // 2 peers = 2 votesRequired , minIsolatedLeaderPeerCount = 1
+ // 4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2
+ minIsolatedLeaderPeerCount = minReplicationCount > 0 ? (minReplicationCount - 1) : 0;
+
+ snapshot = Optional.absent();
+
+ // Immediately schedule a heartbeat
+ // Upon election: send initial empty AppendEntries RPCs
+ // (heartbeat) to each server; repeat during idle periods to
+ // prevent election timeouts (§5.2)
+ scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
+ }
+
+ private Optional<ByteString> getSnapshot() {
+ return snapshot;
+ }
+
+ @VisibleForTesting
+ void setSnapshot(Optional<ByteString> snapshot) {
+ this.snapshot = snapshot;
+ }
+
+ @Override
+ protected RaftActorBehavior handleAppendEntries(ActorRef sender,
+ AppendEntries appendEntries) {
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntries.toString());
+ }
+
+ return this;
+ }
+
+ @Override
+ protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
+ AppendEntriesReply appendEntriesReply) {
+
+ if(! appendEntriesReply.isSuccess()) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntriesReply.toString());
+ }
+ }
+
+ // Update the FollowerLogInformation
+ String followerId = appendEntriesReply.getFollowerId();
+ FollowerLogInformation followerLogInformation =
+ followerToLog.get(followerId);
+
+ if(followerLogInformation == null){
+ LOG.error("Unknown follower {}", followerId);
+ return this;
+ }
+
+ followerLogInformation.markFollowerActive();
+
+ if (appendEntriesReply.isSuccess()) {
+ followerLogInformation
+ .setMatchIndex(appendEntriesReply.getLogLastIndex());
+ followerLogInformation
+ .setNextIndex(appendEntriesReply.getLogLastIndex() + 1);
+ } else {
+
+ // TODO: When we find that the follower is out of sync with the
+ // Leader we simply decrement that followers next index by 1.
+ // Would it be possible to do better than this? The RAFT spec
+ // does not explicitly deal with it but may be something for us to
+ // think about
+
+ followerLogInformation.decrNextIndex();
+ }
+
+ // Now figure out if this reply warrants a change in the commitIndex
+ // If there exists an N such that N > commitIndex, a majority
+ // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
+ // set commitIndex = N (§5.3, §5.4).
+ for (long N = context.getCommitIndex() + 1; ; N++) {
+ int replicatedCount = 1;
+
+ for (FollowerLogInformation info : followerToLog.values()) {
+ if (info.getMatchIndex().get() >= N) {
+ replicatedCount++;
+ }
+ }
+
+ if (replicatedCount >= minReplicationCount) {
+ ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(N);
+ if (replicatedLogEntry != null &&
+ replicatedLogEntry.getTerm() == currentTerm()) {
+ context.setCommitIndex(N);
+ }
+ } else {
+ break;
+ }
+ }
+
+ // Apply the change to the state machine
+ if (context.getCommitIndex() > context.getLastApplied()) {
+ applyLogToStateMachine(context.getCommitIndex());
+ }
+
+ return this;
+ }
+
+ protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+
+ ClientRequestTracker toRemove = findClientRequestTracker(logIndex);
+ if(toRemove != null) {
+ trackerList.remove(toRemove);
+ }
+
+ return toRemove;
+ }
+
+ protected ClientRequestTracker findClientRequestTracker(long logIndex) {
+ for (ClientRequestTracker tracker : trackerList) {
+ if (tracker.getIndex() == logIndex) {
+ return tracker;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
+ RequestVoteReply requestVoteReply) {
+ return this;
+ }
+
+ @Override
+ public RaftState state() {
+ return RaftState.Leader;
+ }
+
+ @Override
+ public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
+ Preconditions.checkNotNull(sender, "sender should not be null");
+
+ Object message = fromSerializableMessage(originalMessage);
+
+ if (message instanceof RaftRPC) {
+ RaftRPC rpc = (RaftRPC) message;
+ // If RPC request or response contains term T > currentTerm:
+ // set currentTerm = T, convert to follower (§5.1)
+ // This applies to all RPC messages and responses
+ if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+ context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+
+ return switchBehavior(new Follower(context));
+ }
+ }
+
+ try {
+ if (message instanceof SendHeartBeat) {
+ sendHeartBeat();
+ return this;
+
+ } else if(message instanceof InitiateInstallSnapshot) {
+ installSnapshotIfNeeded();
+
+ } else if(message instanceof SendInstallSnapshot) {
+ // received from RaftActor
+ setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
+ sendInstallSnapshot();
+
+ } else if (message instanceof Replicate) {
+ replicate((Replicate) message);
+
+ } else if (message instanceof InstallSnapshotReply){
+ handleInstallSnapshotReply((InstallSnapshotReply) message);
+
+ }
+ } finally {
+ scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
+ }
+
+ return super.handleMessage(sender, message);
+ }
+
+ private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
+ String followerId = reply.getFollowerId();
+ FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+ FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
+ followerLogInformation.markFollowerActive();
+
+ if (followerToSnapshot != null &&
+ followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
+
+ if (reply.isSuccess()) {
+ if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
+ //this was the last chunk reply
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("InstallSnapshotReply received, " +
+ "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
+ reply.getChunkIndex(), followerId,
+ context.getReplicatedLog().getSnapshotIndex() + 1
+ );
+ }
+
+ followerLogInformation.setMatchIndex(
+ context.getReplicatedLog().getSnapshotIndex());
+ followerLogInformation.setNextIndex(
+ context.getReplicatedLog().getSnapshotIndex() + 1);
+ mapFollowerToSnapshot.remove(followerId);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" +
+ followerToLog.get(followerId).getNextIndex().get());
+ }
+
+ if (mapFollowerToSnapshot.isEmpty()) {
+ // once there are no pending followers receiving snapshots
+ // we can remove snapshot from the memory
+ setSnapshot(Optional.<ByteString>absent());
+ }
+
+ } else {
+ followerToSnapshot.markSendStatus(true);
+ }
+ } else {
+ LOG.info("InstallSnapshotReply received, " +
+ "sending snapshot chunk failed, Will retry, Chunk:{}",
+ reply.getChunkIndex()
+ );
+ followerToSnapshot.markSendStatus(false);
+ }
+
+ } else {
+ LOG.error("ERROR!!" +
+ "FollowerId in InstallSnapshotReply not known to Leader" +
+ " or Chunk Index in InstallSnapshotReply not matching {} != {}",
+ followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
+ );
+ }
+ }
+
+ /**
+ * Handles a Replicate request from the RaftActor: registers a tracker for
+ * the originating client request, then either commits and applies the entry
+ * immediately (no followers) or replicates it via AppendEntries.
+ */
+ private void replicate(Replicate replicate) {
+ long logIndex = replicate.getReplicatedLogEntry().getIndex();
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Replicate message {}", logIndex);
+ }
+
+ // Create a tracker entry; we will use it later to notify the
+ // client actor once the entry is applied
+ trackerList.add(
+ new ClientRequestTrackerImpl(replicate.getClientActor(),
+ replicate.getIdentifier(),
+ logIndex)
+ );
+
+ if (followers.size() == 0) {
+ context.setCommitIndex(logIndex);
+ applyLogToStateMachine(logIndex);
+ } else {
+ sendAppendEntries();
+ }
+ }
+
+ private void sendAppendEntries() {
+ // Send an AppendEntries to all followers
+ for (String followerId : followers) {
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+
+ if (followerActor != null) {
+ FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
+ long followerNextIndex = followerLogInformation.getNextIndex().get();
+ boolean isFollowerActive = followerLogInformation.isFollowerActive();
+ List<ReplicatedLogEntry> entries = null;
+
+ if (mapFollowerToSnapshot.get(followerId) != null) {
+ // if install snapshot is in process , then sent next chunk if possible
+ if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
+ sendSnapshotChunk(followerActor, followerId);
+ } else {
+ // we send a heartbeat even if we have not received a reply for the last chunk
+ sendAppendEntriesToFollower(followerActor, followerNextIndex,
+ Collections.<ReplicatedLogEntry>emptyList());
+ }
+
+ } else {
+ long leaderLastIndex = context.getReplicatedLog().lastIndex();
+ long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
+
+ if (isFollowerActive &&
+ context.getReplicatedLog().isPresent(followerNextIndex)) {
+ // FIXME : Sending one entry at a time
+ entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
+
+ } else if (isFollowerActive && followerNextIndex >= 0 &&
+ leaderLastIndex >= followerNextIndex ) {
+ // if the followers next index is not present in the leaders log, and
+ // if the follower is just not starting and if leader's index is more than followers index
+ // then snapshot should be sent
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("InitiateInstallSnapshot to follower:{}," +
+ "follower-nextIndex:{}, leader-snapshot-index:{}, " +
+ "leader-last-index:{}", followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+ );
+ }
+ actor().tell(new InitiateInstallSnapshot(), actor());
+
+ // we still want to send an AppendEntries, as capturing the snapshot might take time
+ entries = Collections.<ReplicatedLogEntry>emptyList();
+
+ } else {
+ //we send an AppendEntries, even if the follower is inactive
+ // in-order to update the followers timestamp, in case it becomes active again
+ entries = Collections.<ReplicatedLogEntry>emptyList();
+ }
+
+ sendAppendEntriesToFollower(followerActor, followerNextIndex, entries);
+
+ }
+ }
+ }
+ }
+
+ /**
+ * Sends a (possibly empty) AppendEntries RPC to the given follower,
+ * using the follower's next index to compute prevLogIndex/prevLogTerm.
+ */
+ private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
+ List<ReplicatedLogEntry> entries) {
+ followerActor.tell(
+ new AppendEntries(currentTerm(), context.getId(),
+ prevLogIndex(followerNextIndex),
+ prevLogTerm(followerNextIndex), entries,
+ context.getCommitIndex()).toSerializable(),
+ actor()
+ );
+ }
+
+ /**
+ * An installSnapshot is scheduled at an interval that is a multiple of
+ * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
+ * snapshots at every heartbeat.
+ *
+ * Install Snapshot works as follows
+ * 1. Leader sends a InitiateInstallSnapshot message to self
+ * 2. Leader then initiates the capture snapshot by sending a CaptureSnapshot message to actor
+ * 3. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
+ * and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
+ * 4. Leader picks the snapshot from the in-memory ReplicatedLog and sends it in chunks to the Follower
+ * 5. On complete, Follower sends back a InstallSnapshotReply.
+ * 6. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
+ * and replenishes the memory by deleting the snapshot in Replicated log.
+ *
+ */
+ private void installSnapshotIfNeeded() {
+ for (String followerId : followers) {
+ ActorSelection followerActor =
+ context.getPeerActorSelection(followerId);
+
+ if(followerActor != null) {
+ FollowerLogInformation followerLogInformation =
+ followerToLog.get(followerId);
+
+ long nextIndex = followerLogInformation.getNextIndex().get();
+
+ if (!context.getReplicatedLog().isPresent(nextIndex) &&
+ context.getReplicatedLog().isInSnapshot(nextIndex)) {
+ LOG.info("{} follower needs a snapshot install", followerId);
+ if (snapshot.isPresent()) {
+ // if a snapshot is present in the memory, most likely another install is in progress
+ // no need to capture snapshot
+ sendSnapshotChunk(followerActor, followerId);
+
+ } else {
+ initiateCaptureSnapshot();
+ //we just need 1 follower who would need snapshot to be installed.
+ // when we have the snapshot captured, we would again check (in SendInstallSnapshot)
+ // who needs an install and send to all who need
+ break;
+ }
+
+ }
+ }
+ }
+ }
+
+ // on every install snapshot, we try to capture the snapshot.
+ // Once a capture is going on, another one issued will get ignored by RaftActor.
+ private void initiateCaptureSnapshot() {
+ LOG.info("Initiating Snapshot Capture to Install Snapshot, Leader:{}", getLeaderId());
+ ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
+ long lastAppliedIndex = -1;
+ long lastAppliedTerm = -1;
+
+ if (lastAppliedEntry != null) {
+ lastAppliedIndex = lastAppliedEntry.getIndex();
+ lastAppliedTerm = lastAppliedEntry.getTerm();
+ } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
+ lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
+ lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
+ }
+
+ boolean isInstallSnapshotInitiated = true;
+ actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
+ lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
+ actor());
+ }
+
+
+ private void sendInstallSnapshot() {
+ for (String followerId : followers) {
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
+
+ if(followerActor != null) {
+ FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
+ long nextIndex = followerLogInformation.getNextIndex().get();
+
+ if (!context.getReplicatedLog().isPresent(nextIndex) &&
+ context.getReplicatedLog().isInSnapshot(nextIndex)) {
+ sendSnapshotChunk(followerActor, followerId);
+ }
+ }
+ }
+ }
+
+ /**
+ * Sends a snapshot chunk to a given follower
+ * InstallSnapshot should qualify as a heartbeat too.
+ */
+ private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
+ try {
+ if (snapshot.isPresent()) {
+ followerActor.tell(
+ new InstallSnapshot(currentTerm(), context.getId(),
+ context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm(),
+ getNextSnapshotChunk(followerId,snapshot.get()),
+ mapFollowerToSnapshot.get(followerId).incrementChunkIndex(),
+ mapFollowerToSnapshot.get(followerId).getTotalChunks()
+ ).toSerializable(),
+ actor()
+ );
+ LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
+ followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
+ mapFollowerToSnapshot.get(followerId).getTotalChunks());
+ }
+ } catch (IOException e) {
+ LOG.error(e, "InstallSnapshot failed for Leader.");
+ }
+ }
+
+ /**
+ * Accepts the snapshot as a ByteString and records it in the map for future chunks;
+ * creates and returns the next ByteString chunk.
+ */
+ private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
+ FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+ if (followerToSnapshot == null) {
+ followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
+ mapFollowerToSnapshot.put(followerId, followerToSnapshot);
+ }
+ ByteString nextChunk = followerToSnapshot.getNextChunk();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+ }
+ return nextChunk;
+ }
+
+ // Heartbeats are piggy-backed on (possibly empty) AppendEntries messages;
+ // nothing is sent when there are no followers.
+ private void sendHeartBeat() {
+ if (followers.size() > 0) {
+ sendAppendEntries();
+ }
+ }
+
+ // Cancels a pending heartbeat timer; safe to call when none is scheduled.
+ private void stopHeartBeat() {
+ if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
+ heartbeatSchedule.cancel();
+ }
+ }
+
+ private void scheduleHeartBeat(FiniteDuration interval) {
+ if(followers.size() == 0){
+ // Optimization - do not bother scheduling a heartbeat as there are
+ // no followers
+ return;
+ }
+
+ stopHeartBeat();
+
+ // Schedule a heartbeat. When the scheduler triggers a SendHeartbeat
+ // message is sent to itself.
+ // Scheduling the heartbeat only once here because heartbeats do not
+ // need to be sent if there are other messages being sent to the remote
+ // actor.
+ heartbeatSchedule = context.getActorSystem().scheduler().scheduleOnce(
+ interval, context.getActor(), new SendHeartBeat(),
+ context.getActorSystem().dispatcher(), context.getActor());
+ }
+
+ // Invoked when this behavior is switched away from; stops the heartbeat timer.
+ @Override
+ public void close() throws Exception {
+ stopHeartBeat();
+ }
+
+ // A leader is its own leader, so the leader id is this actor's id.
+ @Override
+ public String getLeaderId() {
+ return context.getId();
+ }
+
+ protected boolean isLeaderIsolated() {
+ int minPresent = minIsolatedLeaderPeerCount;
+ for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
+ if (followerLogInformation.isFollowerActive()) {
+ --minPresent;
+ if (minPresent == 0) {
+ break;
+ }
+ }
+ }
+ return (minPresent != 0);
+ }
+
+ /**
+ * Encapsulates the snapshot bytestring and handles the logic of sending
+ * snapshot chunks
+ */
+ protected class FollowerToSnapshot {
+ // The complete snapshot being streamed to one follower.
+ private ByteString snapshotBytes;
+ // Byte offset of the chunk most recently handed out.
+ private int offset = 0;
+ // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
+ private int replyReceivedForOffset;
+ // true when the previously sent chunk was acknowledged successfully;
+ // when false, the same chunk is handed out again (retry).
+ private boolean replyStatus = false;
+ // 1-based index of the chunk currently in flight.
+ private int chunkIndex;
+ // Total number of chunks at the configured snapshot chunk size.
+ private int totalChunks;
+
+ public FollowerToSnapshot(ByteString snapshotBytes) {
+ this.snapshotBytes = snapshotBytes;
+ replyReceivedForOffset = -1;
+ chunkIndex = 1;
+ int size = snapshotBytes.size();
+ // Ceiling division: one extra chunk for any remainder bytes.
+ totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
+ ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Snapshot {} bytes, total chunks to send:{}",
+ size, totalChunks);
+ }
+ }
+
+ public ByteString getSnapshotBytes() {
+ return snapshotBytes;
+ }
+
+ public int incrementOffset() {
+ if(replyStatus) {
+ // advance only after the previous chunk was acknowledged; if it
+ // failed, the unchanged offset makes the same chunk be sent again
+ offset = offset + context.getConfigParams().getSnapshotChunkSize();
+ }
+ return offset;
+ }
+
+ public int incrementChunkIndex() {
+ if (replyStatus) {
+ // advance only after the previous chunk was acknowledged; if it
+ // failed, the unchanged index makes the same chunk be sent again
+ chunkIndex = chunkIndex + 1;
+ }
+ return chunkIndex;
+ }
+
+ public int getChunkIndex() {
+ return chunkIndex;
+ }
+
+ public int getTotalChunks() {
+ return totalChunks;
+ }
+
+ public boolean canSendNextChunk() {
+ // we only send a false if a chunk is sent but we have not received a reply yet
+ return replyReceivedForOffset == offset;
+ }
+
+ public boolean isLastChunk(int chunkIndex) {
+ return totalChunks == chunkIndex;
+ }
+
+ // Records the outcome of the last chunk; in both cases the reply is noted
+ // (unblocking canSendNextChunk), but only success allows the offset and
+ // chunk index to advance on the next increment call.
+ public void markSendStatus(boolean success) {
+ if (success) {
+ // if the chunk sent was successful
+ replyReceivedForOffset = offset;
+ replyStatus = true;
+ } else {
+ // if the chunk sent was failure
+ replyReceivedForOffset = offset;
+ replyStatus = false;
+ }
+ }
+
+ // Returns the next (or, after a failure, the same) chunk of the snapshot.
+ // NOTE(review): assumes callers stop requesting chunks once the last chunk
+ // was acknowledged (see isLastChunk); otherwise 'start' could pass the end
+ // of the snapshot - confirm against the sending loop.
+ public ByteString getNextChunk() {
+ int snapshotLength = getSnapshotBytes().size();
+ int start = incrementOffset();
+ int size = context.getConfigParams().getSnapshotChunkSize();
+ if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
+ size = snapshotLength;
+ } else {
+ if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
+ // final partial chunk
+ size = snapshotLength - start;
+ }
+ }
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("length={}, offset={},size={}",
+ snapshotLength, start, size);
+ }
+ return getSnapshotBytes().substring(start, start + size);
+
+ }
+ }
+
+ // called from example-actor for printing the follower-states
+ public String printFollowerStates() {
+ StringBuilder sb = new StringBuilder();
+ for(FollowerLogInformation followerLogInformation : followerToLog.values()) {
+ boolean isFollowerActive = followerLogInformation.isFollowerActive();
+ sb.append("{"+followerLogInformation.getId() + " state:" + isFollowerActive + "},");
+
+ }
+ return "[" + sb.toString() + "]";
+ }
+
+ // Test hook: marks the given follower as recently heard-from.
+ @VisibleForTesting
+ void markFollowerActive(String followerId) {
+ followerToLog.get(followerId).markFollowerActive();
+ }
+}
}
protected RaftActorBehavior switchBehavior(RaftActorBehavior behavior) {
- LOG.info("Switching from behavior {} to {}", this.state(), behavior.state());
+ LOG.info("{} :- Switching from behavior {} to {}", context.getId(), this.state(), behavior.state());
try {
close();
} catch (Exception e) {
return behavior;
}
+
+ protected int getMajorityVoteCount(int numPeers) {
+ // Votes are required from a majority of the peers including self.
+ // The numMajority field therefore stores a calculated value
+ // of the number of votes required for this candidate to win an
+ // election based on it's known peers.
+ // If a peer was added during normal operation and raft replicas
+ // came to know about them then the new peer would also need to be
+ // taken into consideration when calculating this value.
+ // Here are some examples for what the numMajority would be for n
+ // peers
+ // 0 peers = 1 numMajority -: (0 + 1) / 2 + 1 = 1
+ // 2 peers = 2 numMajority -: (2 + 1) / 2 + 1 = 2
+ // 4 peers = 3 numMajority -: (4 + 1) / 2 + 1 = 3
+
+ int numMajority = 0;
+ if (numPeers > 0) {
+ int self = 1;
+ numMajority = (numPeers + self) / 2 + 1;
+ }
+ return numMajority;
+
+ }
}
LOG.debug("Election:Candidate has following peers: {}", peers);
}
- if(peers.size() > 0) {
- // Votes are required from a majority of the peers including self.
- // The votesRequired field therefore stores a calculated value
- // of the number of votes required for this candidate to win an
- // election based on it's known peers.
- // If a peer was added during normal operation and raft replicas
- // came to know about them then the new peer would also need to be
- // taken into consideration when calculating this value.
- // Here are some examples for what the votesRequired would be for n
- // peers
- // 0 peers = 1 votesRequired (0 + 1) / 2 + 1 = 1
- // 2 peers = 2 votesRequired (2 + 1) / 2 + 1 = 2
- // 4 peers = 3 votesRequired (4 + 1) / 2 + 1 = 3
- int noOfPeers = peers.size();
- int self = 1;
- votesRequired = (noOfPeers + self) / 2 + 1;
- } else {
- votesRequired = 0;
- }
+ votesRequired = getMajorityVoteCount(peers.size());
startNewTerm();
scheduleElection(electionDuration());
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+
+/**
+ * Leader which is termed as isolated.
+ * <p/>
+ * If the reply from the majority of the followers is not received then the leader changes its behavior
+ * to IsolatedLeader. An isolated leader may have followers and they would continue to receive replicated messages.
+ * <p/>
+ * A schedule is run, at an interval of (10 * Heartbeat-time-interval), in the Leader
+ * to check whether it is isolated or not.
+ * <p/>
+ * In the IsolatedLeader, on every AppendEntriesReply we aggressively re-check whether the leader is isolated.
+ * If it is not, the state is switched back to Leader.
+ *
+ */
+public class IsolatedLeader extends AbstractLeader {
+ public IsolatedLeader(RaftActorContext context) {
+ super(context);
+ }
+
+ // we received an Append Entries reply, we should switch the Behavior to Leader
+ @Override
+ protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
+ AppendEntriesReply appendEntriesReply) {
+ RaftActorBehavior ret = super.handleAppendEntriesReply(sender, appendEntriesReply);
+
+ // it can happen that this isolated leader interacts with a new leader in the cluster and
+ // changes its state to Follower, hence we only need to switch to Leader if the state is still Isolated
+ if (ret.state() == RaftState.IsolatedLeader && !isLeaderIsolated()) {
+ LOG.info("IsolatedLeader {} switching from IsolatedLeader to Leader", leaderId);
+ return switchBehavior(new Leader(context));
+ }
+ return ret;
+ }
+
+ @Override
+ public RaftState state() {
+ return RaftState.IsolatedLeader;
+ }
+}
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
import akka.actor.Cancellable;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import com.google.protobuf.ByteString;
-import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
-import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
-import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
-import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
-import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
-import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
-import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
-import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
-import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
-import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.IsolatedLeaderCheck;
import scala.concurrent.duration.FiniteDuration;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
/**
* The behavior of a RaftActor when it is in the Leader state
* <p/>
* of matchIndex[i] ≥ N, and log[N].term == currentTerm:
* set commitIndex = N (§5.3, §5.4).
*/
-public class Leader extends AbstractRaftActorBehavior {
-
-
- protected final Map<String, FollowerLogInformation> followerToLog =
- new HashMap();
- protected final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();
-
- private final Set<String> followers;
-
- private Cancellable heartbeatSchedule = null;
- private Cancellable appendEntriesSchedule = null;
+public class Leader extends AbstractLeader {
private Cancellable installSnapshotSchedule = null;
-
- private List<ClientRequestTracker> trackerList = new ArrayList<>();
-
- private final int minReplicationCount;
+ private Cancellable isolatedLeaderCheckSchedule = null;
public Leader(RaftActorContext context) {
super(context);
- followers = context.getPeerAddresses().keySet();
-
- for (String followerId : followers) {
- FollowerLogInformation followerLogInformation =
- new FollowerLogInformationImpl(followerId,
- new AtomicLong(context.getCommitIndex()),
- new AtomicLong(-1));
-
- followerToLog.put(followerId, followerLogInformation);
- }
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Election:Leader has following peers: {}", followers);
- }
-
- if (followers.size() > 0) {
- minReplicationCount = (followers.size() + 1) / 2 + 1;
- } else {
- minReplicationCount = 0;
- }
-
-
- // Immediately schedule a heartbeat
- // Upon election: send initial empty AppendEntries RPCs
- // (heartbeat) to each server; repeat during idle periods to
- // prevent election timeouts (§5.2)
- scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
-
- scheduleInstallSnapshotCheck(
- new FiniteDuration(context.getConfigParams().getHeartBeatInterval().length() * 1000,
- context.getConfigParams().getHeartBeatInterval().unit())
- );
-
- }
-
- @Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
- AppendEntries appendEntries) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(appendEntries.toString());
- }
-
- return this;
- }
-
- @Override protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
- AppendEntriesReply appendEntriesReply) {
-
- if(! appendEntriesReply.isSuccess()) {
- if(LOG.isDebugEnabled()) {
- LOG.debug(appendEntriesReply.toString());
- }
- }
-
- // Update the FollowerLogInformation
- String followerId = appendEntriesReply.getFollowerId();
- FollowerLogInformation followerLogInformation =
- followerToLog.get(followerId);
-
- if(followerLogInformation == null){
- LOG.error("Unknown follower {}", followerId);
- return this;
- }
-
- if (appendEntriesReply.isSuccess()) {
- followerLogInformation
- .setMatchIndex(appendEntriesReply.getLogLastIndex());
- followerLogInformation
- .setNextIndex(appendEntriesReply.getLogLastIndex() + 1);
- } else {
-
- // TODO: When we find that the follower is out of sync with the
- // Leader we simply decrement that followers next index by 1.
- // Would it be possible to do better than this? The RAFT spec
- // does not explicitly deal with it but may be something for us to
- // think about
-
- followerLogInformation.decrNextIndex();
- }
-
- // Now figure out if this reply warrants a change in the commitIndex
- // If there exists an N such that N > commitIndex, a majority
- // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
- // set commitIndex = N (§5.3, §5.4).
- for (long N = context.getCommitIndex() + 1; ; N++) {
- int replicatedCount = 1;
-
- for (FollowerLogInformation info : followerToLog.values()) {
- if (info.getMatchIndex().get() >= N) {
- replicatedCount++;
- }
- }
-
- if (replicatedCount >= minReplicationCount) {
- ReplicatedLogEntry replicatedLogEntry =
- context.getReplicatedLog().get(N);
- if (replicatedLogEntry != null
- && replicatedLogEntry.getTerm()
- == currentTerm()) {
- context.setCommitIndex(N);
- }
- } else {
- break;
- }
- }
-
- // Apply the change to the state machine
- if (context.getCommitIndex() > context.getLastApplied()) {
- applyLogToStateMachine(context.getCommitIndex());
- }
-
- return this;
- }
-
- protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
-
- ClientRequestTracker toRemove = findClientRequestTracker(logIndex);
- if(toRemove != null) {
- trackerList.remove(toRemove);
- }
-
- return toRemove;
- }
-
- protected ClientRequestTracker findClientRequestTracker(long logIndex) {
- for (ClientRequestTracker tracker : trackerList) {
- if (tracker.getIndex() == logIndex) {
- return tracker;
- }
- }
-
- return null;
- }
-
- @Override protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
- RequestVoteReply requestVoteReply) {
- return this;
- }
+ scheduleInstallSnapshotCheck(context.getConfigParams().getIsolatedCheckInterval());
- @Override public RaftState state() {
- return RaftState.Leader;
+ scheduleIsolatedLeaderCheck(
+ new FiniteDuration(context.getConfigParams().getHeartBeatInterval().length() * 10,
+ context.getConfigParams().getHeartBeatInterval().unit()));
}
@Override public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Preconditions.checkNotNull(sender, "sender should not be null");
- Object message = fromSerializableMessage(originalMessage);
-
- if (message instanceof RaftRPC) {
- RaftRPC rpc = (RaftRPC) message;
- // If RPC request or response contains term T > currentTerm:
- // set currentTerm = T, convert to follower (§5.1)
- // This applies to all RPC messages and responses
- if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
- context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
-
- return switchBehavior(new Follower(context));
- }
- }
-
- try {
- if (message instanceof SendHeartBeat) {
- sendHeartBeat();
- return this;
- } else if(message instanceof SendInstallSnapshot) {
- installSnapshotIfNeeded();
- } else if (message instanceof Replicate) {
- replicate((Replicate) message);
- } else if (message instanceof InstallSnapshotReply){
- handleInstallSnapshotReply(
- (InstallSnapshotReply) message);
- }
- } finally {
- scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
- }
-
- return super.handleMessage(sender, message);
- }
-
- private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
- String followerId = reply.getFollowerId();
- FollowerToSnapshot followerToSnapshot =
- mapFollowerToSnapshot.get(followerId);
-
- if (followerToSnapshot != null &&
- followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
-
- if (reply.isSuccess()) {
- if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
- //this was the last chunk reply
- if(LOG.isDebugEnabled()) {
- LOG.debug("InstallSnapshotReply received, " +
- "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
- reply.getChunkIndex(), followerId,
- context.getReplicatedLog().getSnapshotIndex() + 1
- );
- }
-
- FollowerLogInformation followerLogInformation =
- followerToLog.get(followerId);
- followerLogInformation.setMatchIndex(
- context.getReplicatedLog().getSnapshotIndex());
- followerLogInformation.setNextIndex(
- context.getReplicatedLog().getSnapshotIndex() + 1);
- mapFollowerToSnapshot.remove(followerId);
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" +
- followerToLog.get(followerId).getNextIndex().get());
- }
-
- } else {
- followerToSnapshot.markSendStatus(true);
- }
- } else {
- LOG.info("InstallSnapshotReply received, " +
- "sending snapshot chunk failed, Will retry, Chunk:{}",
- reply.getChunkIndex()
- );
- followerToSnapshot.markSendStatus(false);
+ if (originalMessage instanceof IsolatedLeaderCheck) {
+ if (isLeaderIsolated()) {
+ LOG.info("At least {} followers need to be active, Switching {} from Leader to IsolatedLeader",
+ minIsolatedLeaderPeerCount, leaderId);
+ return switchBehavior(new IsolatedLeader(context));
}
-
- } else {
- LOG.error("ERROR!!" +
- "FollowerId in InstallSnapshotReply not known to Leader" +
- " or Chunk Index in InstallSnapshotReply not matching {} != {}",
- followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
- );
- }
- }
-
- private void replicate(Replicate replicate) {
- long logIndex = replicate.getReplicatedLogEntry().getIndex();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Replicate message {}", logIndex);
}
- // Create a tracker entry we will use this later to notify the
- // client actor
- trackerList.add(
- new ClientRequestTrackerImpl(replicate.getClientActor(),
- replicate.getIdentifier(),
- logIndex)
- );
-
- if (followers.size() == 0) {
- context.setCommitIndex(logIndex);
- applyLogToStateMachine(logIndex);
- } else {
- sendAppendEntries();
- }
+ return super.handleMessage(sender, originalMessage);
}
- private void sendAppendEntries() {
- // Send an AppendEntries to all followers
- for (String followerId : followers) {
- ActorSelection followerActor = context.getPeerActorSelection(followerId);
-
- if (followerActor != null) {
- FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
- long followerNextIndex = followerLogInformation.getNextIndex().get();
- List<ReplicatedLogEntry> entries = Collections.emptyList();
-
- if (mapFollowerToSnapshot.get(followerId) != null) {
- if (mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
- sendSnapshotChunk(followerActor, followerId);
- }
-
- } else {
-
- if (context.getReplicatedLog().isPresent(followerNextIndex)) {
- // FIXME : Sending one entry at a time
- entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
-
- followerActor.tell(
- new AppendEntries(currentTerm(), context.getId(),
- prevLogIndex(followerNextIndex),
- prevLogTerm(followerNextIndex), entries,
- context.getCommitIndex()).toSerializable(),
- actor()
- );
-
- } else {
- // if the followers next index is not present in the leaders log, then snapshot should be sent
- long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
- long leaderLastIndex = context.getReplicatedLog().lastIndex();
- if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) {
- // if the follower is just not starting and leader's index
- // is more than followers index
- if(LOG.isDebugEnabled()) {
- LOG.debug("SendInstallSnapshot to follower:{}," +
- "follower-nextIndex:{}, leader-snapshot-index:{}, " +
- "leader-last-index:{}", followerId,
- followerNextIndex, leaderSnapShotIndex, leaderLastIndex
- );
- }
-
- actor().tell(new SendInstallSnapshot(), actor());
- } else {
- followerActor.tell(
- new AppendEntries(currentTerm(), context.getId(),
- prevLogIndex(followerNextIndex),
- prevLogTerm(followerNextIndex), entries,
- context.getCommitIndex()).toSerializable(),
- actor()
- );
- }
- }
- }
- }
- }
- }
-
- /**
- * An installSnapshot is scheduled at a interval that is a multiple of
- * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
- * snapshots at every heartbeat.
- */
- private void installSnapshotIfNeeded(){
- for (String followerId : followers) {
- ActorSelection followerActor =
- context.getPeerActorSelection(followerId);
-
- if(followerActor != null) {
- FollowerLogInformation followerLogInformation =
- followerToLog.get(followerId);
-
- long nextIndex = followerLogInformation.getNextIndex().get();
-
- if (!context.getReplicatedLog().isPresent(nextIndex) &&
- context.getReplicatedLog().isInSnapshot(nextIndex)) {
- sendSnapshotChunk(followerActor, followerId);
- }
- }
- }
- }
-
- /**
- * Sends a snapshot chunk to a given follower
- * InstallSnapshot should qualify as a heartbeat too.
- */
- private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
- try {
- followerActor.tell(
- new InstallSnapshot(currentTerm(), context.getId(),
- context.getReplicatedLog().getSnapshotIndex(),
- context.getReplicatedLog().getSnapshotTerm(),
- getNextSnapshotChunk(followerId,
- context.getReplicatedLog().getSnapshot()),
- mapFollowerToSnapshot.get(followerId).incrementChunkIndex(),
- mapFollowerToSnapshot.get(followerId).getTotalChunks()
- ).toSerializable(),
- actor()
- );
- LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
- followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
- mapFollowerToSnapshot.get(followerId).getTotalChunks());
- } catch (IOException e) {
- LOG.error(e, "InstallSnapshot failed for Leader.");
- }
- }
-
- /**
- * Acccepts snaphot as ByteString, enters into map for future chunks
- * creates and return a ByteString chunk
- */
- private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
- FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
- if (followerToSnapshot == null) {
- followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
- mapFollowerToSnapshot.put(followerId, followerToSnapshot);
- }
- ByteString nextChunk = followerToSnapshot.getNextChunk();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
- }
-
- return nextChunk;
- }
-
- private void sendHeartBeat() {
- if (followers.size() > 0) {
- sendAppendEntries();
- }
- }
-
- private void stopHeartBeat() {
- if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
- heartbeatSchedule.cancel();
- }
- }
-
- private void stopInstallSnapshotSchedule() {
+ protected void stopInstallSnapshotSchedule() {
if (installSnapshotSchedule != null && !installSnapshotSchedule.isCancelled()) {
installSnapshotSchedule.cancel();
}
}
- private void scheduleHeartBeat(FiniteDuration interval) {
- if(followers.size() == 0){
- // Optimization - do not bother scheduling a heartbeat as there are
- // no followers
- return;
- }
-
- stopHeartBeat();
-
- // Schedule a heartbeat. When the scheduler triggers a SendHeartbeat
- // message is sent to itself.
- // Scheduling the heartbeat only once here because heartbeats do not
- // need to be sent if there are other messages being sent to the remote
- // actor.
- heartbeatSchedule =
- context.getActorSystem().scheduler().scheduleOnce(
- interval,
- context.getActor(), new SendHeartBeat(),
- context.getActorSystem().dispatcher(), context.getActor());
- }
-
-
- private void scheduleInstallSnapshotCheck(FiniteDuration interval) {
+ protected void scheduleInstallSnapshotCheck(FiniteDuration interval) {
if(followers.size() == 0){
// Optimization - do not bother scheduling a heartbeat as there are
// no followers
installSnapshotSchedule =
context.getActorSystem().scheduler().scheduleOnce(
interval,
- context.getActor(), new SendInstallSnapshot(),
+ context.getActor(), new InitiateInstallSnapshot(),
context.getActorSystem().dispatcher(), context.getActor());
}
-
-
- @Override public void close() throws Exception {
- stopHeartBeat();
+ protected void stopIsolatedLeaderCheckSchedule() {
+ if (isolatedLeaderCheckSchedule != null && !isolatedLeaderCheckSchedule.isCancelled()) {
+ isolatedLeaderCheckSchedule.cancel();
+ }
}
- @Override public String getLeaderId() {
- return context.getId();
+ protected void scheduleIsolatedLeaderCheck(FiniteDuration isolatedCheckInterval) {
+ isolatedLeaderCheckSchedule = context.getActorSystem().scheduler().schedule(isolatedCheckInterval, isolatedCheckInterval,
+ context.getActor(), new IsolatedLeaderCheck(),
+ context.getActorSystem().dispatcher(), context.getActor());
}
- /**
- * Encapsulates the snapshot bytestring and handles the logic of sending
- * snapshot chunks
- */
- protected class FollowerToSnapshot {
- private ByteString snapshotBytes;
- private int offset = 0;
- // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
- private int replyReceivedForOffset;
- // if replyStatus is false, the previous chunk is attempted
- private boolean replyStatus = false;
- private int chunkIndex;
- private int totalChunks;
-
- public FollowerToSnapshot(ByteString snapshotBytes) {
- this.snapshotBytes = snapshotBytes;
- replyReceivedForOffset = -1;
- chunkIndex = 1;
- int size = snapshotBytes.size();
- totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
- ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Snapshot {} bytes, total chunks to send:{}",
- size, totalChunks);
- }
- }
-
- public ByteString getSnapshotBytes() {
- return snapshotBytes;
- }
-
- public int incrementOffset() {
- if(replyStatus) {
- // if prev chunk failed, we would want to sent the same chunk again
- offset = offset + context.getConfigParams().getSnapshotChunkSize();
- }
- return offset;
- }
-
- public int incrementChunkIndex() {
- if (replyStatus) {
- // if prev chunk failed, we would want to sent the same chunk again
- chunkIndex = chunkIndex + 1;
- }
- return chunkIndex;
- }
-
- public int getChunkIndex() {
- return chunkIndex;
- }
-
- public int getTotalChunks() {
- return totalChunks;
- }
-
- public boolean canSendNextChunk() {
- // we only send a false if a chunk is sent but we have not received a reply yet
- return replyReceivedForOffset == offset;
- }
-
- public boolean isLastChunk(int chunkIndex) {
- return totalChunks == chunkIndex;
- }
-
- public void markSendStatus(boolean success) {
- if (success) {
- // if the chunk sent was successful
- replyReceivedForOffset = offset;
- replyStatus = true;
- } else {
- // if the chunk sent was failure
- replyReceivedForOffset = offset;
- replyStatus = false;
- }
- }
-
- public ByteString getNextChunk() {
- int snapshotLength = getSnapshotBytes().size();
- int start = incrementOffset();
- int size = context.getConfigParams().getSnapshotChunkSize();
- if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
- size = snapshotLength;
- } else {
- if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
- size = snapshotLength - start;
- }
- }
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("length={}, offset={},size={}",
- snapshotLength, start, size);
- }
- return getSnapshotBytes().substring(start, start + size);
-
- }
+ @Override public void close() throws Exception {
+ stopInstallSnapshotSchedule();
+ stopIsolatedLeaderCheckSchedule();
+ super.close();
}
+ @VisibleForTesting
+ void markFollowerActive(String followerId) {
+ followerToLog.get(followerId).markFollowerActive();
+ }
}
+++ /dev/null
-package org.opendaylight.controller.cluster.raft.client.messages;
-
-/**
- * Created by kramesha on 7/17/14.
- */
-public class AddRaftPeer {
-
- private String name;
- private String address;
-
- public AddRaftPeer(String name, String address) {
- this.name = name;
- this.address = address;
- }
-
- public String getName() {
- return name;
- }
-
- public String getAddress() {
- return address;
- }
-}
import java.io.Serializable;
public class FindLeader implements Serializable{
+ private static final long serialVersionUID = 1L;
}
import java.io.Serializable;
public class FindLeaderReply implements Serializable {
+ private static final long serialVersionUID = 1L;
private final String leaderActor;
public FindLeaderReply(String leaderActor) {
+++ /dev/null
-package org.opendaylight.controller.cluster.raft.client.messages;
-
-/**
- * Created by kramesha on 7/17/14.
- */
-public class RemoveRaftPeer {
- private String name;
-
- public RemoveRaftPeer(String name) {
- this.name = name;
- }
-
- public String getName() {
- return name;
- }
-}
package org.opendaylight.controller.cluster.raft.messages;
public class AbstractRaftRPC implements RaftRPC {
+ private static final long serialVersionUID = 1L;
// term
protected long term;
* heartbeat (§5.2).
*/
public class AppendEntries extends AbstractRaftRPC {
-
- public static final Class SERIALIZABLE_CLASS = AppendEntriesMessages.AppendEntries.class;
+ public static final Class<AppendEntriesMessages.AppendEntries> SERIALIZABLE_CLASS = AppendEntriesMessages.AppendEntries.class;
private static final org.slf4j.Logger LOG = org.slf4j.LoggerFactory.getLogger(AppendEntries.class);
+ private static final long serialVersionUID = 1L;
// So that follower can redirect clients
private final String leaderId;
* Reply for the AppendEntriesRpc message
*/
public class AppendEntriesReply extends AbstractRaftRPC {
+ private static final long serialVersionUID = 1L;
// true if follower contained entry matching
// prevLogIndex and prevLogTerm
public class InstallSnapshot extends AbstractRaftRPC {
- public static final Class SERIALIZABLE_CLASS = InstallSnapshotMessages.InstallSnapshot.class;
+ public static final Class<InstallSnapshotMessages.InstallSnapshot> SERIALIZABLE_CLASS = InstallSnapshotMessages.InstallSnapshot.class;
+ private static final long serialVersionUID = 1L;
private final String leaderId;
private final long lastIncludedIndex;
package org.opendaylight.controller.cluster.raft.messages;
public class InstallSnapshotReply extends AbstractRaftRPC {
+ private static final long serialVersionUID = 1L;
// The followerId - this will be used to figure out which follower is
// responding
* Invoked by candidates to gather votes (§5.2).
*/
public class RequestVote extends AbstractRaftRPC {
+ private static final long serialVersionUID = 1L;
// candidate requesting vote
private String candidateId;
package org.opendaylight.controller.cluster.raft.messages;
public class RequestVoteReply extends AbstractRaftRPC {
+ private static final long serialVersionUID = 1L;
// true means candidate received vot
private final boolean voteGranted;
*/
package org.opendaylight.controller.cluster.raft;
-import junit.framework.Assert;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
-import static org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockReplicatedLogEntry;
/**
*
*/
// now create a snapshot of 3 entries, with 1 unapplied entry left in the log
// It removes the entries which have made it to snapshot
// and updates the snapshot index and term
- Map state = takeSnapshot(3);
+ Map<Long, String> state = takeSnapshot(3);
// check the values after the snapshot.
// each index value passed in the test is the logical index (log entry index)
}
// create a snapshot for test
- public Map takeSnapshot(int numEntries) {
- Map map = new HashMap(numEntries);
+ public Map<Long, String> takeSnapshot(final int numEntries) {
+ Map<Long, String> map = new HashMap<>(numEntries);
List<ReplicatedLogEntry> entries = replicatedLogImpl.getEntriesTill(numEntries);
for (ReplicatedLogEntry entry : entries) {
map.put(entry.getIndex(), entry.getData().toString());
}
class MockAbstractReplicatedLogImpl extends AbstractReplicatedLogImpl {
@Override
- public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry) {
+ public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
}
@Override
- public void removeFromAndPersist(long index) {
+ public void removeFromAndPersist(final long index) {
}
- public void setSnapshotIndex(long snapshotIndex) {
+ @Override
+ public void setSnapshotIndex(final long snapshotIndex) {
this.snapshotIndex = snapshotIndex;
}
- public void setSnapshotTerm(long snapshotTerm) {
+ @Override
+ public void setSnapshotTerm(final long snapshotTerm) {
this.snapshotTerm = snapshotTerm;
}
- public List<ReplicatedLogEntry> getEntriesTill(int index) {
+ public List<ReplicatedLogEntry> getEntriesTill(final int index) {
return journal.subList(0, index);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Test;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class FollowerLogInformationImplTest {
+
+ @Test
+ public void testIsFollowerActive() {
+
+ FiniteDuration timeoutDuration =
+ new FiniteDuration(500, TimeUnit.MILLISECONDS);
+
+ FollowerLogInformation followerLogInformation =
+ new FollowerLogInformationImpl(
+ "follower1", new AtomicLong(10), new AtomicLong(9), timeoutDuration);
+
+
+
+ assertFalse("Follower should be termed inactive before stopwatch starts",
+ followerLogInformation.isFollowerActive());
+
+ followerLogInformation.markFollowerActive();
+ if (sleepWithElaspsedTimeReturned(200) > 200) {
+ return;
+ }
+ assertTrue("Follower should be active", followerLogInformation.isFollowerActive());
+
+ if (sleepWithElaspsedTimeReturned(400) > 400) {
+ return;
+ }
+ assertFalse("Follower should be inactive after time lapsed",
+ followerLogInformation.isFollowerActive());
+
+ followerLogInformation.markFollowerActive();
+ assertTrue("Follower should be active from inactive",
+ followerLogInformation.isFollowerActive());
+ }
+
+ // we cannot rely comfortably that the sleep will indeed sleep for the desired time
+ // hence getting the actual elapsed time and do a match.
+ // if the sleep has spilled over, then return the test gracefully
+ private long sleepWithElaspsedTimeReturned(long millis) {
+ Stopwatch stopwatch = new Stopwatch();
+ stopwatch.start();
+ Uninterruptibles.sleepUninterruptibly(millis, TimeUnit.MILLISECONDS);
+ stopwatch.stop();
+ return stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ }
+}
private long lastApplied = 0;
private final ElectionTerm electionTerm;
private ReplicatedLog replicatedLog;
- private Map<String, String> peerAddresses = new HashMap();
+ private Map<String, String> peerAddresses = new HashMap<>();
private ConfigParams configParams;
public MockRaftActorContext(){
}
public static class MockPayload extends Payload implements Serializable {
+ private static final long serialVersionUID = 1L;
private String value = "";
public MockPayload(){
}
public static class MockReplicatedLogEntry implements ReplicatedLogEntry, Serializable {
+ private static final long serialVersionUID = 1L;
private final long term;
private final long index;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Terminated;
-import akka.event.Logging;
import akka.japi.Creator;
+import akka.japi.Procedure;
+import akka.pattern.Patterns;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SaveSnapshotFailure;
+import akka.persistence.SaveSnapshotSuccess;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotOffer;
+import akka.persistence.SnapshotSelectionCriteria;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
+import akka.util.Timeout;
import com.google.common.base.Optional;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.behaviors.Follower;
+import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
+
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
public class RaftActorTest extends AbstractActorTest {
public static class MockRaftActor extends RaftActor {
+ private final DataPersistenceProvider dataPersistenceProvider;
+ private final RaftActor delegate;
+
public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
+ private static final long serialVersionUID = 1L;
private final Map<String, String> peerAddresses;
private final String id;
private final Optional<ConfigParams> config;
+ private final DataPersistenceProvider dataPersistenceProvider;
private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
- Optional<ConfigParams> config) {
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider) {
this.peerAddresses = peerAddresses;
this.id = id;
this.config = config;
+ this.dataPersistenceProvider = dataPersistenceProvider;
}
@Override
public MockRaftActor create() throws Exception {
- return new MockRaftActor(id, peerAddresses, config);
+ return new MockRaftActor(id, peerAddresses, config, dataPersistenceProvider);
}
}
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+
private final List<Object> state;
- public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config) {
+ public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider) {
super(id, peerAddresses, config);
state = new ArrayList<>();
+ this.delegate = mock(RaftActor.class);
+ if(dataPersistenceProvider == null){
+ this.dataPersistenceProvider = new PersistentDataProvider();
+ } else {
+ this.dataPersistenceProvider = dataPersistenceProvider;
+ }
}
public void waitForRecoveryComplete() {
public static Props props(final String id, final Map<String, String> peerAddresses,
Optional<ConfigParams> config){
- return Props.create(new MockRaftActorCreator(peerAddresses, id, config));
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null));
+ }
+
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider));
}
+
@Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ delegate.applyState(clientActor, identifier, data);
+ LOG.info("applyState called");
}
+
+
+
@Override
protected void startLogRecoveryBatch(int maxBatchSize) {
}
@Override
protected void onRecoveryComplete() {
+ delegate.onRecoveryComplete();
recoveryComplete.countDown();
}
@Override
protected void applyRecoverySnapshot(ByteString snapshot) {
+ delegate.applyRecoverySnapshot(snapshot);
try {
Object data = toObject(snapshot);
System.out.println("!!!!!applyRecoverySnapshot: "+data);
if (data instanceof List) {
- state.addAll((List) data);
+ state.addAll((List<?>) data);
}
} catch (Exception e) {
e.printStackTrace();
}
@Override protected void createSnapshot() {
- throw new UnsupportedOperationException("createSnapshot");
+ delegate.createSnapshot();
}
@Override protected void applySnapshot(ByteString snapshot) {
+ delegate.applySnapshot(snapshot);
}
@Override protected void onStateChanged() {
+ delegate.onStateChanged();
+ }
+
+ @Override
+ protected DataPersistenceProvider persistence() {
+ return this.dataPersistenceProvider;
}
@Override public String persistenceId() {
return obj;
}
+ public ReplicatedLog getReplicatedLog(){
+ return this.getRaftActorContext().getReplicatedLog();
+ }
}
super(actorSystem);
raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
- Collections.EMPTY_MAP, Optional.<ConfigParams>absent()), actorName);
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>absent()), actorName);
}
- public boolean waitForStartup(){
+ public ActorRef getRaftActor() {
+ return raftActor;
+ }
+
+ public boolean waitForLogMessage(final Class<?> logEventClass, String message){
// Wait for a specific log message to show up
return
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
+ new JavaTestKit.EventFilter<Boolean>(logEventClass
) {
@Override
protected Boolean run() {
return true;
}
}.from(raftActor.path().toString())
- .message("Switching from behavior Candidate to Leader")
+ .message(message)
.occurrences(1).exec();
}
- public void findLeader(final String expectedLeader){
- raftActor.tell(new FindLeader(), getRef());
-
- FindLeaderReply reply = expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
- assertEquals("getLeaderActor", expectedLeader, reply.getLeaderActor());
+ protected void waitUntilLeader(){
+ waitUntilLeader(raftActor);
}
- public ActorRef getRaftActor() {
- return raftActor;
+ protected void waitUntilLeader(ActorRef actorRef) {
+ FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
+ for(int i = 0; i < 20 * 5; i++) {
+ Future<Object> future = Patterns.ask(actorRef, new FindLeader(), new Timeout(duration));
+ try {
+ FindLeaderReply resp = (FindLeaderReply) Await.result(future, duration);
+ if(resp.getLeaderActor() != null) {
+ return;
+ }
+ } catch(TimeoutException e) {
+ } catch(Exception e) {
+ System.err.println("FindLeader threw ex");
+ e.printStackTrace();
+ }
+
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Leader not found for actorRef " + actorRef.path());
}
+
}
@Test
public void testConstruction() {
- boolean started = new RaftActorTestKit(getSystem(), "testConstruction").waitForStartup();
- assertEquals(true, started);
+ new RaftActorTestKit(getSystem(), "testConstruction").waitUntilLeader();
}
@Test
public void testFindLeaderWhenLeaderIsSelf(){
RaftActorTestKit kit = new RaftActorTestKit(getSystem(), "testFindLeader");
- kit.waitForStartup();
- kit.findLeader(kit.getRaftActor().path().toString());
+ kit.waitUntilLeader();
}
@Test
config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
ActorRef followerActor = getSystem().actorOf(MockRaftActor.props(persistenceId,
- Collections.EMPTY_MAP, Optional.<ConfigParams>of(config)), persistenceId);
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
watch(followerActor);
//reinstate the actor
TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
- MockRaftActor.props(persistenceId, Collections.EMPTY_MAP,
+ MockRaftActor.props(persistenceId, Collections.<String,String>emptyMap(),
Optional.<ConfigParams>of(config)));
ref.underlyingActor().waitForRecoveryComplete();
}};
}
+ /**
+ * This test verifies that when recovery is applicable (typically when persistence is true) the RaftActor does
+ * process recovery messages
+ *
+ * @throws Exception
+ */
+
+ @Test
+ public void testHandleRecoveryWhenDataPersistenceRecoveryApplicable() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testHandleRecoveryWhenDataPersistenceRecoveryApplicable";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config)), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ // Wait for akka's recovery to complete so it doesn't interfere.
+ mockRaftActor.waitForRecoveryComplete();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ Lists.<ReplicatedLogEntry>newArrayList(), 3, 1 ,3, 1);
+
+ mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
+
+ verify(mockRaftActor.delegate).applyRecoverySnapshot(eq(snapshotBytes));
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
+
+ ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
+
+ assertEquals("add replicated log entry", 1, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
+
+ assertEquals("add replicated log entry", 2, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ApplyLogEntries(1));
+
+ assertEquals("commit index 1", 1, mockRaftActor.getRaftActorContext().getCommitIndex());
+
+ // The snapshot had 4 items + we added 2 more items during the test
+ // We start removing from 5 and we should get 1 item in the replicated log
+ mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(5));
+
+ assertEquals("remove log entries", 1, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+
+ assertEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
+ assertEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
+
+ mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }};
+ }
+
+ /**
+ * This test verifies that when recovery is not applicable (typically when persistence is false) the RaftActor does
+ * not process recovery messages
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), new DataPersistenceProviderMonitor()), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ // Wait for akka's recovery to complete so it doesn't interfere.
+ mockRaftActor.waitForRecoveryComplete();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ Lists.<ReplicatedLogEntry>newArrayList(), 3, 1 ,3, 1);
+
+ mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
+
+ verify(mockRaftActor.delegate, times(0)).applyRecoverySnapshot(any(ByteString.class));
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
+
+ ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
+
+ assertEquals("add replicated log entry", 0, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
+
+ assertEquals("add replicated log entry", 0, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ApplyLogEntries(1));
+
+ assertEquals("commit index -1", -1, mockRaftActor.getRaftActorContext().getCommitIndex());
+
+ mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(2));
+
+ assertEquals("remove log entries", 0, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+
+ assertNotEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
+ assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
+
+ mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+ }};
+ }
+
+
+ @Test
+ public void testUpdatingElectionTermCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testUpdatingElectionTermCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch persistLatch = new CountDownLatch(1);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.getRaftActorContext().getTermInformation().updateAndPersist(10, "foobar");
+
+ assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testAddingReplicatedLogEntryCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testAddingReplicatedLogEntryCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ MockRaftActorContext.MockReplicatedLogEntry logEntry = new MockRaftActorContext.MockReplicatedLogEntry(10, 10, mock(Payload.class));
+
+ mockRaftActor.getRaftActorContext().getReplicatedLog().appendAndPersist(logEntry);
+
+ verify(dataPersistenceProvider).persist(eq(logEntry), any(Procedure.class));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testRemovingReplicatedLogEntryCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testRemovingReplicatedLogEntryCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.getReplicatedLog().appendAndPersist(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
+
+ mockRaftActor.getRaftActorContext().getReplicatedLog().removeFromAndPersist(0);
+
+ verify(dataPersistenceProvider, times(2)).persist(anyObject(), any(Procedure.class));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testApplyLogEntriesCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testApplyLogEntriesCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.onReceiveCommand(new ApplyLogEntries(10));
+
+ verify(dataPersistenceProvider, times(1)).persist(anyObject(), any(Procedure.class));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testCaptureSnapshotReplyCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testCaptureSnapshotReplyCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(),
+ MockRaftActor.props(persistenceId,Collections.<String,String>emptyMap(),
+ Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1,1,-1,1));
+
+ RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
+
+ mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes));
+
+ verify(dataPersistenceProvider).saveSnapshot(anyObject());
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testSaveSnapshotSuccessCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testSaveSnapshotSuccessCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,0, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,1, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,2, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,3, mock(Payload.class)));
+ mockRaftActor.getReplicatedLog().append(new MockRaftActorContext.MockReplicatedLogEntry(1,4, mock(Payload.class)));
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
+ mockRaftActor.setCurrentBehavior(new Follower(raftActorContext));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1, 1, 2, 1));
+
+ verify(mockRaftActor.delegate).createSnapshot();
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes));
+
+ mockRaftActor.onReceiveCommand(new SaveSnapshotSuccess(new SnapshotMetadata("foo", 100, 100)));
+
+ verify(dataPersistenceProvider).deleteSnapshots(any(SnapshotSelectionCriteria.class));
+
+ verify(dataPersistenceProvider).deleteMessages(100);
+
+ assertEquals(2, mockRaftActor.getReplicatedLog().size());
+
+ assertNotNull(mockRaftActor.getReplicatedLog().get(3));
+ assertNotNull(mockRaftActor.getReplicatedLog().get(4));
+
+ // Index 2 will not be in the log because it was removed due to snapshotting
+ assertNull(mockRaftActor.getReplicatedLog().get(2));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testApplyState() throws Exception {
+
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testApplyState";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProvider), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ReplicatedLogEntry entry = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
+ new MockRaftActorContext.MockPayload("F"));
+
+ mockRaftActor.onReceiveCommand(new ApplyState(mockActorRef, "apply-state", entry));
+
+ verify(mockRaftActor.delegate).applyState(eq(mockActorRef), eq("apply-state"), anyObject());
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testApplySnapshot() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testApplySnapshot";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ReplicatedLog oldReplicatedLog = mockRaftActor.getReplicatedLog();
+
+ oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,0,mock(Payload.class)));
+ oldReplicatedLog.append(new MockRaftActorContext.MockReplicatedLogEntry(1,1,mock(Payload.class)));
+ oldReplicatedLog.append(
+ new MockRaftActorContext.MockReplicatedLogEntry(1, 2,
+ mock(Payload.class)));
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = mock(Snapshot.class);
+
+ doReturn(snapshotBytes.toByteArray()).when(snapshot).getState();
+
+ doReturn(3L).when(snapshot).getLastAppliedIndex();
+
+ mockRaftActor.onReceiveCommand(new ApplySnapshot(snapshot));
+
+ verify(mockRaftActor.delegate).applySnapshot(eq(snapshotBytes));
+
+ assertTrue("The replicatedLog should have changed",
+ oldReplicatedLog != mockRaftActor.getReplicatedLog());
+
+ assertEquals("lastApplied should be same as in the snapshot",
+ (Long) 3L, mockRaftActor.getLastApplied());
+
+ assertEquals(0, mockRaftActor.getReplicatedLog().size());
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testSaveSnapshotFailure() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testSaveSnapshotFailure";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.<String,String>emptyMap(), Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ RaftActorContext raftActorContext = mockRaftActor.getRaftActorContext();
+
+ mockRaftActor.setCurrentBehavior(new Leader(raftActorContext));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1,1,-1,1));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes));
+
+ mockRaftActor.onReceiveCommand(new SaveSnapshotFailure(new SnapshotMetadata("foobar", 10L, 1234L),
+ new Exception()));
+
+ assertEquals("Snapshot index should not have advanced because save snapshot failed", -1,
+ mockRaftActor.getReplicatedLog().getSnapshotIndex());
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
private ByteString fromObject(Object snapshot) throws Exception {
ByteArrayOutputStream b = null;
ObjectOutputStream o = null;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-
import static org.junit.Assert.assertEquals;
public class CandidateTest extends AbstractRaftActorBehaviorTest {
Candidate candidate = new Candidate(createActorContext(getTestActor()));
- candidate.handleMessage(getTestActor(), new AppendEntries(0, "test", 0,0,Collections.EMPTY_LIST, 0));
+ candidate.handleMessage(getTestActor(), new AppendEntries(0, "test", 0,0,Collections.<ReplicatedLogEntry>emptyList(), 0));
final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"), "AppendEntriesResponse") {
// do not put code outside this method, will run afterwards
import akka.actor.Props;
import akka.testkit.JavaTestKit;
import com.google.protobuf.ByteString;
-import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
RaftActorBehavior raftBehavior =
follower.handleMessage(followerActor, new ElectionTimeout());
- Assert.assertTrue(raftBehavior instanceof Candidate);
+ assertTrue(raftBehavior instanceof Candidate);
}
@Test
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.JavaTestKit;
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class IsolatedLeaderTest extends AbstractRaftActorBehaviorTest {
+
+ private ActorRef leaderActor =
+ getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ private ActorRef senderActor =
+ getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ @Override
+ protected RaftActorBehavior createBehavior(
+ RaftActorContext actorContext) {
+ return new Leader(actorContext);
+ }
+
+ @Override
+ protected RaftActorContext createActorContext() {
+ return createActorContext(leaderActor);
+ }
+
+
+ @Test
+ public void testHandleMessageWithThreeMembers() {
+ new JavaTestKit(getSystem()) {{
+ String followerAddress1 = "akka://test/user/$a";
+ String followerAddress2 = "akka://test/user/$b";
+
+ MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerAddress1);
+ peerAddresses.put("follower-2", followerAddress2);
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ IsolatedLeader isolatedLeader = new IsolatedLeader(leaderActorContext);
+ assertTrue(isolatedLeader.state() == RaftState.IsolatedLeader);
+
+            // in a 3 node cluster, even if only 1 follower returns a reply, the isolatedLeader is not isolated
+ RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
+ new AppendEntriesReply("follower-1", isolatedLeader.lastTerm() - 1, true,
+ isolatedLeader.lastIndex() - 1, isolatedLeader.lastTerm() - 1));
+
+ assertEquals(RaftState.Leader, behavior.state());
+
+ behavior = isolatedLeader.handleMessage(senderActor,
+ new AppendEntriesReply("follower-2", isolatedLeader.lastTerm() - 1, true,
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+
+ assertEquals(RaftState.Leader, behavior.state());
+ }};
+ }
+
+ @Test
+ public void testHandleMessageWithFiveMembers() {
+ new JavaTestKit(getSystem()) {{
+
+ String followerAddress1 = "akka://test/user/$a";
+ String followerAddress2 = "akka://test/user/$b";
+ String followerAddress3 = "akka://test/user/$c";
+ String followerAddress4 = "akka://test/user/$d";
+
+ MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerAddress1);
+ peerAddresses.put("follower-2", followerAddress2);
+ peerAddresses.put("follower-3", followerAddress3);
+ peerAddresses.put("follower-4", followerAddress4);
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ IsolatedLeader isolatedLeader = new IsolatedLeader(leaderActorContext);
+ assertEquals(RaftState.IsolatedLeader, isolatedLeader.state());
+
+            // in a 5 member cluster, at least 2 followers need to be active and return a reply
+ RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
+ new AppendEntriesReply("follower-1", isolatedLeader.lastTerm() - 1, true,
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+
+ assertEquals(RaftState.IsolatedLeader, behavior.state());
+
+ behavior = isolatedLeader.handleMessage(senderActor,
+ new AppendEntriesReply("follower-2", isolatedLeader.lastTerm() - 1, true,
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+
+ assertEquals(RaftState.Leader, behavior.state());
+
+ behavior = isolatedLeader.handleMessage(senderActor,
+ new AppendEntriesReply("follower-3", isolatedLeader.lastTerm() - 1, true,
+ isolatedLeader.lastIndex() -1, isolatedLeader.lastTerm() -1 ));
+
+ assertEquals(RaftState.Leader, behavior.state());
+ }};
+ }
+
+ @Test
+ public void testHandleMessageFromAnotherLeader() {
+ new JavaTestKit(getSystem()) {{
+ String followerAddress1 = "akka://test/user/$a";
+ String followerAddress2 = "akka://test/user/$b";
+
+ MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerAddress1);
+ peerAddresses.put("follower-2", followerAddress2);
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ IsolatedLeader isolatedLeader = new IsolatedLeader(leaderActorContext);
+ assertTrue(isolatedLeader.state() == RaftState.IsolatedLeader);
+
+            // if an append-entries reply is received by the isolated-leader, and that reply
+            // has a term greater than its own term, then IsolatedLeader switches to Follower,
+            // bowing itself to another leader in the cluster
+ RaftActorBehavior behavior = isolatedLeader.handleMessage(senderActor,
+ new AppendEntriesReply("follower-1", isolatedLeader.lastTerm() + 1, true,
+ isolatedLeader.lastIndex() + 1, isolatedLeader.lastTerm() + 1));
+
+ assertEquals(RaftState.Follower, behavior.state());
+ }};
+
+ }
+}
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
import akka.actor.Props;
+import akka.actor.Terminated;
import akka.testkit.JavaTestKit;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
-import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
+import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.IsolatedLeaderCheck;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectOutputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
+import scala.concurrent.duration.FiniteDuration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
ActorRef followerActor = getTestActor();
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
+ MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext();
- Map<String, String> peerAddresses = new HashMap();
+ Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put(followerActor.path().toString(),
followerActor.path().toString());
MockRaftActorContext actorContext =
(MockRaftActorContext) createActorContext();
- Map<String, String> peerAddresses = new HashMap();
+ Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put(followerActor.path().toString(),
followerActor.path().toString());
}.get(); // this extracts the received message
assertEquals("match", out);
-
}
-
-
};
}};
}
}
@Test
- public void testSendInstallSnapshot() {
- new LeaderTestKit(getSystem()) {{
+ public void testSendAppendEntriesOnAnInProgressInstallSnapshot() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ ActorRef followerActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
- new Within(duration("1 seconds")) {
- protected void run() {
- ActorRef followerActor = getTestActor();
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
- Map<String, String> peerAddresses = new HashMap();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext(leaderActor);
+ actorContext.setPeerAddresses(peerAddresses);
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext(getRef());
- actorContext.setPeerAddresses(peerAddresses);
+ //clears leaders log
+ actorContext.getReplicatedLog().removeFrom(0);
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.setCommitIndex(followersLastIndex);
+ //set follower timeout to 2 mins, helps during debugging
+ actorContext.setConfigParams(new MockConfigParamsImpl(120000L, 10));
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ MockLeader leader = new MockLeader(actorContext);
+
+ // new entry
+ ReplicatedLogImplEntry entry =
+ new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ new MockRaftActorContext.MockPayload("D"));
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ //update follower timestamp
+ leader.markFollowerActive(followerActor.path().toString());
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshot(
- toByteString(leadersSnapshot));
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+ leader.createFollowerToSnapshot(followerActor.path().toString(), bs);
- MockLeader leader = new MockLeader(actorContext);
- // set the follower info in leader
- leader.addToFollowerToLog(followerActor.path().toString(), followersLastIndex, -1);
+ //send first chunk and no InstallSnapshotReply received yet
+ leader.getFollowerToSnapshot().getNextChunk();
+ leader.getFollowerToSnapshot().incrementChunkIndex();
- // new entry
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
+ leader.handleMessage(leaderActor, new SendHeartBeat());
- // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
- RaftActorBehavior raftBehavior = leader.handleMessage(
- senderActor, new Replicate(null, "state-id", entry));
+ AppendEntriesMessages.AppendEntries aeproto = (AppendEntriesMessages.AppendEntries)MessageCollectorActor.getFirstMatching(
+ followerActor, AppendEntries.SERIALIZABLE_CLASS);
- assertTrue(raftBehavior instanceof Leader);
+ assertNotNull("AppendEntries should be sent even if InstallSnapshotReply is not " +
+ "received", aeproto);
- // we might receive some heartbeat messages, so wait till we SendInstallSnapshot
- Boolean[] matches = new ReceiveWhile<Boolean>(Boolean.class, duration("2 seconds")) {
- @Override
- protected Boolean match(Object o) throws Exception {
- if (o instanceof SendInstallSnapshot) {
- return true;
- }
- return false;
- }
- }.get();
+ AppendEntries ae = (AppendEntries) SerializationUtils.fromSerializable(aeproto);
- boolean sendInstallSnapshotReceived = false;
- for (Boolean b: matches) {
- sendInstallSnapshotReceived = b | sendInstallSnapshotReceived;
- }
+ assertTrue("AppendEntries should be sent with empty entries", ae.getEntries().isEmpty());
+
+ //InstallSnapshotReply received
+ leader.getFollowerToSnapshot().markSendStatus(true);
+
+ leader.handleMessage(senderActor, new SendHeartBeat());
+
+ InstallSnapshotMessages.InstallSnapshot isproto = (InstallSnapshotMessages.InstallSnapshot)
+ MessageCollectorActor.getFirstMatching(followerActor,
+ InstallSnapshot.SERIALIZABLE_CLASS);
+
+ assertNotNull("Installsnapshot should get called for sending the next chunk of snapshot",
+ isproto);
+
+ InstallSnapshot is = (InstallSnapshot) SerializationUtils.fromSerializable(isproto);
+
+ assertEquals(snapshotIndex, is.getLastIncludedIndex());
+
+ }};
+ }
+
+ @Test
+ public void testSendAppendEntriesSnapshotScenario() {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef followerActor = getTestActor();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
+
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext(getRef());
+ actorContext.setPeerAddresses(peerAddresses);
+
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ //clears leaders log
+ actorContext.getReplicatedLog().removeFrom(0);
+
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
+
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.setCommitIndex(followersLastIndex);
+
+ Leader leader = new Leader(actorContext);
+
+ // new entry
+ ReplicatedLogImplEntry entry =
+ new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ new MockRaftActorContext.MockPayload("D"));
- assertTrue(sendInstallSnapshotReceived);
+ //update follower timestamp
+ leader.markFollowerActive(followerActor.path().toString());
+ // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
+ RaftActorBehavior raftBehavior = leader.handleMessage(
+ senderActor, new Replicate(null, "state-id", entry));
+
+ assertTrue(raftBehavior instanceof Leader);
+
+ // we might receive some heartbeat messages, so wait till we InitiateInstallSnapshot
+ Boolean[] matches = new ReceiveWhile<Boolean>(Boolean.class, duration("2 seconds")) {
+ @Override
+ protected Boolean match(Object o) throws Exception {
+ if (o instanceof InitiateInstallSnapshot) {
+ return true;
+ }
+ return false;
}
- };
+ }.get();
+
+ boolean initiateInitiateInstallSnapshot = false;
+ for (Boolean b: matches) {
+ initiateInitiateInstallSnapshot = b | initiateInitiateInstallSnapshot;
+ }
+
+ assertTrue(initiateInitiateInstallSnapshot);
}};
}
@Test
- public void testInstallSnapshot() {
- new LeaderTestKit(getSystem()) {{
+ public void testInitiateInstallSnapshot() throws Exception {
+ new JavaTestKit(getSystem()) {{
- new Within(duration("1 seconds")) {
- protected void run() {
- ActorRef followerActor = getTestActor();
+ ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
- Map<String, String> peerAddresses = new HashMap();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ ActorRef followerActor = getTestActor();
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
- actorContext.setPeerAddresses(peerAddresses);
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext(leaderActor);
+ actorContext.setPeerAddresses(peerAddresses);
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
+ //clears leaders log
+ actorContext.getReplicatedLog().removeFrom(0);
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshot(toByteString(leadersSnapshot));
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.setLastApplied(3);
+ actorContext.setCommitIndex(followersLastIndex);
- MockLeader leader = new MockLeader(actorContext);
- // set the follower info in leader
- leader.addToFollowerToLog(followerActor.path().toString(), followersLastIndex, -1);
+ Leader leader = new Leader(actorContext);
+ // set the snapshot as absent and check if capture-snapshot is invoked.
+ leader.setSnapshot(Optional.<ByteString>absent());
- // new entry
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
- new MockRaftActorContext.MockPayload("D"));
+ // new entry
+ ReplicatedLogImplEntry entry =
+ new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ new MockRaftActorContext.MockPayload("D"));
- RaftActorBehavior raftBehavior = leader.handleMessage(senderActor, new SendInstallSnapshot());
+ actorContext.getReplicatedLog().append(entry);
- assertTrue(raftBehavior instanceof Leader);
+ // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
+ RaftActorBehavior raftBehavior = leader.handleMessage(
+ leaderActor, new InitiateInstallSnapshot());
- // check if installsnapshot gets called with the correct values.
- final String out =
- new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- protected String match(Object in) {
- if (in instanceof InstallSnapshotMessages.InstallSnapshot) {
- InstallSnapshot is = (InstallSnapshot)
- SerializationUtils.fromSerializable(in);
- if (is.getData() == null) {
- return "InstallSnapshot data is null";
- }
- if (is.getLastIncludedIndex() != snapshotIndex) {
- return is.getLastIncludedIndex() + "!=" + snapshotIndex;
- }
- if (is.getLastIncludedTerm() != snapshotTerm) {
- return is.getLastIncludedTerm() + "!=" + snapshotTerm;
- }
- if (is.getTerm() == currentTerm) {
- return is.getTerm() + "!=" + currentTerm;
- }
+ CaptureSnapshot cs = (CaptureSnapshot) MessageCollectorActor.
+ getFirstMatching(leaderActor, CaptureSnapshot.class);
- return "match";
+ assertNotNull(cs);
- } else {
- return "message mismatch:" + in.getClass();
- }
+ assertTrue(cs.isInstallSnapshotInitiated());
+ assertEquals(3, cs.getLastAppliedIndex());
+ assertEquals(1, cs.getLastAppliedTerm());
+ assertEquals(4, cs.getLastIndex());
+ assertEquals(2, cs.getLastTerm());
+ }};
+ }
+
+ @Test
+ public void testInstallSnapshot() {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef followerActor = getTestActor();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
+
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext();
+ actorContext.setPeerAddresses(peerAddresses);
+
+
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
+
+ //clears leaders log
+ actorContext.getReplicatedLog().removeFrom(0);
+
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
+
+ // set the snapshot variables in replicatedlog
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+ actorContext.setCommitIndex(followersLastIndex);
+
+ Leader leader = new Leader(actorContext);
+
+ // new entry
+ ReplicatedLogImplEntry entry =
+ new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+ new MockRaftActorContext.MockPayload("D"));
+
+ RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
+ new SendInstallSnapshot(toByteString(leadersSnapshot)));
+
+ assertTrue(raftBehavior instanceof Leader);
+
+ // check if installsnapshot gets called with the correct values.
+ final String out =
+ new ExpectMsg<String>(duration("1 seconds"), "match hint") {
+ // do not put code outside this method, will run afterwards
+ protected String match(Object in) {
+ if (in instanceof InstallSnapshotMessages.InstallSnapshot) {
+ InstallSnapshot is = (InstallSnapshot)
+ SerializationUtils.fromSerializable(in);
+ if (is.getData() == null) {
+ return "InstallSnapshot data is null";
+ }
+ if (is.getLastIncludedIndex() != snapshotIndex) {
+ return is.getLastIncludedIndex() + "!=" + snapshotIndex;
+ }
+ if (is.getLastIncludedTerm() != snapshotTerm) {
+ return is.getLastIncludedTerm() + "!=" + snapshotTerm;
+ }
+ if (is.getTerm() == currentTerm) {
+ return is.getTerm() + "!=" + currentTerm;
}
- }.get(); // this extracts the received message
- assertEquals("match", out);
- }
- };
+ return "match";
+
+ } else {
+ return "message mismatch:" + in.getClass();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ assertEquals("match", out);
}};
}
@Test
public void testHandleInstallSnapshotReplyLastChunk() {
- new LeaderTestKit(getSystem()) {{
- new Within(duration("1 seconds")) {
- protected void run() {
- ActorRef followerActor = getTestActor();
+ new JavaTestKit(getSystem()) {{
- Map<String, String> peerAddresses = new HashMap();
- peerAddresses.put(followerActor.path().toString(),
- followerActor.path().toString());
+ ActorRef followerActor = getTestActor();
- MockRaftActorContext actorContext =
- (MockRaftActorContext) createActorContext();
- actorContext.setPeerAddresses(peerAddresses);
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
- final int followersLastIndex = 2;
- final int snapshotIndex = 3;
- final int newEntryIndex = 4;
- final int snapshotTerm = 1;
- final int currentTerm = 2;
-
- MockLeader leader = new MockLeader(actorContext);
- // set the follower info in leader
- leader.addToFollowerToLog(followerActor.path().toString(), followersLastIndex, -1);
-
- Map<String, String> leadersSnapshot = new HashMap<>();
- leadersSnapshot.put("1", "A");
- leadersSnapshot.put("2", "B");
- leadersSnapshot.put("3", "C");
-
- // set the snapshot variables in replicatedlog
- actorContext.getReplicatedLog().setSnapshot(
- toByteString(leadersSnapshot));
- actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
- actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
- actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
-
- ByteString bs = toByteString(leadersSnapshot);
- leader.createFollowerToSnapshot(followerActor.path().toString(), bs);
- while(!leader.getFollowerToSnapshot().isLastChunk(leader.getFollowerToSnapshot().getChunkIndex())) {
- leader.getFollowerToSnapshot().getNextChunk();
- leader.getFollowerToSnapshot().incrementChunkIndex();
- }
+ final int followersLastIndex = 2;
+ final int snapshotIndex = 3;
+ final int newEntryIndex = 4;
+ final int snapshotTerm = 1;
+ final int currentTerm = 2;
- //clears leaders log
- actorContext.getReplicatedLog().removeFrom(0);
+ MockRaftActorContext actorContext =
+ (MockRaftActorContext) createActorContext();
+ actorContext.setPeerAddresses(peerAddresses);
+ actorContext.setCommitIndex(followersLastIndex);
- RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
- new InstallSnapshotReply(currentTerm, followerActor.path().toString(),
- leader.getFollowerToSnapshot().getChunkIndex(), true));
+ MockLeader leader = new MockLeader(actorContext);
- assertTrue(raftBehavior instanceof Leader);
+ Map<String, String> leadersSnapshot = new HashMap<>();
+ leadersSnapshot.put("1", "A");
+ leadersSnapshot.put("2", "B");
+ leadersSnapshot.put("3", "C");
- assertEquals(leader.mapFollowerToSnapshot.size(), 0);
- assertEquals(leader.followerToLog.size(), 1);
- assertNotNull(leader.followerToLog.get(followerActor.path().toString()));
- FollowerLogInformation fli = leader.followerToLog.get(followerActor.path().toString());
- assertEquals(snapshotIndex, fli.getMatchIndex().get());
- assertEquals(snapshotIndex, fli.getMatchIndex().get());
- assertEquals(snapshotIndex + 1, fli.getNextIndex().get());
- }
- };
+ // set the snapshot variables in replicatedlog
+
+ actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+ actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+ actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+
+ ByteString bs = toByteString(leadersSnapshot);
+ leader.setSnapshot(Optional.of(bs));
+ leader.createFollowerToSnapshot(followerActor.path().toString(), bs);
+ while(!leader.getFollowerToSnapshot().isLastChunk(leader.getFollowerToSnapshot().getChunkIndex())) {
+ leader.getFollowerToSnapshot().getNextChunk();
+ leader.getFollowerToSnapshot().incrementChunkIndex();
+ }
+
+ //clears leaders log
+ actorContext.getReplicatedLog().removeFrom(0);
+
+ RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
+ new InstallSnapshotReply(currentTerm, followerActor.path().toString(),
+ leader.getFollowerToSnapshot().getChunkIndex(), true));
+
+ assertTrue(raftBehavior instanceof Leader);
+
+ assertEquals(leader.mapFollowerToSnapshot.size(), 0);
+ assertEquals(leader.followerToLog.size(), 1);
+ assertNotNull(leader.followerToLog.get(followerActor.path().toString()));
+ FollowerLogInformation fli = leader.followerToLog.get(followerActor.path().toString());
+ // after the last chunk is acked, the follower's match index jumps to the
+ // snapshot index and next index to snapshotIndex + 1 (duplicate assert removed)
+ assertEquals(snapshotIndex, fli.getMatchIndex().get());
+ assertEquals(snapshotIndex + 1, fli.getNextIndex().get());
}};
}
ForwardMessageToBehaviorActor.setBehavior(follower);
- Map<String, String> peerAddresses = new HashMap();
+ Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put(followerActor.path().toString(),
followerActor.path().toString());
followerActorContext.setCommitIndex(1);
Leader leader = new Leader(leaderActorContext);
+ leader.markFollowerActive(followerActor.path().toString());
leader.handleMessage(leaderActor, new SendHeartBeat());
ForwardMessageToBehaviorActor.setBehavior(follower);
- Map<String, String> peerAddresses = new HashMap();
+ Map<String, String> peerAddresses = new HashMap<>();
peerAddresses.put(followerActor.path().toString(),
followerActor.path().toString());
followerActorContext.setCommitIndex(2);
Leader leader = new Leader(leaderActorContext);
+ leader.markFollowerActive(followerActor.path().toString());
leader.handleMessage(leaderActor, new SendHeartBeat());
}};
}
- private static class LeaderTestKit extends JavaTestKit {
+ @Test
+ public void testHandleAppendEntriesReplyFailure(){
+ new JavaTestKit(getSystem()) {
+ {
+
+ ActorRef leaderActor =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
- private LeaderTestKit(ActorSystem actorSystem) {
- super(actorSystem);
- }
+ ActorRef followerActor =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
- protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(logLevel
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(subject.path().toString())
- .message(logMessage)
- .occurrences(1).exec();
- Assert.assertEquals(true, result);
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
- }
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1",
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ AppendEntriesReply reply = new AppendEntriesReply("follower-1", 1, false, 10, 1);
+
+ RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
+
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
+
+ }};
+ }
+
+ @Test
+ public void testHandleAppendEntriesReplySuccess() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+
+ ActorRef leaderActor =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ ActorRef followerActor =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1",
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+ leaderActorContext.setCommitIndex(1);
+ leaderActorContext.setLastApplied(1);
+ leaderActorContext.getTermInformation().update(1, "leader");
+
+ Leader leader = new Leader(leaderActorContext);
+
+ AppendEntriesReply reply = new AppendEntriesReply("follower-1", 1, true, 2, 1);
+
+ RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(followerActor, reply);
+
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
+
+ assertEquals(2, leaderActorContext.getCommitIndex());
+
+ ApplyLogEntries applyLogEntries =
+ (ApplyLogEntries) MessageCollectorActor.getFirstMatching(leaderActor,
+ ApplyLogEntries.class);
+
+ assertNotNull(applyLogEntries);
+
+ assertEquals(2, leaderActorContext.getLastApplied());
+
+ assertEquals(2, applyLogEntries.getToIndex());
+
+ List<Object> applyStateList = MessageCollectorActor.getAllMatching(leaderActor,
+ ApplyState.class);
+
+ assertEquals(1,applyStateList.size());
+
+ ApplyState applyState = (ApplyState) applyStateList.get(0);
+
+ assertEquals(2, applyState.getReplicatedLogEntry().getIndex());
+
+ }};
+ }
+
+ @Test
+ public void testHandleAppendEntriesReplyUnknownFollower(){
+ new JavaTestKit(getSystem()) {
+ {
+
+ ActorRef leaderActor =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ AppendEntriesReply reply = new AppendEntriesReply("follower-1", 1, false, 10, 1);
+
+ RaftActorBehavior raftActorBehavior = leader.handleAppendEntriesReply(getRef(), reply);
+
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
+
+ }};
+ }
+
+ @Test
+ public void testHandleRequestVoteReply(){
+ new JavaTestKit(getSystem()) {
+ {
+
+ ActorRef leaderActor =
+ getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ RaftActorBehavior raftActorBehavior = leader.handleRequestVoteReply(getRef(), new RequestVoteReply(1, true));
+
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
+
+ raftActorBehavior = leader.handleRequestVoteReply(getRef(), new RequestVoteReply(1, false));
+
+ assertEquals(RaftState.Leader, raftActorBehavior.state());
+ }};
+ }
+
+ @Test
+ public void testIsolatedLeaderCheckNoFollowers() {
+ new JavaTestKit(getSystem()) {{
+ ActorRef leaderActor = getTestActor();
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ Leader leader = new Leader(leaderActorContext);
+ RaftActorBehavior behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
+ Assert.assertTrue(behavior instanceof Leader);
+ }};
+ }
+
+ @Test
+ public void testIsolatedLeaderCheckTwoFollowers() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef followerActor1 = getTestActor();
+ ActorRef followerActor2 = getTestActor();
+
+ MockRaftActorContext leaderActorContext = (MockRaftActorContext) createActorContext();
+
+ Map<String, String> peerAddresses = new HashMap<>();
+ peerAddresses.put("follower-1", followerActor1.path().toString());
+ peerAddresses.put("follower-2", followerActor2.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ Leader leader = new Leader(leaderActorContext);
+ leader.stopIsolatedLeaderCheckSchedule();
+
+ leader.markFollowerActive("follower-1");
+ leader.markFollowerActive("follower-2");
+ RaftActorBehavior behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
+ Assert.assertTrue("Behavior not instance of Leader when all followers are active",
+ behavior instanceof Leader);
+
+ // kill 1 follower and verify if that got killed
+ final JavaTestKit probe = new JavaTestKit(getSystem());
+ probe.watch(followerActor1);
+ followerActor1.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ final Terminated termMsg1 = probe.expectMsgClass(Terminated.class);
+ assertEquals(termMsg1.getActor(), followerActor1);
+
+ //sleep enough for all the follower stopwatches to lapse
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().
+ getElectionTimeOutInterval().toMillis(), TimeUnit.MILLISECONDS);
+
+ leader.markFollowerActive("follower-2");
+ behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
+ Assert.assertTrue("Behavior not instance of Leader when majority of followers are active",
+ behavior instanceof Leader);
+
+ // kill 2nd follower and leader should change to Isolated leader.
+ // watch BEFORE sending PoisonPill (and send it only once) so the
+ // Terminated notification cannot be missed by the probe.
+ probe.watch(followerActor2);
+ followerActor2.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ final Terminated termMsg2 = probe.expectMsgClass(Terminated.class);
+ assertEquals(termMsg2.getActor(), followerActor2);
+
+ //sleep enough for the remaining the follower stopwatches to lapse
+ Uninterruptibles.sleepUninterruptibly(leaderActorContext.getConfigParams().
+ getElectionTimeOutInterval().toMillis(), TimeUnit.MILLISECONDS);
+
+ behavior = leader.handleMessage(leaderActor, new IsolatedLeaderCheck());
+ Assert.assertTrue("Behavior not instance of IsolatedLeader when majority followers are inactive",
+ behavior instanceof IsolatedLeader);
+
+ }};
}
class MockLeader extends Leader {
super(context);
}
- public void addToFollowerToLog(String followerId, long nextIndex, long matchIndex) {
- FollowerLogInformation followerLogInformation =
- new FollowerLogInformationImpl(followerId,
- new AtomicLong(nextIndex),
- new AtomicLong(matchIndex));
- followerToLog.put(followerId, followerLogInformation);
- }
-
public FollowerToSnapshot getFollowerToSnapshot() {
return fts;
}
}
}
+
+ private class MockConfigParamsImpl extends DefaultConfigParamsImpl {
+
+ private long electionTimeOutIntervalMillis;
+ private int snapshotChunkSize;
+
+ public MockConfigParamsImpl(long electionTimeOutIntervalMillis, int snapshotChunkSize) {
+ super();
+ this.electionTimeOutIntervalMillis = electionTimeOutIntervalMillis;
+ this.snapshotChunkSize = snapshotChunkSize;
+ }
+
+ @Override
+ public FiniteDuration getElectionTimeOutInterval() {
+ return new FiniteDuration(electionTimeOutIntervalMillis, TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public int getSnapshotChunkSize() {
+ return snapshotChunkSize;
+ }
+ }
}
import akka.actor.UntypedActor;
import akka.pattern.Patterns;
import akka.util.Timeout;
+import com.google.common.collect.Lists;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
* @param clazz
* @return
*/
- public static Object getFirstMatching(ActorRef actor, Class clazz) throws Exception {
+ public static Object getFirstMatching(ActorRef actor, Class<?> clazz) throws Exception {
List<Object> allMessages = getAllMessages(actor);
for(Object message : allMessages){
return null;
}
+ public static List<Object> getAllMatching(ActorRef actor, Class<?> clazz) throws Exception {
+ List<Object> allMessages = getAllMessages(actor);
+
+ List<Object> output = Lists.newArrayList();
+
+ for(Object message : allMessages){
+ if(message.getClass().equals(clazz)){
+ output.add(message);
+ }
+ }
+
+ return output;
+ }
+
}
actor {
# enable to test serialization only.
- serialize-messages = on
+ serialize-messages = off
serializers {
java = "akka.serialization.JavaSerializer"
}
serialization-bindings {
+ "org.opendaylight.controller.cluster.common.actor.Monitor" = java
"org.opendaylight.controller.cluster.raft.client.messages.FindLeader" = java
"org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry" = java
"com.google.protobuf.Message" = proto
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>binding-data-codec</artifactId>
- <version>0.7.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
*/
package org.opendaylight.controller.config.yang.md.sal.binding.impl;
-import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.util.Hashtable;
-import java.util.Map.Entry;
-import java.util.Set;
import javassist.ClassPool;
import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.yangtools.concepts.Delegator;
import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
import org.opendaylight.yangtools.sal.binding.generator.impl.RuntimeGeneratedMappingServiceImpl;
-import org.opendaylight.yangtools.yang.binding.DataContainer;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.impl.codec.BindingIndependentMappingService;
-import org.opendaylight.yangtools.yang.data.impl.codec.CodecRegistry;
-import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
*
*/
public final class RuntimeMappingModule extends AbstractRuntimeMappingModule {
- private static final Logger LOG = LoggerFactory.getLogger(RuntimeMappingModule.class);
-
private BundleContext bundleContext;
public RuntimeMappingModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
public java.lang.AutoCloseable createInstance() {
final GeneratedClassLoadingStrategy classLoading = getGlobalClassLoadingStrategy();
final BindingIndependentMappingService legacyMapping = getGlobalLegacyMappingService(classLoading);
- BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(new StreamWriterGenerator(SingletonHolder.JAVASSIST));
+ BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(StreamWriterGenerator.create(SingletonHolder.JAVASSIST));
BindingToNormalizedNodeCodec instance = new BindingToNormalizedNodeCodec(classLoading, legacyMapping, codecRegistry);
bundleContext.registerService(SchemaContextListener.class, instance, new Hashtable<String,String>());
return instance;
public void setBundleContext(final BundleContext bundleContext) {
this.bundleContext = bundleContext;
}
-
- private static final class RuntimeGeneratedMappingServiceProxy implements //
- BindingIndependentMappingService, //
- Delegator<BindingIndependentMappingService>, //
- AutoCloseable {
-
- private BindingIndependentMappingService delegate;
- private ServiceReference<BindingIndependentMappingService> reference;
- private BundleContext bundleContext;
-
- public RuntimeGeneratedMappingServiceProxy(final BundleContext bundleContext,
- final ServiceReference<BindingIndependentMappingService> serviceRef,
- final BindingIndependentMappingService delegate) {
- this.bundleContext = Preconditions.checkNotNull(bundleContext);
- this.reference = Preconditions.checkNotNull(serviceRef);
- this.delegate = Preconditions.checkNotNull(delegate);
- }
-
- @Override
- public CodecRegistry getCodecRegistry() {
- return delegate.getCodecRegistry();
- }
-
- @Override
- public CompositeNode toDataDom(final DataObject data) {
- return delegate.toDataDom(data);
- }
-
- @Override
- public Entry<YangInstanceIdentifier, CompositeNode> toDataDom(
- final Entry<InstanceIdentifier<? extends DataObject>, DataObject> entry) {
- return delegate.toDataDom(entry);
- }
-
- @Override
- public YangInstanceIdentifier toDataDom(final InstanceIdentifier<? extends DataObject> path) {
- return delegate.toDataDom(path);
- }
-
- @Override
- public DataObject dataObjectFromDataDom(
- final InstanceIdentifier<? extends DataObject> path,
- final CompositeNode result) throws DeserializationException {
- return delegate.dataObjectFromDataDom(path, result);
- }
-
- @Override
- public InstanceIdentifier<?> fromDataDom(final YangInstanceIdentifier entry)
- throws DeserializationException {
- return delegate.fromDataDom(entry);
- }
-
- @Override
- public Set<QName> getRpcQNamesFor(final Class<? extends RpcService> service) {
- return delegate.getRpcQNamesFor(service);
- }
-
- @Override
- public Optional<Class<? extends RpcService>> getRpcServiceClassFor(final String namespace, final String revision) {
- return delegate.getRpcServiceClassFor(namespace,revision);
- }
-
- @Override
- public DataContainer dataObjectFromDataDom(final Class<? extends DataContainer> inputClass, final CompositeNode domInput) {
- return delegate.dataObjectFromDataDom(inputClass, domInput);
- }
-
- @Override
- public void close() {
- if(delegate != null) {
- delegate = null;
-
- try {
- bundleContext.ungetService(reference);
- } catch (IllegalStateException e) {
- // Indicates the BundleContext is no longer valid which can happen normally on shutdown.
- LOG.debug( "Error unregistering service", e );
- }
-
- bundleContext= null;
- reference = null;
- }
- }
-
- @Override
- public BindingIndependentMappingService getDelegate() {
- return delegate;
- }
- }
}
if (path.isWildcarded()) {
return Optional.absent();
}
- return (Optional) getCodec().deserializeFunction(path).apply(Optional.<NormalizedNode<?, ?>> of(data));
+ return (Optional<DataObject>) getCodec().deserializeFunction(path).apply(Optional.<NormalizedNode<?, ?>> of(data));
}
private class TranslatingDataChangeInvoker implements DOMDataChangeListener {
package org.opendaylight.controller.sal.binding.codegen.impl;
import com.google.common.base.Supplier;
-
-import java.util.Map;
-import java.util.WeakHashMap;
-
+import com.google.common.collect.Iterables;
import javassist.ClassPool;
import javassist.CtClass;
import javassist.CtMethod;
import javassist.NotFoundException;
-
-import javax.annotation.concurrent.GuardedBy;
-
import org.eclipse.xtext.xbase.lib.Extension;
import org.opendaylight.controller.sal.binding.api.rpc.RpcRouter;
import org.opendaylight.controller.sal.binding.codegen.RpcIsNotRoutedException;
import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
import org.opendaylight.yangtools.yang.binding.util.ClassLoaderUtils;
+import javax.annotation.concurrent.GuardedBy;
+import java.util.Map;
+import java.util.WeakHashMap;
+
abstract class AbstractRuntimeCodeGenerator implements org.opendaylight.controller.sal.binding.codegen.RuntimeCodeGenerator, NotificationInvokerFactory {
@GuardedBy("this")
private final Map<Class<? extends NotificationListener>, RuntimeGeneratedInvokerPrototype> invokerClasses = new WeakHashMap<>();
}
});
+ if (Iterables.isEmpty(metadata.getContexts())) {
+ throw new RpcIsNotRoutedException("Service doesn't have routing context associated.");
+ }
+
synchronized (utils) {
final T instance = ClassLoaderUtils.withClassLoader(serviceType.getClassLoader(), routerSupplier(serviceType, metadata));
return new RpcRouterCodegenInstance<T>(name, serviceType, instance, metadata.getContexts());
*/
package org.opendaylight.controller.sal.binding.test;
-import junit.framework.Assert;
-
-
+import static org.junit.Assert.assertNotNull;
import org.opendaylight.yangtools.yang.binding.Augmentable;
import org.opendaylight.yangtools.yang.binding.Augmentation;
public static <T extends Augmentable<T>> void assertHasAugmentation(T object,
Class<? extends Augmentation<T>> augmentation) {
- Assert.assertNotNull(object);
- Assert.assertNotNull("Augmentation " + augmentation.getSimpleName() + " is not present.", object.getAugmentation(augmentation));
+ assertNotNull(object);
+ assertNotNull("Augmentation " + augmentation.getSimpleName() + " is not present.", object.getAugmentation(augmentation));
}
public static <T extends Augmentable<T>> AugmentationVerifier<T> from(T obj) {
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-test-model</artifactId>
- <version>1.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
*/
package org.opendaylight.controller.sal.binding.test.connect.dom;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNotSame;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
import java.math.BigInteger;
import java.util.Collections;
*/
package org.opendaylight.controller.sal.binding.test.connect.dom;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.InputStream;
import java.util.Collections;
*/
package org.opendaylight.controller.sal.binding.test.connect.dom;
-import static junit.framework.Assert.assertNotNull;
+import static org.junit.Assert.assertNotNull;
import java.util.concurrent.Future;
mavenBundle("org.apache.sshd", "sshd-core").versionAsInProject(), //
mavenBundle("org.openexi", "nagasena").versionAsInProject(), //
mavenBundle("org.openexi", "nagasena-rta").versionAsInProject(), //
- mavenBundle(CONTROLLER + ".thirdparty", "ganymed").versionAsInProject(), //
mavenBundle(CONTROLLER, "netconf-mapping-api").versionAsInProject(), //
mavenBundle(CONTROLLER, "config-persister-impl").versionAsInProject(), //
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
- <version>2.0.1</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<dependency>
<groupId>xmlunit</groupId>
<artifactId>xmlunit</artifactId>
- <version>1.5</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<dependency>
<groupId>xmlunit</groupId>
<artifactId>xmlunit</artifactId>
- <version>1.5</version>
</dependency>
<dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
- <version>2.0.1</version>
</dependency>
<dependency>
<groupId>com.codahale.metrics</groupId>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+
+/**
+ * DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence
+ * API.
+ */
+public interface DataPersistenceProvider {
+ /**
+ * @return false if recovery is not applicable. In that case the provider is not persistent and may not have
+ * anything to be recovered
+ */
+ boolean isRecoveryApplicable();
+
+ /**
+ * Persist a journal entry.
+ *
+ * @param o
+ * @param procedure
+ * @param <T>
+ */
+ <T> void persist(T o, Procedure<T> procedure);
+
+ /**
+ * Save a snapshot
+ *
+ * @param o
+ */
+ void saveSnapshot(Object o);
+
+ /**
+ * Delete snapshots based on the criteria
+ *
+ * @param criteria
+ */
+ void deleteSnapshots(SnapshotSelectionCriteria criteria);
+
+ /**
+ * Delete journal entries up to the sequence number
+ *
+ * @param sequenceNumber
+ */
+ void deleteMessages(long sequenceNumber);
+
+}
*/
public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
+ //this is used in the metric name. Some transient actors do not have defined names
+ private String actorNameOverride;
+
public AbstractUntypedActorWithMetering() {
if (isMetricsCaptureEnabled())
getContext().become(new MeteringBehavior(this));
}
+ public AbstractUntypedActorWithMetering(String actorNameOverride){
+ this.actorNameOverride = actorNameOverride;
+ if (isMetricsCaptureEnabled())
+ getContext().become(new MeteringBehavior(this));
+ }
+
private boolean isMetricsCaptureEnabled(){
CommonConfig config = new CommonConfig(getContext().system().settings().config());
return config.isMetricCaptureEnabled();
}
+
+ public String getActorNameOverride() {
+ return actorNameOverride;
+ }
}
import akka.event.Logging;
import akka.event.LoggingAdapter;
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
}
unhandled(message);
}
+
+ protected class PersistentDataProvider implements DataPersistenceProvider {
+
+ public PersistentDataProvider(){
+
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return true;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ AbstractUntypedPersistentActor.this.persist(o, procedure);
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ AbstractUntypedPersistentActor.this.saveSnapshot(o);
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ AbstractUntypedPersistentActor.this.deleteSnapshots(criteria);
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ AbstractUntypedPersistentActor.this.deleteMessages(sequenceNumber);
+ }
+ }
+
+ protected class NonPersistentDataProvider implements DataPersistenceProvider {
+
+ public NonPersistentDataProvider(){
+
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ try {
+ procedure.apply(o);
+ } catch (Exception e) {
+ LOG.error(e, "An unexpected error occurred");
+ }
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+
+ }
+ }
}
public T mailboxCapacity(int capacity) {
Preconditions.checkArgument(capacity > 0, "mailbox capacity must be >0");
- Map<String, Object> boundedMailbox = (Map) configHolder.get(TAG_MAILBOX);
+ Map<String, Object> boundedMailbox = (Map<String, Object>) configHolder.get(TAG_MAILBOX);
boundedMailbox.put(TAG_MAILBOX_CAPACITY, capacity);
return (T)this;
}
Duration pushTimeout = Duration.create(timeout);
Preconditions.checkArgument(pushTimeout.isFinite(), "invalid value for mailbox push timeout");
- Map<String, Object> boundedMailbox = (Map) configHolder.get(TAG_MAILBOX);
+ Map<String, Object> boundedMailbox = (Map<String, Object>) configHolder.get(TAG_MAILBOX);
boundedMailbox.put(TAG_MAILBOX_PUSH_TIMEOUT, timeout);
return (T)this;
}
return; //there's no actor to monitor
}
String actorName = owner.get().path().toStringWithoutAddress();
- String metricName = registry.name(actorName, QUEUE_SIZE);
+ String metricName = MetricRegistry.name(actorName, QUEUE_SIZE);
if (registry.getMetrics().containsKey(metricName))
return; //already registered
- Gauge queueSize = getQueueSizeGuage(monitoredQueue);
+ Gauge<Integer> queueSize = getQueueSizeGuage(monitoredQueue);
registerQueueSizeMetric(metricName, queueSize);
}
public static class MeteredMessageQueue extends BoundedDequeBasedMailbox.MessageQueue {
+ private static final long serialVersionUID = 1L;
public MeteredMessageQueue(int capacity, FiniteDuration pushTimeOut) {
super(capacity, pushTimeOut);
}
}
- private Gauge getQueueSizeGuage(final MeteredMessageQueue monitoredQueue ){
+ private Gauge<Integer> getQueueSizeGuage(final MeteredMessageQueue monitoredQueue ){
return new Gauge<Integer>() {
@Override
public Integer getValue() {
};
}
- private void registerQueueSizeMetric(String metricName, Gauge metric){
+ private void registerQueueSizeMetric(String metricName, Gauge<Integer> metric){
try {
registry.register(metricName,metric);
} catch (IllegalArgumentException e) {
private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance().getMetricsRegistry();
private final String MSG_PROCESSING_RATE = "msg-rate";
- private String actorName;
+ private String actorQualifiedName;
private Timer msgProcessingTimer;
/**
*
* @param actor whose behaviour needs to be metered
*/
- public MeteringBehavior(UntypedActor actor){
+ public MeteringBehavior(AbstractUntypedActorWithMetering actor){
Preconditions.checkArgument(actor != null, "actor must not be null");
+ this.meteredActor = actor;
+ String actorName = actor.getActorNameOverride() != null ? actor.getActorNameOverride()
+ : actor.getSelf().path().name();
+ init(actorName);
+ }
+
+ public MeteringBehavior(UntypedActor actor){
+ Preconditions.checkArgument(actor != null, "actor must not be null");
this.meteredActor = actor;
- actorName = meteredActor.getSelf().path().toStringWithoutAddress();
- final String msgProcessingTime = MetricRegistry.name(actorName, MSG_PROCESSING_RATE);
+
+ String actorName = actor.getSelf().path().name();
+ init(actorName);
+ }
+
+ private void init(String actorName){
+ actorQualifiedName = new StringBuilder(meteredActor.getSelf().path().parent().toStringWithoutAddress()).
+ append("/").append(actorName).toString();
+
+ final String msgProcessingTime = MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE);
msgProcessingTimer = METRICREGISTRY.timer(msgProcessingTime);
}
final String messageType = message.getClass().getSimpleName();
final String msgProcessingTimeByMsgType =
- MetricRegistry.name(actorName, MSG_PROCESSING_RATE, messageType);
+ MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE, messageType);
final Timer msgProcessingTimerByMsgType = METRICREGISTRY.timer(msgProcessingTimeByMsgType);
import akka.actor.ActorRef;
-public class Monitor {
+import java.io.Serializable;
+
+public class Monitor implements Serializable {
+ private static final long serialVersionUID = 1L;
private final ActorRef actorRef;
public Monitor(ActorRef actorRef){
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * This class is intended for testing purposes. It simply counts down a CountDownLatch in each method.
+ * This class really should be under src/test/java, but that made it problematic to use in other projects.
+ */
+public class DataPersistenceProviderMonitor implements DataPersistenceProvider {
+
+ // Each latch is counted down when the corresponding provider method is
+ // invoked, letting a test await a specific persistence call. The defaults
+ // expect exactly one invocation; tests may replace them via the setters.
+ // Note: stray empty statements (";;") removed from the initializers below.
+ private CountDownLatch persistLatch = new CountDownLatch(1);
+ private CountDownLatch saveSnapshotLatch = new CountDownLatch(1);
+ private CountDownLatch deleteSnapshotsLatch = new CountDownLatch(1);
+ private CountDownLatch deleteMessagesLatch = new CountDownLatch(1);
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ // Monitoring stub: recovery is never applicable.
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ // NOTE: the callback procedure is intentionally NOT invoked; only the
+ // fact that persist() was called is recorded.
+ persistLatch.countDown();
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ saveSnapshotLatch.countDown();
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ deleteSnapshotsLatch.countDown();
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ deleteMessagesLatch.countDown();
+ }
+
+ // Setters allow tests to install latches with custom counts before exercising
+ // the code under test.
+ public void setPersistLatch(CountDownLatch persistLatch) {
+ this.persistLatch = persistLatch;
+ }
+
+ public void setSaveSnapshotLatch(CountDownLatch saveSnapshotLatch) {
+ this.saveSnapshotLatch = saveSnapshotLatch;
+ }
+
+ public void setDeleteSnapshotsLatch(CountDownLatch deleteSnapshotsLatch) {
+ this.deleteSnapshotsLatch = deleteSnapshotsLatch;
+ }
+
+ public void setDeleteMessagesLatch(CountDownLatch deleteMessagesLatch) {
+ this.deleteMessagesLatch = deleteMessagesLatch;
+ }
+}
public class NormalizedNodeGetter implements
NormalizedNodeVisitor {
private final String path;
- NormalizedNode output;
+ NormalizedNode<?, ?> output;
public NormalizedNodeGetter(String path){
Preconditions.checkNotNull(path);
}
@Override
- public void visitNode(int level, String parentPath, NormalizedNode normalizedNode) {
+ public void visitNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode) {
String nodePath = parentPath + "/"+ PathUtils.toString(normalizedNode.getIdentifier());
if(nodePath.toString().equals(path)){
}
}
- public NormalizedNode getOutput(){
+ public NormalizedNode<?, ?> getOutput(){
return output;
}
}
String newParentPath = parentPath + "/" + node.getIdentifier().toString();
final Iterable<? extends NormalizedNode<?, ?>> value = node.getValue();
- for(NormalizedNode normalizedNode : value){
+ for(NormalizedNode<?, ?> normalizedNode : value){
if(normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer){
navigateNormalizedNodeContainerMixin(level + 1, newParentPath, (NormalizedNodeContainer) normalizedNode);
} else {
private void navigateNormalizedNode(int level, String parentPath, NormalizedNode<?,?> normalizedNode){
if(normalizedNode instanceof DataContainerNode){
- final DataContainerNode<?> dataContainerNode = (DataContainerNode) normalizedNode;
+ final DataContainerNode<?> dataContainerNode = (DataContainerNode<?>) normalizedNode;
navigateDataContainerNode(level + 1, parentPath, dataContainerNode);
} else {
}
@Override
- public void visitNode(int level, String parentPath, NormalizedNode normalizedNode) {
+ public void visitNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode) {
System.out.println(spaces((level) * 4) + normalizedNode.getClass().toString() + ":" + normalizedNode.getIdentifier());
if(normalizedNode instanceof LeafNode || normalizedNode instanceof LeafSetEntryNode){
System.out.println(spaces((level) * 4) + " parentPath = " + parentPath);
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public interface NormalizedNodeVisitor {
- public void visitNode(int level, String parentPath, NormalizedNode normalizedNode);
+ public void visitNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode);
}
* @param node
* @return
*/
- public static NormalizedNodeMessages.Node serialize(NormalizedNode node){
+ public static NormalizedNodeMessages.Node serialize(NormalizedNode<?, ?> node){
Preconditions.checkNotNull(node, "node should not be null");
return new Serializer(node).serialize();
}
- public static Serializer newSerializer(NormalizedNode node) {
+ public static Serializer newSerializer(NormalizedNode<?, ?> node) {
Preconditions.checkNotNull(node, "node should not be null");
return new Serializer(node);
}
* @param node
* @return
*/
- public static NormalizedNode deSerialize(NormalizedNodeMessages.Node node) {
+ public static NormalizedNode<?, ?> deSerialize(NormalizedNodeMessages.Node node) {
Preconditions.checkNotNull(node, "node should not be null");
return new DeSerializer(null, node).deSerialize();
}
public static class Serializer extends QNameSerializationContextImpl
implements NormalizedNodeSerializationContext {
- private final NormalizedNode node;
+ private final NormalizedNode<?, ?> node;
private NormalizedNodeMessages.InstanceIdentifier serializedPath;
- private Serializer(NormalizedNode node) {
+ private Serializer(NormalizedNode<?, ?> node) {
this.node = node;
}
}
private NormalizedNodeMessages.Node.Builder serialize(
- NormalizedNode node) {
+ NormalizedNode<?, ?> node) {
NormalizedNodeMessages.Node.Builder builder =
NormalizedNodeMessages.Node.newBuilder();
ValueSerializer.serialize(builder, this, value);
} else if (value instanceof Iterable) {
- Iterable iterable = (Iterable) value;
+ Iterable<?> iterable = (Iterable<?>) value;
for (Object o : iterable) {
if (o instanceof NormalizedNode) {
- builder.addChild(serialize((NormalizedNode) o));
+ builder.addChild(serialize((NormalizedNode<?, ?>) o));
}
}
} else if (value instanceof NormalizedNode) {
- builder.addChild(serialize((NormalizedNode) value));
+ builder.addChild(serialize((NormalizedNode<?, ?>) value));
} else {
static {
deSerializationFunctions.put(CONTAINER_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode>
deSerializationFunctions.put(LEAF_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, Object, LeafNode<Object>>
deSerializationFunctions.put(MAP_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
CollectionNodeBuilder<MapEntryNode, MapNode>
deSerializationFunctions.put(MAP_ENTRY_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
deSerializationFunctions.put(AUGMENTATION_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
DataContainerNodeBuilder<YangInstanceIdentifier.AugmentationIdentifier, AugmentationNode>
deSerializationFunctions.put(LEAF_SET_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
ListNodeBuilder<Object, LeafSetEntryNode<Object>>
deSerializationFunctions.put(LEAF_SET_ENTRY_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeWithValue, Object, LeafSetEntryNode<Object>>
deSerializationFunctions.put(CHOICE_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ChoiceNode>
deSerializationFunctions.put(ORDERED_LEAF_SET_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
ListNodeBuilder<Object, LeafSetEntryNode<Object>>
deSerializationFunctions.put(ORDERED_MAP_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
CollectionNodeBuilder<MapEntryNode, OrderedMapNode>
deSerializationFunctions.put(UNKEYED_LIST_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode>
deSerializationFunctions.put(UNKEYED_LIST_ENTRY_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, UnkeyedListEntryNode>
deSerializationFunctions.put(ANY_XML_NODE_TYPE,
new DeSerializationFunction() {
- @Override public NormalizedNode apply(
+ @Override public NormalizedNode<?, ?> apply(
DeSerializer deSerializer,
NormalizedNodeMessages.Node node) {
NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, Node<?>, AnyXmlNode>
return deserializedPath;
}
- public NormalizedNode deSerialize() {
- NormalizedNode deserializedNode = deSerialize(node);
+ public NormalizedNode<?, ?> deSerialize() {
+ NormalizedNode<?, ?> deserializedNode = deSerialize(node);
if(path != null) {
deserializedPath = InstanceIdentifierUtils.fromSerializable(path, this);
}
return deserializedNode;
}
- private NormalizedNode deSerialize(NormalizedNodeMessages.Node node){
+ private NormalizedNode<?, ?> deSerialize(NormalizedNodeMessages.Node node){
Preconditions.checkNotNull(node, "node should not be null");
DeSerializationFunction deSerializationFunction = deSerializationFunctions.get(
}
- private NormalizedNode buildCollectionNode(
+ private NormalizedNode<?, ?> buildCollectionNode(
CollectionNodeBuilder builder,
NormalizedNodeMessages.Node node) {
}
- private NormalizedNode buildListNode(
+ private NormalizedNode<?, ?> buildListNode(
ListNodeBuilder<Object, LeafSetEntryNode<Object>> builder,
NormalizedNodeMessages.Node node) {
builder.withNodeIdentifier(toNodeIdentifier(node.getPathArgument()));
return builder.build();
}
- private NormalizedNode buildDataContainer(DataContainerNodeBuilder builder, NormalizedNodeMessages.Node node){
+ private NormalizedNode<?, ?> buildDataContainer(DataContainerNodeBuilder builder, NormalizedNodeMessages.Node node){
for(NormalizedNodeMessages.Node child : node.getChildList()){
builder.withChild((DataContainerChild<?, ?>) deSerialize(child));
return builder.build();
}
- private NormalizedNode buildNormalizedNode(NormalizedNodeAttrBuilder builder, NormalizedNodeMessages.Node node){
+ private NormalizedNode<?, ?> buildNormalizedNode(NormalizedNodeAttrBuilder builder, NormalizedNodeMessages.Node node){
builder.withValue(ValueSerializer.deSerialize(this, node));
}
private static interface DeSerializationFunction {
- NormalizedNode apply(DeSerializer deserializer, NormalizedNodeMessages.Node node);
+ NormalizedNode<?, ?> apply(DeSerializer deserializer, NormalizedNodeMessages.Node node);
}
}
}
UNKEYED_LIST_ENTRY_NODE_TYPE,
ANY_XML_NODE_TYPE;
- public static NormalizedNodeType getSerializableNodeType(NormalizedNode node){
+ public static NormalizedNodeType getSerializableNodeType(NormalizedNode<?, ?> node){
Preconditions.checkNotNull(node, "node should not be null");
if(node instanceof LeafNode){
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
-import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
-import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
+import com.google.protobuf.ByteString;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.HashSet;
import java.util.Set;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class ValueSerializer {
public static void serialize(NormalizedNodeMessages.Node.Builder builder,
builder.setInstanceIdentifierValue(
InstanceIdentifierUtils.toSerializable((YangInstanceIdentifier) value, context));
} else if(value instanceof Set) {
- Set set = (Set) value;
- if(!set.isEmpty()){
- for(Object o : set){
- if(o instanceof String){
+ Set<?> set = (Set<?>) value;
+ if (!set.isEmpty()) {
+ for (Object o : set) {
+ if (o instanceof String) {
builder.addBitsValue(o.toString());
} else {
throw new IllegalArgumentException("Expected value type to be Bits but was : " +
- value.toString());
+ value.toString());
}
}
}
+ } else if(value instanceof byte[]){
+ builder.setBytesValue(ByteString.copyFrom((byte[]) value));
} else {
builder.setValue(value.toString());
}
QNameSerializationContext context, Object value){
builder.setType(ValueType.getSerializableType(value).ordinal());
- builder.setValue(value.toString());
+
+ if(value instanceof YangInstanceIdentifier) {
+ builder.setInstanceIdentifierValue(
+ InstanceIdentifierUtils.toSerializable((YangInstanceIdentifier) value, context));
+ } else if(value instanceof Set) {
+ Set<?> set = (Set<?>) value;
+ if (!set.isEmpty()) {
+ for (Object o : set) {
+ if (o instanceof String) {
+ builder.addBitsValue(o.toString());
+ } else {
+ throw new IllegalArgumentException("Expected value type to be Bits but was : " +
+ value.toString());
+ }
+ }
+ }
+ } else if(value instanceof byte[]){
+ builder.setBytesValue(ByteString.copyFrom((byte[]) value));
+ } else {
+ builder.setValue(value.toString());
+ }
}
public static Object deSerialize(QNameDeSerializationContext context,
return InstanceIdentifierUtils.fromSerializable(
node.getInstanceIdentifierValue(), context);
} else if(node.getIntValueType() == ValueType.BITS_TYPE.ordinal()){
- return new HashSet(node.getBitsValueList());
+ return new HashSet<>(node.getBitsValueList());
+ } else if(node.getIntValueType() == ValueType.BINARY_TYPE.ordinal()){
+ return node.getBytesValue().toByteArray();
}
return deSerializeBasicTypes(node.getIntValueType(), node.getValue());
}
public static Object deSerialize(QNameDeSerializationContext context,
NormalizedNodeMessages.PathArgumentAttribute attribute) {
+
+ if(attribute.getType() == ValueType.YANG_IDENTIFIER_TYPE.ordinal()){
+ return InstanceIdentifierUtils.fromSerializable(
+ attribute.getInstanceIdentifierValue(), context);
+ } else if(attribute.getType() == ValueType.BITS_TYPE.ordinal()){
+ return new HashSet<>(attribute.getBitsValueList());
+ } else if(attribute.getType() == ValueType.BINARY_TYPE.ordinal()){
+ return attribute.getBytesValue().toByteArray();
+ }
return deSerializeBasicTypes(attribute.getType(), attribute.getValue());
}
YANG_IDENTIFIER_TYPE,
STRING_TYPE,
BIG_INTEGER_TYPE,
- BIG_DECIMAL_TYPE;
+ BIG_DECIMAL_TYPE,
+ BINARY_TYPE;
- private static Map<Class, ValueType> types = new HashMap<>();
+ private static Map<Class<?>, ValueType> types = new HashMap<>();
static {
types.put(String.class, STRING_TYPE);
types.put(Short.class,SHORT_TYPE);
types.put(BigInteger.class, BIG_INTEGER_TYPE);
types.put(BigDecimal.class, BIG_DECIMAL_TYPE);
+ types.put(byte[].class, BINARY_TYPE);
}
public static final ValueType getSerializableType(Object node){
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+// One-byte discriminators written to the serialized stream before each
+// NormalizedNode; END_NODE marks the end of a parent node's child list.
+// Values must stay in sync between the stream writer and
+// NormalizedNodeInputStreamReader.
+public class NodeTypes {
+
+ public static final byte LEAF_NODE = 1;
+ public static final byte LEAF_SET = 2;
+ public static final byte LEAF_SET_ENTRY_NODE = 3;
+ public static final byte CONTAINER_NODE = 4;
+ public static final byte UNKEYED_LIST = 5;
+ public static final byte UNKEYED_LIST_ITEM = 6;
+ public static final byte MAP_NODE = 7;
+ public static final byte MAP_ENTRY_NODE = 8;
+ public static final byte ORDERED_MAP_NODE = 9;
+ public static final byte CHOICE_NODE = 10;
+ public static final byte AUGMENTATION_NODE = 11;
+ public static final byte ANY_XML_NODE = 12;
+ public static final byte END_NODE = 13;
+
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * NormalizedNodeInputStreamReader reads the byte stream and constructs the normalized node, including its child nodes.
+ * This process proceeds recursively, where each NodeTypes marker signifies the start of an object, except END_NODE.
+ * If a node can have children, then that node's end is calculated based on appearance of END_NODE.
+ *
+ */
+
+public class NormalizedNodeInputStreamReader implements NormalizedNodeStreamReader {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeInputStreamReader.class);
+
+ private static final String REVISION_ARG = "?revision=";
+
+ private final DataInputStream reader;
+
+ private final Map<Integer, String> codedStringMap = new HashMap<>();
+
+ private QName lastLeafSetQName;
+
+ /**
+ * Wraps the supplied stream in a DataInputStream for typed primitive reads.
+ *
+ * @param stream the serialized NormalizedNode byte stream; must not be null
+ */
+ public NormalizedNodeInputStreamReader(InputStream stream) throws IOException {
+ Preconditions.checkNotNull(stream);
+ reader = new DataInputStream(stream);
+ }
+
+ // Reads the next node from the stream. Dispatches on the leading NodeTypes
+ // byte: returns null on END_NODE (used by the add*Children loops as the
+ // terminator); AUGMENTATION_NODE and LEAF_SET_ENTRY_NODE / MAP_ENTRY_NODE
+ // need special identifiers, everything else uses a plain NodeIdentifier and
+ // is delegated to readNodeIdentifierDependentNode().
+ @Override
+ public NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+ NormalizedNode<?, ?> node = null;
+
+ // each node should start with a byte
+ byte nodeType = reader.readByte();
+
+ if(nodeType == NodeTypes.END_NODE) {
+ LOG.debug("End node reached. return");
+ return null;
+ }
+ else if(nodeType == NodeTypes.AUGMENTATION_NODE) {
+ LOG.debug("Reading augmentation node. will create augmentation identifier");
+
+ YangInstanceIdentifier.AugmentationIdentifier identifier =
+ new YangInstanceIdentifier.AugmentationIdentifier(readQNameSet());
+ DataContainerNodeBuilder<YangInstanceIdentifier.AugmentationIdentifier, AugmentationNode> augmentationBuilder =
+ Builders.augmentationBuilder().withNodeIdentifier(identifier);
+ augmentationBuilder = addDataContainerChildren(augmentationBuilder);
+ node = augmentationBuilder.build();
+
+ } else {
+ if(nodeType == NodeTypes.LEAF_SET_ENTRY_NODE) {
+ LOG.debug("Reading leaf set entry node. Will create NodeWithValue instance identifier");
+
+ // Read the object value
+ Object value = readObject();
+
+ // lastLeafSetQName was recorded by the enclosing addLeafSetChildren() call.
+ YangInstanceIdentifier.NodeWithValue nodeWithValue = new YangInstanceIdentifier.NodeWithValue(
+ lastLeafSetQName, value);
+ node = Builders.leafSetEntryBuilder().withNodeIdentifier(nodeWithValue).
+ withValue(value).build();
+
+ } else if(nodeType == NodeTypes.MAP_ENTRY_NODE) {
+ LOG.debug("Reading map entry node. Will create node identifier with predicates.");
+
+ QName qName = readQName();
+ YangInstanceIdentifier.NodeIdentifierWithPredicates nodeIdentifier =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(qName, readKeyValueMap());
+ DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode> mapEntryBuilder
+ = Builders.mapEntryBuilder().withNodeIdentifier(nodeIdentifier);
+
+ mapEntryBuilder = (DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates,
+ MapEntryNode>)addDataContainerChildren(mapEntryBuilder);
+ node = mapEntryBuilder.build();
+
+ } else {
+ LOG.debug("Creating standard node identifier. ");
+
+ QName qName = readQName();
+ YangInstanceIdentifier.NodeIdentifier identifier = new YangInstanceIdentifier.NodeIdentifier(qName);
+ node = readNodeIdentifierDependentNode(nodeType, identifier);
+
+ }
+ }
+ return node;
+ }
+
+ // Builds the node kinds whose path argument is a plain NodeIdentifier.
+ // Container-like kinds recursively read children until END_NODE.
+ // Returns null for an unrecognized nodeType byte.
+ private NormalizedNode<?, ?> readNodeIdentifierDependentNode(byte nodeType, YangInstanceIdentifier.NodeIdentifier identifier)
+ throws IOException {
+
+ switch(nodeType) {
+ case NodeTypes.LEAF_NODE :
+ LOG.debug("Read leaf node");
+ // Read the object value
+ NormalizedNodeAttrBuilder leafBuilder = Builders.leafBuilder();
+ return leafBuilder.withNodeIdentifier(identifier).withValue(readObject()).build();
+
+ case NodeTypes.ANY_XML_NODE :
+ LOG.debug("Read xml node");
+ Node<?> value = (Node<?>) readObject();
+ // NOTE(review): the identifier is not applied to the anyXml builder
+ // here, unlike the other cases -- confirm this is intended.
+ return Builders.anyXmlBuilder().withValue(value).build();
+
+ case NodeTypes.MAP_NODE :
+ LOG.debug("Read map node");
+ CollectionNodeBuilder<MapEntryNode, MapNode> mapBuilder = Builders.mapBuilder().withNodeIdentifier(identifier);
+ mapBuilder = addMapNodeChildren(mapBuilder);
+ return mapBuilder.build();
+
+ case NodeTypes.CHOICE_NODE :
+ LOG.debug("Read choice node");
+ DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ChoiceNode> choiceBuilder =
+ Builders.choiceBuilder().withNodeIdentifier(identifier);
+ choiceBuilder = addDataContainerChildren(choiceBuilder);
+ return choiceBuilder.build();
+
+ case NodeTypes.ORDERED_MAP_NODE :
+ LOG.debug("Reading ordered map node");
+ CollectionNodeBuilder<MapEntryNode, OrderedMapNode> orderedMapBuilder =
+ Builders.orderedMapBuilder().withNodeIdentifier(identifier);
+ orderedMapBuilder = addMapNodeChildren(orderedMapBuilder);
+ return orderedMapBuilder.build();
+
+ case NodeTypes.UNKEYED_LIST :
+ LOG.debug("Read unkeyed list node");
+ CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode> unkeyedListBuilder =
+ Builders.unkeyedListBuilder().withNodeIdentifier(identifier);
+ unkeyedListBuilder = addUnkeyedListChildren(unkeyedListBuilder);
+ return unkeyedListBuilder.build();
+
+ case NodeTypes.UNKEYED_LIST_ITEM :
+ LOG.debug("Read unkeyed list item node");
+ DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, UnkeyedListEntryNode> unkeyedListEntryBuilder
+ = Builders.unkeyedListEntryBuilder().withNodeIdentifier(identifier);
+
+ unkeyedListEntryBuilder = (DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, UnkeyedListEntryNode>)
+ addDataContainerChildren(unkeyedListEntryBuilder);
+ return unkeyedListEntryBuilder.build();
+
+ case NodeTypes.CONTAINER_NODE :
+ LOG.debug("Read container node");
+ DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> containerBuilder =
+ Builders.containerBuilder().withNodeIdentifier(identifier);
+
+ containerBuilder = (DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode>)
+ addDataContainerChildren(containerBuilder);
+ return containerBuilder.build();
+
+ case NodeTypes.LEAF_SET :
+ LOG.debug("Read leaf set node");
+ ListNodeBuilder<Object, LeafSetEntryNode<Object>> leafSetBuilder =
+ Builders.leafSetBuilder().withNodeIdentifier(identifier);
+ leafSetBuilder = addLeafSetChildren(identifier.getNodeType(), leafSetBuilder);
+ return leafSetBuilder.build();
+
+ default :
+ // Unknown type byte: caller receives null rather than an exception.
+ return null;
+ }
+ }
+
+ // Reconstructs a QName from three coded strings written in the order
+ // localName, namespace, revision, producing the canonical
+ // "(namespace?revision=rev)localName" form consumed by QNameFactory.
+ private QName readQName() throws IOException {
+ // Read in the same sequence of writing
+ String localName = readCodedString();
+ String namespace = readCodedString();
+ String revision = readCodedString();
+ String qName;
+ // Not using StringBuilder as the compiler optimizes string concatenation with +
+ if(revision != null){
+ qName = "(" + namespace+ REVISION_ARG + revision + ")" +localName;
+ } else {
+ qName = "(" + namespace + ")" +localName;
+ }
+
+ return QNameFactory.create(qName);
+ }
+
+
+ // Reads one string using the writer's interning scheme: a boolean flag says
+ // whether an int index into previously seen strings follows (dictionary hit)
+ // or a fresh UTF string that is then added to the dictionary. The dictionary
+ // key is the insertion order, which must match the writer's numbering.
+ private String readCodedString() throws IOException {
+ boolean readFromMap = reader.readBoolean();
+ if(readFromMap) {
+ return codedStringMap.get(reader.readInt());
+ } else {
+ String value = reader.readUTF();
+ if(value != null) {
+ codedStringMap.put(Integer.valueOf(codedStringMap.size()), value);
+ }
+ return value;
+ }
+ }
+
+ // Reads a count-prefixed set of QNames (used for augmentation identifiers).
+ private Set<QName> readQNameSet() throws IOException{
+ // Read the children count
+ int count = reader.readInt();
+ Set<QName> children = new HashSet<>(count);
+ for(int i = 0; i<count; i++) {
+ children.add(readQName());
+ }
+ return children;
+ }
+
+ // Reads a count-prefixed map of QName -> value pairs (the key predicates of
+ // a map entry node).
+ private Map<QName, Object> readKeyValueMap() throws IOException {
+ int count = reader.readInt();
+ Map<QName, Object> keyValueMap = new HashMap<>(count);
+
+ for(int i = 0; i<count; i++) {
+ keyValueMap.put(readQName(), readObject());
+ }
+
+ return keyValueMap;
+ }
+
+ // Reads one leaf value: a ValueTypes discriminator byte followed by the
+ // value in that type's encoding. Big decimals/integers travel as UTF
+ // strings; a YangInstanceIdentifier travels as a count-prefixed list of
+ // path arguments. Returns null for an unrecognized type byte.
+ private Object readObject() throws IOException {
+ byte objectType = reader.readByte();
+ switch(objectType) {
+ case ValueTypes.BITS_TYPE:
+ return readObjSet();
+
+ case ValueTypes.BOOL_TYPE :
+ return reader.readBoolean();
+
+ case ValueTypes.BYTE_TYPE :
+ return reader.readByte();
+
+ case ValueTypes.INT_TYPE :
+ return reader.readInt();
+
+ case ValueTypes.LONG_TYPE :
+ return reader.readLong();
+
+ case ValueTypes.QNAME_TYPE :
+ return readQName();
+
+ case ValueTypes.SHORT_TYPE :
+ return reader.readShort();
+
+ case ValueTypes.STRING_TYPE :
+ return reader.readUTF();
+
+ case ValueTypes.BIG_DECIMAL_TYPE :
+ return new BigDecimal(reader.readUTF());
+
+ case ValueTypes.BIG_INTEGER_TYPE :
+ return new BigInteger(reader.readUTF());
+
+ case ValueTypes.YANG_IDENTIFIER_TYPE :
+ int size = reader.readInt();
+
+ List<YangInstanceIdentifier.PathArgument> pathArguments = new ArrayList<>(size);
+
+ for(int i=0; i<size; i++) {
+ pathArguments.add(readPathArgument());
+ }
+ return YangInstanceIdentifier.create(pathArguments);
+
+ default :
+ return null;
+ }
+ }
+
+ // Reads a count-prefixed set of coded strings (the bit names of a
+ // bits-typed leaf value).
+ private Set<String> readObjSet() throws IOException {
+ int count = reader.readInt();
+ Set<String> children = new HashSet<>(count);
+ for(int i = 0; i<count; i++) {
+ children.add(readCodedString());
+ }
+ return children;
+ }
+
+ // Reads one YangInstanceIdentifier path argument: a PathArgumentTypes byte
+ // followed by that argument kind's payload. Returns null for an
+ // unrecognized type byte.
+ private YangInstanceIdentifier.PathArgument readPathArgument() throws IOException {
+ // read Type
+ int type = reader.readByte();
+
+ switch(type) {
+
+ case PathArgumentTypes.AUGMENTATION_IDENTIFIER :
+ return new YangInstanceIdentifier.AugmentationIdentifier(readQNameSet());
+
+ case PathArgumentTypes.NODE_IDENTIFIER :
+ return new YangInstanceIdentifier.NodeIdentifier(readQName());
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_PREDICATES :
+ return new YangInstanceIdentifier.NodeIdentifierWithPredicates(readQName(), readKeyValueMap());
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_VALUE :
+ return new YangInstanceIdentifier.NodeWithValue(readQName(), readObject());
+
+ default :
+ return null;
+ }
+ }
+
+ // Accumulates leaf-set entry children into the given builder.
+ // Side effect: records nodeType in lastLeafSetQName so that nested
+ // leaf-set-entry decoding can see the enclosing set's QName.
+ // Reads children until readNormalizedNode() returns null — presumably the
+ // end-node marker in the stream; confirm in readNormalizedNode (not visible
+ // in this chunk).
+ private ListNodeBuilder<Object, LeafSetEntryNode<Object>> addLeafSetChildren(QName nodeType,
+ ListNodeBuilder<Object, LeafSetEntryNode<Object>> builder)
+ throws IOException {
+
+ LOG.debug("Reading children of leaf set");
+
+ lastLeafSetQName = nodeType;
+
+ LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNode();
+
+ while(child != null) {
+ builder.withChild(child);
+ child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ }
+ return builder;
+ }
+
+ // Accumulates unkeyed-list entry children into the given builder, reading
+ // until readNormalizedNode() returns null (presumably the end-node marker —
+ // confirm in readNormalizedNode, not visible in this chunk).
+ private CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode> addUnkeyedListChildren(
+ CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode> builder)
+ throws IOException{
+
+ LOG.debug("Reading children of unkeyed list");
+ UnkeyedListEntryNode child = (UnkeyedListEntryNode)readNormalizedNode();
+
+ while(child != null) {
+ builder.withChild(child);
+ child = (UnkeyedListEntryNode)readNormalizedNode();
+ }
+ return builder;
+ }
+
+ // Accumulates data-container children (e.g. leaf nodes inside a container)
+ // into the given builder, reading until readNormalizedNode() returns null.
+ private DataContainerNodeBuilder addDataContainerChildren(DataContainerNodeBuilder builder)
+ throws IOException {
+ LOG.debug("Reading data container (leaf nodes) nodes");
+
+ DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?> child =
+ (DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>) readNormalizedNode();
+
+ while(child != null) {
+ builder.withChild(child);
+ child =
+ (DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>) readNormalizedNode();
+ }
+ return builder;
+ }
+
+
+ // Accumulates map-entry children into the given map-node builder, reading
+ // until readNormalizedNode() returns null.
+ private CollectionNodeBuilder addMapNodeChildren(CollectionNodeBuilder builder)
+ throws IOException {
+ LOG.debug("Reading map node children");
+ MapEntryNode child = (MapEntryNode)readNormalizedNode();
+
+ while(child != null){
+ builder.withChild(child);
+ child = (MapEntryNode)readNormalizedNode();
+ }
+
+ return builder;
+ }
+
+
+ // Closes the underlying input stream.
+ @Override
+ public void close() throws IOException {
+ reader.close();
+ }
+
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * NormalizedNodeOutputStreamWriter is used by the distributed datastore to send a normalized node
+ * over a stream.
+ * A stream writer wrapper around this class writes node objects to the stream in a recursive
+ * manner. For example, if you have a ContainerNode with two LeafNode children, you first call
+ * {@link #startContainerNode(YangInstanceIdentifier.NodeIdentifier, int)}, then call
+ * {@link #leafNode(YangInstanceIdentifier.NodeIdentifier, Object)} twice, and finally
+ * {@link #endNode()} to end the container node.
+ *
+ * For each node, the node type is also written to the stream, which helps in reconstructing the
+ * object while reading.
+ */
+
+public class NormalizedNodeOutputStreamWriter implements NormalizedNodeStreamWriter{
+
+ private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeOutputStreamWriter.class);
+
+ // Sink for the encoded byte stream.
+ private final DataOutputStream writer;
+
+ // Per-writer string-interning table: the first occurrence of a string is
+ // written inline and implicitly assigned the next integer code; subsequent
+ // occurrences are written as that code only. The reader must rebuild the
+ // same table in the same order for decoding to work.
+ private final Map<String, Integer> stringCodeMap = new HashMap<>();
+
+ public NormalizedNodeOutputStreamWriter(OutputStream stream) throws IOException {
+ Preconditions.checkNotNull(stream);
+ writer = new DataOutputStream(stream);
+ }
+
+ @Override
+ public void leafNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Writing a new leaf node");
+ startNode(name.getNodeType(), NodeTypes.LEAF_NODE);
+
+ writeObject(value);
+ }
+
+ @Override
+ public void startLeafSet(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new leaf set");
+
+ startNode(name.getNodeType(), NodeTypes.LEAF_SET);
+ }
+
+ @Override
+ public void leafSetEntryNode(Object value) throws IOException, IllegalArgumentException {
+ LOG.debug("Writing a new leaf set entry node");
+
+ // Leaf-set entries carry no QName of their own on the wire; the reader
+ // takes it from the enclosing leaf set.
+ writer.writeByte(NodeTypes.LEAF_SET_ENTRY_NODE);
+ writeObject(value);
+ }
+
+ @Override
+ public void startContainerNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+
+ LOG.debug("Starting a new container node");
+
+ startNode(name.getNodeType(), NodeTypes.CONTAINER_NODE);
+ }
+
+ @Override
+ public void startUnkeyedList(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new unkeyed list");
+
+ startNode(name.getNodeType(), NodeTypes.UNKEYED_LIST);
+ }
+
+ @Override
+ public void startUnkeyedListItem(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalStateException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new unkeyed list item");
+
+ startNode(name.getNodeType(), NodeTypes.UNKEYED_LIST_ITEM);
+ }
+
+ @Override
+ public void startMapNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new map node");
+
+ startNode(name.getNodeType(), NodeTypes.MAP_NODE);
+ }
+
+ @Override
+ public void startMapEntryNode(YangInstanceIdentifier.NodeIdentifierWithPredicates identifier, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(identifier, "Node identifier should not be null");
+ LOG.debug("Starting a new map entry node");
+ startNode(identifier.getNodeType(), NodeTypes.MAP_ENTRY_NODE);
+
+ // Map entries additionally carry their key predicates.
+ writeKeyValueMap(identifier.getKeyValues());
+
+ }
+
+ @Override
+ public void startOrderedMapNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new ordered map node");
+
+ startNode(name.getNodeType(), NodeTypes.ORDERED_MAP_NODE);
+ }
+
+ @Override
+ public void startChoiceNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new choice node");
+
+ startNode(name.getNodeType(), NodeTypes.CHOICE_NODE);
+ }
+
+ @Override
+ public void startAugmentationNode(YangInstanceIdentifier.AugmentationIdentifier identifier) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(identifier, "Node identifier should not be null");
+ LOG.debug("Starting a new augmentation node");
+
+ // Augmentations have no single QName; their identity is the set of
+ // possible child QNames.
+ writer.writeByte(NodeTypes.AUGMENTATION_NODE);
+ writeQNameSet(identifier.getPossibleChildNames());
+ }
+
+ @Override
+ public void anyxmlNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Writing a new xml node");
+
+ startNode(name.getNodeType(), NodeTypes.ANY_XML_NODE);
+
+ writeObject(value);
+ }
+
+ @Override
+ public void endNode() throws IOException, IllegalStateException {
+ LOG.debug("Ending the node");
+
+ // The end-of-node marker lets the reader terminate child loops.
+ writer.writeByte(NodeTypes.END_NODE);
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.close();
+ }
+
+ @Override
+ public void flush() throws IOException {
+ writer.flush();
+ }
+
+ // Common prefix of most nodes on the wire: node-type byte then the QName.
+ private void startNode(final QName qName, byte nodeType) throws IOException {
+
+ Preconditions.checkNotNull(qName, "QName of node identifier should not be null.");
+ // First write the type of node
+ writer.writeByte(nodeType);
+ // Write Start Tag
+ writeQName(qName);
+ }
+
+ // A QName is encoded as three coded strings: local name, namespace,
+ // formatted revision. The reader must consume them in this order.
+ private void writeQName(QName qName) throws IOException {
+
+ writeCodedString(qName.getLocalName());
+ writeCodedString(qName.getNamespace().toString());
+ writeCodedString(qName.getFormattedRevision());
+ }
+
+ // Writes a string through the interning table: a boolean flag (true =
+ // "already seen, code follows"; false = "new, literal UTF follows").
+ // NOTE(review): when key is null the false branch calls writer.writeUTF(null),
+ // which throws NPE — presumably relevant for QNames without a revision if
+ // getFormattedRevision() can return null; confirm.
+ private void writeCodedString(String key) throws IOException {
+ Integer value = stringCodeMap.get(key);
+
+ if(value != null) {
+ writer.writeBoolean(true);
+ writer.writeInt(value);
+ } else {
+ if(key != null) {
+ stringCodeMap.put(key, Integer.valueOf(stringCodeMap.size()));
+ }
+ writer.writeBoolean(false);
+ writer.writeUTF(key);
+ }
+ }
+
+ // Writes a set of strings (BITS values): count then each coded string.
+ // Non-String members are rejected because the reader only decodes strings.
+ private void writeObjSet(Set<?> set) throws IOException {
+ if(!set.isEmpty()){
+ writer.writeInt(set.size());
+ for(Object o : set){
+ if(o instanceof String){
+ writeCodedString(o.toString());
+ } else {
+ throw new IllegalArgumentException("Expected value type to be String but was : " +
+ o.toString());
+ }
+ }
+ } else {
+ writer.writeInt(0);
+ }
+ }
+
+ // A YangInstanceIdentifier is a length-prefixed list of path arguments.
+ private void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException {
+ Iterable<YangInstanceIdentifier.PathArgument> pathArguments = identifier.getPathArguments();
+ int size = Iterables.size(pathArguments);
+ writer.writeInt(size);
+
+ for(YangInstanceIdentifier.PathArgument pathArgument : pathArguments) {
+ writePathArgument(pathArgument);
+ }
+ }
+
+ // Encodes one path argument: PathArgumentTypes discriminator byte, then
+ // type-specific content. Mirrored by the reader's readPathArgument().
+ private void writePathArgument(YangInstanceIdentifier.PathArgument pathArgument) throws IOException {
+
+ byte type = PathArgumentTypes.getSerializablePathArgumentType(pathArgument);
+
+ writer.writeByte(type);
+
+ switch(type) {
+ case PathArgumentTypes.NODE_IDENTIFIER :
+
+ YangInstanceIdentifier.NodeIdentifier nodeIdentifier =
+ (YangInstanceIdentifier.NodeIdentifier) pathArgument;
+
+ writeQName(nodeIdentifier.getNodeType());
+ break;
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_PREDICATES:
+
+ YangInstanceIdentifier.NodeIdentifierWithPredicates nodeIdentifierWithPredicates =
+ (YangInstanceIdentifier.NodeIdentifierWithPredicates) pathArgument;
+ writeQName(nodeIdentifierWithPredicates.getNodeType());
+
+ writeKeyValueMap(nodeIdentifierWithPredicates.getKeyValues());
+ break;
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_VALUE :
+
+ YangInstanceIdentifier.NodeWithValue nodeWithValue =
+ (YangInstanceIdentifier.NodeWithValue) pathArgument;
+
+ writeQName(nodeWithValue.getNodeType());
+ writeObject(nodeWithValue.getValue());
+ break;
+
+ case PathArgumentTypes.AUGMENTATION_IDENTIFIER :
+
+ YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier =
+ (YangInstanceIdentifier.AugmentationIdentifier) pathArgument;
+
+ // No Qname in augmentation identifier
+ writeQNameSet(augmentationIdentifier.getPossibleChildNames());
+ break;
+ default :
+ throw new IllegalStateException("Unknown node identifier type is found : " + pathArgument.getClass().toString() );
+ }
+ }
+
+ // Writes a QName->value map: count then (QName, object) pairs; 0 for
+ // null/empty. Mirrored by the reader's readKeyValueMap().
+ private void writeKeyValueMap(Map<QName, Object> keyValueMap) throws IOException {
+ if(keyValueMap != null && !keyValueMap.isEmpty()) {
+ writer.writeInt(keyValueMap.size());
+ Set<QName> qNameSet = keyValueMap.keySet();
+
+ for(QName qName : qNameSet) {
+ writeQName(qName);
+ writeObject(keyValueMap.get(qName));
+ }
+ } else {
+ writer.writeInt(0);
+ }
+ }
+
+ private void writeQNameSet(Set<QName> children) throws IOException {
+ // Write each child's qname separately, if list is empty send count as 0
+ if(children != null && !children.isEmpty()) {
+ writer.writeInt(children.size());
+ for(QName qName : children) {
+ writeQName(qName);
+ }
+ } else {
+ LOG.debug("augmentation node does not have any child");
+ writer.writeInt(0);
+ }
+ }
+
+ // Encodes a single value: ValueTypes discriminator byte then payload.
+ // Any type without an explicit case (e.g. String, BigInteger, BigDecimal)
+ // falls through to its toString() form via writeUTF.
+ private void writeObject(Object value) throws IOException {
+
+ byte type = ValueTypes.getSerializableType(value);
+ // Write object type first
+ writer.writeByte(type);
+
+ switch(type) {
+ case ValueTypes.BOOL_TYPE:
+ writer.writeBoolean((Boolean) value);
+ break;
+ case ValueTypes.QNAME_TYPE:
+ writeQName((QName) value);
+ break;
+ case ValueTypes.INT_TYPE:
+ writer.writeInt((Integer) value);
+ break;
+ case ValueTypes.BYTE_TYPE:
+ writer.writeByte((Byte) value);
+ break;
+ case ValueTypes.LONG_TYPE:
+ writer.writeLong((Long) value);
+ break;
+ case ValueTypes.SHORT_TYPE:
+ writer.writeShort((Short) value);
+ break;
+ case ValueTypes.BITS_TYPE:
+ writeObjSet((Set<?>) value);
+ break;
+ case ValueTypes.YANG_IDENTIFIER_TYPE:
+ writeYangInstanceIdentifier((YangInstanceIdentifier) value);
+ break;
+ default:
+ writer.writeUTF(value.toString());
+ break;
+ }
+ }
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+import java.io.IOException;
+
+
+public interface NormalizedNodeStreamReader extends AutoCloseable {
+
+ /**
+  * Reads and reconstructs the next NormalizedNode from the underlying stream.
+  *
+  * @return the decoded node
+  * @throws IOException if the stream cannot be read or is malformed
+  */
+ NormalizedNode<?, ?> readNormalizedNode() throws IOException;
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.collect.ImmutableMap;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+import java.util.Map;
+
+/**
+ * Wire-format discriminator bytes for YangInstanceIdentifier.PathArgument
+ * implementations, shared by the normalized-node stream writer and reader.
+ */
+public class PathArgumentTypes {
+ public static final byte AUGMENTATION_IDENTIFIER = 1;
+ public static final byte NODE_IDENTIFIER = 2;
+ public static final byte NODE_IDENTIFIER_WITH_VALUE = 3;
+ public static final byte NODE_IDENTIFIER_WITH_PREDICATES = 4;
+
+ // Made final: this shared lookup table must never be reassigned.
+ private static final Map<Class<?>, Byte> CLASS_TO_ENUM_MAP =
+ ImmutableMap.<Class<?>, Byte>builder().
+ put(YangInstanceIdentifier.AugmentationIdentifier.class, AUGMENTATION_IDENTIFIER).
+ put(YangInstanceIdentifier.NodeIdentifier.class, NODE_IDENTIFIER).
+ put(YangInstanceIdentifier.NodeIdentifierWithPredicates.class, NODE_IDENTIFIER_WITH_PREDICATES).
+ put(YangInstanceIdentifier.NodeWithValue.class, NODE_IDENTIFIER_WITH_VALUE).build();
+
+ /**
+  * Returns the discriminator byte for the given path argument's concrete class.
+  *
+  * @param pathArgument the path argument to classify
+  * @return one of the byte constants declared above
+  * @throws IllegalArgumentException if the class is not a known PathArgument type
+  */
+ public static byte getSerializablePathArgumentType(YangInstanceIdentifier.PathArgument pathArgument){
+
+ Byte type = CLASS_TO_ENUM_MAP.get(pathArgument.getClass());
+ if(type == null) {
+ throw new IllegalArgumentException("Unknown type of PathArgument = " + pathArgument);
+ }
+
+ return type;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Wire-format discriminator bytes for leaf values, shared by the
+ * normalized-node stream writer and reader.
+ */
+public class ValueTypes {
+ public static final byte SHORT_TYPE = 1;
+ public static final byte BYTE_TYPE = 2;
+ public static final byte INT_TYPE = 3;
+ public static final byte LONG_TYPE = 4;
+ public static final byte BOOL_TYPE = 5;
+ public static final byte QNAME_TYPE = 6;
+ public static final byte BITS_TYPE = 7;
+ public static final byte YANG_IDENTIFIER_TYPE = 8;
+ public static final byte STRING_TYPE = 9;
+ public static final byte BIG_INTEGER_TYPE = 10;
+ public static final byte BIG_DECIMAL_TYPE = 11;
+
+ // Made final: this shared lookup table is populated once in the static
+ // initializer and must never be reassigned.
+ private static final Map<Class<?>, Byte> types = new HashMap<>();
+
+ static {
+ types.put(String.class, Byte.valueOf(STRING_TYPE));
+ types.put(Byte.class, Byte.valueOf(BYTE_TYPE));
+ types.put(Integer.class, Byte.valueOf(INT_TYPE));
+ types.put(Long.class, Byte.valueOf(LONG_TYPE));
+ types.put(Boolean.class, Byte.valueOf(BOOL_TYPE));
+ types.put(QName.class, Byte.valueOf(QNAME_TYPE));
+ // NOTE(review): this entry is effectively dead — node.getClass() never
+ // equals the Set interface itself; concrete Set implementations are
+ // matched by the instanceof fallback in getSerializableType below.
+ types.put(Set.class, Byte.valueOf(BITS_TYPE));
+ types.put(YangInstanceIdentifier.class, Byte.valueOf(YANG_IDENTIFIER_TYPE));
+ types.put(Short.class, Byte.valueOf(SHORT_TYPE));
+ types.put(BigInteger.class, Byte.valueOf(BIG_INTEGER_TYPE));
+ types.put(BigDecimal.class, Byte.valueOf(BIG_DECIMAL_TYPE));
+ }
+
+ /**
+  * Returns the discriminator byte for the given value's runtime type.
+  *
+  * @param node the value to classify; must not be null
+  * @return one of the byte constants declared above
+  * @throws IllegalArgumentException if the value's type is not serializable
+  */
+ public static final byte getSerializableType(Object node){
+ Preconditions.checkNotNull(node, "node should not be null");
+
+ Byte type = types.get(node.getClass());
+ if(type != null) {
+ return type;
+ } else if(node instanceof Set){
+ // Any Set implementation is treated as a YANG bits value.
+ return BITS_TYPE;
+ }
+
+ throw new IllegalArgumentException("Unknown value type " + node.getClass().getSimpleName());
+ }
+}
public class CompositeModificationPayload extends Payload implements
Serializable {
+ private static final long serialVersionUID = 1L;
private final PersistentMessages.CompositeModification modification;
public interface CanCommitTransactionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required string transactionId = 1;
+ // optional string transactionId = 1;
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
boolean hasTransactionId();
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
java.lang.String getTransactionId();
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
com.google.protobuf.ByteString
getTransactionIdBytes();
}
private int bitField0_;
- // required string transactionId = 1;
+ // optional string transactionId = 1;
public static final int TRANSACTIONID_FIELD_NUMBER = 1;
private java.lang.Object transactionId_;
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public boolean hasTransactionId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public java.lang.String getTransactionId() {
java.lang.Object ref = transactionId_;
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public com.google.protobuf.ByteString
getTransactionIdBytes() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (!hasTransactionId()) {
- memoizedIsInitialized = 0;
- return false;
- }
memoizedIsInitialized = 1;
return true;
}
}
public final boolean isInitialized() {
- if (!hasTransactionId()) {
-
- return false;
- }
return true;
}
}
private int bitField0_;
- // required string transactionId = 1;
+ // optional string transactionId = 1;
private java.lang.Object transactionId_ = "";
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public boolean hasTransactionId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public java.lang.String getTransactionId() {
java.lang.Object ref = transactionId_;
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public com.google.protobuf.ByteString
getTransactionIdBytes() {
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder setTransactionId(
java.lang.String value) {
return this;
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder clearTransactionId() {
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder setTransactionIdBytes(
com.google.protobuf.ByteString value) {
public interface AbortTransactionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required string transactionId = 1;
+ // optional string transactionId = 1;
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
boolean hasTransactionId();
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
java.lang.String getTransactionId();
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
com.google.protobuf.ByteString
getTransactionIdBytes();
}
private int bitField0_;
- // required string transactionId = 1;
+ // optional string transactionId = 1;
public static final int TRANSACTIONID_FIELD_NUMBER = 1;
private java.lang.Object transactionId_;
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public boolean hasTransactionId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public java.lang.String getTransactionId() {
java.lang.Object ref = transactionId_;
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public com.google.protobuf.ByteString
getTransactionIdBytes() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (!hasTransactionId()) {
- memoizedIsInitialized = 0;
- return false;
- }
memoizedIsInitialized = 1;
return true;
}
}
public final boolean isInitialized() {
- if (!hasTransactionId()) {
-
- return false;
- }
return true;
}
}
private int bitField0_;
- // required string transactionId = 1;
+ // optional string transactionId = 1;
private java.lang.Object transactionId_ = "";
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public boolean hasTransactionId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public java.lang.String getTransactionId() {
java.lang.Object ref = transactionId_;
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public com.google.protobuf.ByteString
getTransactionIdBytes() {
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder setTransactionId(
java.lang.String value) {
return this;
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder clearTransactionId() {
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder setTransactionIdBytes(
com.google.protobuf.ByteString value) {
public interface CommitTransactionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
- // required string transactionId = 1;
+ // optional string transactionId = 1;
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
boolean hasTransactionId();
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
java.lang.String getTransactionId();
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
com.google.protobuf.ByteString
getTransactionIdBytes();
}
private int bitField0_;
- // required string transactionId = 1;
+ // optional string transactionId = 1;
public static final int TRANSACTIONID_FIELD_NUMBER = 1;
private java.lang.Object transactionId_;
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public boolean hasTransactionId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public java.lang.String getTransactionId() {
java.lang.Object ref = transactionId_;
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public com.google.protobuf.ByteString
getTransactionIdBytes() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
- if (!hasTransactionId()) {
- memoizedIsInitialized = 0;
- return false;
- }
memoizedIsInitialized = 1;
return true;
}
}
public final boolean isInitialized() {
- if (!hasTransactionId()) {
-
- return false;
- }
return true;
}
}
private int bitField0_;
- // required string transactionId = 1;
+ // optional string transactionId = 1;
private java.lang.Object transactionId_ = "";
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public boolean hasTransactionId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public java.lang.String getTransactionId() {
java.lang.Object ref = transactionId_;
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public com.google.protobuf.ByteString
getTransactionIdBytes() {
}
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder setTransactionId(
java.lang.String value) {
return this;
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder clearTransactionId() {
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
- * <code>required string transactionId = 1;</code>
+ * <code>optional string transactionId = 1;</code>
*/
public Builder setTransactionIdBytes(
com.google.protobuf.ByteString value) {
java.lang.String[] descriptorData = {
"\n\014Cohort.proto\022!org.opendaylight.control" +
"ler.mdsal\"-\n\024CanCommitTransaction\022\025\n\rtra" +
- "nsactionId\030\001 \002(\t\".\n\031CanCommitTransaction" +
+ "nsactionId\030\001 \001(\t\".\n\031CanCommitTransaction" +
"Reply\022\021\n\tcanCommit\030\001 \002(\010\")\n\020AbortTransac" +
- "tion\022\025\n\rtransactionId\030\001 \002(\t\"\027\n\025AbortTran" +
+ "tion\022\025\n\rtransactionId\030\001 \001(\t\"\027\n\025AbortTran" +
"sactionReply\"*\n\021CommitTransaction\022\025\n\rtra" +
- "nsactionId\030\001 \002(\t\"\030\n\026CommitTransactionRep" +
+ "nsactionId\030\001 \001(\t\"\030\n\026CommitTransactionRep" +
"ly\"\026\n\024PreCommitTransaction\"\033\n\031PreCommitT" +
"ransactionReplyBZ\n8org.opendaylight.cont" +
"roller.protobuff.messages.cohort3pcB\036Thr",
* <code>optional int32 type = 3;</code>
*/
int getType();
+
+ // optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ boolean hasInstanceIdentifierValue();
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierValue();
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierValueOrBuilder();
+
+ // repeated string bitsValue = 5;
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ java.util.List<java.lang.String>
+ getBitsValueList();
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ int getBitsValueCount();
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ java.lang.String getBitsValue(int index);
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ com.google.protobuf.ByteString
+ getBitsValueBytes(int index);
+
+ // optional bytes bytesValue = 6;
+ /**
+ * <code>optional bytes bytesValue = 6;</code>
+ */
+ boolean hasBytesValue();
+ /**
+ * <code>optional bytes bytesValue = 6;</code>
+ */
+ com.google.protobuf.ByteString getBytesValue();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.PathArgumentAttribute}
type_ = input.readInt32();
break;
}
+ case 34: {
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ subBuilder = instanceIdentifierValue_.toBuilder();
+ }
+ instanceIdentifierValue_ = input.readMessage(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(instanceIdentifierValue_);
+ instanceIdentifierValue_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000008;
+ break;
+ }
+ case 42: {
+ if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ bitsValue_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000010;
+ }
+ bitsValue_.add(input.readBytes());
+ break;
+ }
+ case 50: {
+ bitField0_ |= 0x00000010;
+ bytesValue_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
+ if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ bitsValue_ = new com.google.protobuf.UnmodifiableLazyStringList(bitsValue_);
+ }
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
return type_;
}
+ // optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;
+ public static final int INSTANCEIDENTIFIERVALUE_FIELD_NUMBER = 4;
+ private org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier instanceIdentifierValue_;
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public boolean hasInstanceIdentifierValue() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierValue() {
+ return instanceIdentifierValue_;
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierValueOrBuilder() {
+ return instanceIdentifierValue_;
+ }
+
+ // repeated string bitsValue = 5;
+ public static final int BITSVALUE_FIELD_NUMBER = 5;
+ private com.google.protobuf.LazyStringList bitsValue_;
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public java.util.List<java.lang.String>
+ getBitsValueList() {
+ return bitsValue_;
+ }
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public int getBitsValueCount() {
+ return bitsValue_.size();
+ }
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public java.lang.String getBitsValue(int index) {
+ return bitsValue_.get(index);
+ }
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getBitsValueBytes(int index) {
+ return bitsValue_.getByteString(index);
+ }
+
+ // optional bytes bytesValue = 6;
+ public static final int BYTESVALUE_FIELD_NUMBER = 6;
+ private com.google.protobuf.ByteString bytesValue_;
+ /**
+ * <code>optional bytes bytesValue = 6;</code>
+ */
+ public boolean hasBytesValue() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional bytes bytesValue = 6;</code>
+ */
+ public com.google.protobuf.ByteString getBytesValue() {
+ return bytesValue_;
+ }
+
private void initFields() {
name_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.getDefaultInstance();
value_ = "";
type_ = 0;
+ instanceIdentifierValue_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ bitsValue_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bytesValue_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ if (hasInstanceIdentifierValue()) {
+ if (!getInstanceIdentifierValue().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
memoizedIsInitialized = 1;
return true;
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt32(3, type_);
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeMessage(4, instanceIdentifierValue_);
+ }
+ for (int i = 0; i < bitsValue_.size(); i++) {
+ output.writeBytes(5, bitsValue_.getByteString(i));
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(6, bytesValue_);
+ }
getUnknownFields().writeTo(output);
}
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, type_);
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, instanceIdentifierValue_);
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < bitsValue_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(bitsValue_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getBitsValueList().size();
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(6, bytesValue_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getNameFieldBuilder();
+ getInstanceIdentifierValueFieldBuilder();
}
}
private static Builder create() {
bitField0_ = (bitField0_ & ~0x00000002);
type_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
+ if (instanceIdentifierValueBuilder_ == null) {
+ instanceIdentifierValue_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ } else {
+ instanceIdentifierValueBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000008);
+ bitsValue_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ bytesValue_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000020);
return this;
}
to_bitField0_ |= 0x00000004;
}
result.type_ = type_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ if (instanceIdentifierValueBuilder_ == null) {
+ result.instanceIdentifierValue_ = instanceIdentifierValue_;
+ } else {
+ result.instanceIdentifierValue_ = instanceIdentifierValueBuilder_.build();
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ bitsValue_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ bitsValue_);
+ bitField0_ = (bitField0_ & ~0x00000010);
+ }
+ result.bitsValue_ = bitsValue_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.bytesValue_ = bytesValue_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
if (other.hasType()) {
setType(other.getType());
}
+ if (other.hasInstanceIdentifierValue()) {
+ mergeInstanceIdentifierValue(other.getInstanceIdentifierValue());
+ }
+ if (!other.bitsValue_.isEmpty()) {
+ if (bitsValue_.isEmpty()) {
+ bitsValue_ = other.bitsValue_;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ } else {
+ ensureBitsValueIsMutable();
+ bitsValue_.addAll(other.bitsValue_);
+ }
+ onChanged();
+ }
+ if (other.hasBytesValue()) {
+ setBytesValue(other.getBytesValue());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
+ if (hasInstanceIdentifierValue()) {
+ if (!getInstanceIdentifierValue().isInitialized()) {
+
+ return false;
+ }
+ }
return true;
}
} else {
nameBuilder_.mergeFrom(value);
}
- bitField0_ |= 0x00000001;
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ */
+ public Builder clearName() {
+ if (nameBuilder_ == null) {
+ name_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.getDefaultInstance();
+ onChanged();
+ } else {
+ nameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.Builder getNameBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getNameFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QNameOrBuilder getNameOrBuilder() {
+ if (nameBuilder_ != null) {
+ return nameBuilder_.getMessageOrBuilder();
+ } else {
+ return name_;
+ }
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QNameOrBuilder>
+ getNameFieldBuilder() {
+ if (nameBuilder_ == null) {
+ nameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QNameOrBuilder>(
+ name_,
+ getParentForChildren(),
+ isClean());
+ name_ = null;
+ }
+ return nameBuilder_;
+ }
+
+ // optional string value = 2;
+ private java.lang.Object value_ = "";
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public java.lang.String getValue() {
+ java.lang.Object ref = value_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ value_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public Builder setValue(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string value = 2;</code>
+ */
+ public Builder setValueBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 type = 3;
+ private int type_ ;
+ /**
+ * <code>optional int32 type = 3;</code>
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 type = 3;</code>
+ */
+ public int getType() {
+ return type_;
+ }
+ /**
+ * <code>optional int32 type = 3;</code>
+ */
+ public Builder setType(int value) {
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 type = 3;</code>
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ type_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;
+ private org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier instanceIdentifierValue_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder> instanceIdentifierValueBuilder_;
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public boolean hasInstanceIdentifierValue() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierValue() {
+ if (instanceIdentifierValueBuilder_ == null) {
+ return instanceIdentifierValue_;
+ } else {
+ return instanceIdentifierValueBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public Builder setInstanceIdentifierValue(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier value) {
+ if (instanceIdentifierValueBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ instanceIdentifierValue_ = value;
+ onChanged();
+ } else {
+ instanceIdentifierValueBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public Builder setInstanceIdentifierValue(
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder builderForValue) {
+ if (instanceIdentifierValueBuilder_ == null) {
+ instanceIdentifierValue_ = builderForValue.build();
+ onChanged();
+ } else {
+ instanceIdentifierValueBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000008;
+ return this;
+ }
+ /**
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
+ */
+ public Builder mergeInstanceIdentifierValue(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier value) {
+ if (instanceIdentifierValueBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
+ instanceIdentifierValue_ != org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance()) {
+ instanceIdentifierValue_ =
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.newBuilder(instanceIdentifierValue_).mergeFrom(value).buildPartial();
+ } else {
+ instanceIdentifierValue_ = value;
+ }
+ onChanged();
+ } else {
+ instanceIdentifierValueBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000008;
return this;
}
/**
- * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
*/
- public Builder clearName() {
- if (nameBuilder_ == null) {
- name_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.getDefaultInstance();
+ public Builder clearInstanceIdentifierValue() {
+ if (instanceIdentifierValueBuilder_ == null) {
+ instanceIdentifierValue_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
onChanged();
} else {
- nameBuilder_.clear();
+ instanceIdentifierValueBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
- * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
*/
- public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.Builder getNameBuilder() {
- bitField0_ |= 0x00000001;
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder getInstanceIdentifierValueBuilder() {
+ bitField0_ |= 0x00000008;
onChanged();
- return getNameFieldBuilder().getBuilder();
+ return getInstanceIdentifierValueFieldBuilder().getBuilder();
}
/**
- * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
*/
- public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QNameOrBuilder getNameOrBuilder() {
- if (nameBuilder_ != null) {
- return nameBuilder_.getMessageOrBuilder();
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierValueOrBuilder() {
+ if (instanceIdentifierValueBuilder_ != null) {
+ return instanceIdentifierValueBuilder_.getMessageOrBuilder();
} else {
- return name_;
+ return instanceIdentifierValue_;
}
}
/**
- * <code>optional .org.opendaylight.controller.mdsal.QName name = 1;</code>
+ * <code>optional .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierValue = 4;</code>
+ *
+ * <pre>
+ * Specific values
+ * </pre>
*/
private com.google.protobuf.SingleFieldBuilder<
- org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QNameOrBuilder>
- getNameFieldBuilder() {
- if (nameBuilder_ == null) {
- nameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QName.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.QNameOrBuilder>(
- name_,
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder>
+ getInstanceIdentifierValueFieldBuilder() {
+ if (instanceIdentifierValueBuilder_ == null) {
+ instanceIdentifierValueBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder>(
+ instanceIdentifierValue_,
getParentForChildren(),
isClean());
- name_ = null;
+ instanceIdentifierValue_ = null;
}
- return nameBuilder_;
+ return instanceIdentifierValueBuilder_;
}
- // optional string value = 2;
- private java.lang.Object value_ = "";
+ // repeated string bitsValue = 5;
+ private com.google.protobuf.LazyStringList bitsValue_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureBitsValueIsMutable() {
+ if (!((bitField0_ & 0x00000010) == 0x00000010)) {
+ bitsValue_ = new com.google.protobuf.LazyStringArrayList(bitsValue_);
+ bitField0_ |= 0x00000010;
+ }
+ }
/**
- * <code>optional string value = 2;</code>
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
*/
- public boolean hasValue() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
+ public java.util.List<java.lang.String>
+ getBitsValueList() {
+ return java.util.Collections.unmodifiableList(bitsValue_);
}
/**
- * <code>optional string value = 2;</code>
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
*/
- public java.lang.String getValue() {
- java.lang.Object ref = value_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- value_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
+ public int getBitsValueCount() {
+ return bitsValue_.size();
}
/**
- * <code>optional string value = 2;</code>
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public java.lang.String getBitsValue(int index) {
+ return bitsValue_.get(index);
+ }
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
*/
public com.google.protobuf.ByteString
- getValueBytes() {
- java.lang.Object ref = value_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- value_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
+ getBitsValueBytes(int index) {
+ return bitsValue_.getByteString(index);
}
/**
- * <code>optional string value = 2;</code>
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
*/
- public Builder setValue(
+ public Builder setBitsValue(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBitsValueIsMutable();
+ bitsValue_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public Builder addBitsValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000002;
- value_ = value;
+ ensureBitsValueIsMutable();
+ bitsValue_.add(value);
onChanged();
return this;
}
/**
- * <code>optional string value = 2;</code>
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
*/
- public Builder clearValue() {
- bitField0_ = (bitField0_ & ~0x00000002);
- value_ = getDefaultInstance().getValue();
+ public Builder addAllBitsValue(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureBitsValueIsMutable();
+ super.addAll(values, bitsValue_);
onChanged();
return this;
}
/**
- * <code>optional string value = 2;</code>
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
*/
- public Builder setValueBytes(
+ public Builder clearBitsValue() {
+ bitsValue_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string bitsValue = 5;</code>
+ *
+ * <pre>
+ * intValueType = Bits
+ * </pre>
+ */
+ public Builder addBitsValueBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
- bitField0_ |= 0x00000002;
- value_ = value;
+ ensureBitsValueIsMutable();
+ bitsValue_.add(value);
onChanged();
return this;
}
- // optional int32 type = 3;
- private int type_ ;
+ // optional bytes bytesValue = 6;
+ private com.google.protobuf.ByteString bytesValue_ = com.google.protobuf.ByteString.EMPTY;
/**
- * <code>optional int32 type = 3;</code>
+ * <code>optional bytes bytesValue = 6;</code>
*/
- public boolean hasType() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
+ public boolean hasBytesValue() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
- * <code>optional int32 type = 3;</code>
+ * <code>optional bytes bytesValue = 6;</code>
*/
- public int getType() {
- return type_;
+ public com.google.protobuf.ByteString getBytesValue() {
+ return bytesValue_;
}
/**
- * <code>optional int32 type = 3;</code>
+ * <code>optional bytes bytesValue = 6;</code>
*/
- public Builder setType(int value) {
- bitField0_ |= 0x00000004;
- type_ = value;
+ public Builder setBytesValue(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ bytesValue_ = value;
onChanged();
return this;
}
/**
- * <code>optional int32 type = 3;</code>
+ * <code>optional bytes bytesValue = 6;</code>
*/
- public Builder clearType() {
- bitField0_ = (bitField0_ & ~0x00000004);
- type_ = 0;
+ public Builder clearBytesValue() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ bytesValue_ = getDefaultInstance().getBytesValue();
onChanged();
return this;
}
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ for (int i = 0; i < getAttributeCount(); i++) {
+ if (!getAttribute(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
for (int i = 0; i < getAttributesCount(); i++) {
if (!getAttributes(i).isInitialized()) {
memoizedIsInitialized = 0;
}
public final boolean isInitialized() {
+ for (int i = 0; i < getAttributeCount(); i++) {
+ if (!getAttribute(i).isInitialized()) {
+
+ return false;
+ }
+ }
for (int i = 0; i < getAttributesCount(); i++) {
if (!getAttributes(i).isInitialized()) {
*/
com.google.protobuf.ByteString
getCodeBytes(int index);
+
+ // optional bytes bytesValue = 13;
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ boolean hasBytesValue();
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ com.google.protobuf.ByteString getBytesValue();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.Node}
code_.add(input.readBytes());
break;
}
+ case 106: {
+ bitField0_ |= 0x00000100;
+ bytesValue_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return code_.getByteString(index);
}
+ // optional bytes bytesValue = 13;
+ public static final int BYTESVALUE_FIELD_NUMBER = 13;
+ private com.google.protobuf.ByteString bytesValue_;
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ public boolean hasBytesValue() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ public com.google.protobuf.ByteString getBytesValue() {
+ return bytesValue_;
+ }
+
private void initFields() {
path_ = "";
type_ = "";
instanceIdentifierValue_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
bitsValue_ = com.google.protobuf.LazyStringArrayList.EMPTY;
code_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bytesValue_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
for (int i = 0; i < code_.size(); i++) {
output.writeBytes(12, code_.getByteString(i));
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeBytes(13, bytesValue_);
+ }
getUnknownFields().writeTo(output);
}
size += dataSize;
size += 1 * getCodeList().size();
}
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(13, bytesValue_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
bitField0_ = (bitField0_ & ~0x00000400);
code_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000800);
+ bytesValue_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00001000);
return this;
}
bitField0_ = (bitField0_ & ~0x00000800);
}
result.code_ = code_;
+ if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ result.bytesValue_ = bytesValue_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
onChanged();
}
+ if (other.hasBytesValue()) {
+ setBytesValue(other.getBytesValue());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
return this;
}
+ // optional bytes bytesValue = 13;
+ private com.google.protobuf.ByteString bytesValue_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ public boolean hasBytesValue() {
+ return ((bitField0_ & 0x00001000) == 0x00001000);
+ }
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ public com.google.protobuf.ByteString getBytesValue() {
+ return bytesValue_;
+ }
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ public Builder setBytesValue(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00001000;
+ bytesValue_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes bytesValue = 13;</code>
+ */
+ public Builder clearBytesValue() {
+ bitField0_ = (bitField0_ & ~0x00001000);
+ bytesValue_ = getDefaultInstance().getBytesValue();
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.Node)
}
java.lang.String[] descriptorData = {
"\n\014Common.proto\022!org.opendaylight.control" +
"ler.mdsal\"6\n\tAttribute\022\014\n\004name\030\001 \002(\t\022\r\n\005" +
- "value\030\002 \001(\t\022\014\n\004type\030\003 \001(\t\"l\n\025PathArgumen" +
- "tAttribute\0226\n\004name\030\001 \001(\0132(.org.opendayli" +
- "ght.controller.mdsal.QName\022\r\n\005value\030\002 \001(" +
- "\t\022\014\n\004type\030\003 \001(\005\"N\n\005QName\022\r\n\005value\030\001 \001(\t\022" +
- "\021\n\tnamespace\030\002 \001(\005\022\020\n\010revision\030\003 \001(\005\022\021\n\t" +
- "localName\030\004 \001(\005\"\207\002\n\014PathArgument\022\r\n\005valu" +
- "e\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022:\n\010nodeType\030\003 \001(\0132" +
- "(.org.opendaylight.controller.mdsal.QNam",
- "e\022K\n\tattribute\030\004 \003(\01328.org.opendaylight." +
- "controller.mdsal.PathArgumentAttribute\022@" +
+ "value\030\002 \001(\t\022\014\n\004type\030\003 \001(\t\"\353\001\n\025PathArgume" +
+ "ntAttribute\0226\n\004name\030\001 \001(\0132(.org.opendayl" +
+ "ight.controller.mdsal.QName\022\r\n\005value\030\002 \001" +
+ "(\t\022\014\n\004type\030\003 \001(\005\022V\n\027instanceIdentifierVa" +
+ "lue\030\004 \001(\01325.org.opendaylight.controller." +
+ "mdsal.InstanceIdentifier\022\021\n\tbitsValue\030\005 " +
+ "\003(\t\022\022\n\nbytesValue\030\006 \001(\014\"N\n\005QName\022\r\n\005valu" +
+ "e\030\001 \001(\t\022\021\n\tnamespace\030\002 \001(\005\022\020\n\010revision\030\003",
+ " \001(\005\022\021\n\tlocalName\030\004 \001(\005\"\207\002\n\014PathArgument" +
+ "\022\r\n\005value\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022:\n\010nodeTyp" +
+ "e\030\003 \001(\0132(.org.opendaylight.controller.md" +
+ "sal.QName\022K\n\tattribute\030\004 \003(\01328.org.opend" +
+ "aylight.controller.mdsal.PathArgumentAtt" +
+ "ribute\022@\n\nattributes\030\005 \003(\0132,.org.openday" +
+ "light.controller.mdsal.Attribute\022\017\n\007intT" +
+ "ype\030\006 \001(\005\"f\n\022InstanceIdentifier\022B\n\targum" +
+ "ents\030\001 \003(\0132/.org.opendaylight.controller" +
+ ".mdsal.PathArgument\022\014\n\004code\030\002 \003(\t\"\271\003\n\004No",
+ "de\022\014\n\004path\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022E\n\014pathAr" +
+ "gument\030\003 \001(\0132/.org.opendaylight.controll" +
+ "er.mdsal.PathArgument\022\017\n\007intType\030\004 \001(\005\022@" +
"\n\nattributes\030\005 \003(\0132,.org.opendaylight.co" +
- "ntroller.mdsal.Attribute\022\017\n\007intType\030\006 \001(" +
- "\005\"f\n\022InstanceIdentifier\022B\n\targuments\030\001 \003" +
- "(\0132/.org.opendaylight.controller.mdsal.P" +
- "athArgument\022\014\n\004code\030\002 \003(\t\"\245\003\n\004Node\022\014\n\004pa" +
- "th\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022E\n\014pathArgument\030\003" +
- " \001(\0132/.org.opendaylight.controller.mdsal" +
- ".PathArgument\022\017\n\007intType\030\004 \001(\005\022@\n\nattrib",
- "utes\030\005 \003(\0132,.org.opendaylight.controller" +
- ".mdsal.Attribute\0226\n\005child\030\006 \003(\0132\'.org.op" +
- "endaylight.controller.mdsal.Node\022\r\n\005valu" +
- "e\030\007 \001(\t\022\021\n\tvalueType\030\010 \001(\t\022\024\n\014intValueTy" +
- "pe\030\t \001(\005\022V\n\027instanceIdentifierValue\030\n \001(" +
- "\01325.org.opendaylight.controller.mdsal.In" +
- "stanceIdentifier\022\021\n\tbitsValue\030\013 \003(\t\022\014\n\004c" +
- "ode\030\014 \003(\t\"`\n\tContainer\022\022\n\nparentPath\030\001 \002" +
- "(\t\022?\n\016normalizedNode\030\002 \001(\0132\'.org.openday" +
- "light.controller.mdsal.Node\"\246\001\n\014NodeMapE",
- "ntry\022U\n\026instanceIdentifierPath\030\001 \002(\01325.o" +
- "rg.opendaylight.controller.mdsal.Instanc" +
- "eIdentifier\022?\n\016normalizedNode\030\002 \001(\0132\'.or" +
- "g.opendaylight.controller.mdsal.Node\"N\n\007" +
- "NodeMap\022C\n\nmapEntries\030\001 \003(\0132/.org.openda" +
- "ylight.controller.mdsal.NodeMapEntryBO\n5" +
- "org.opendaylight.controller.protobuff.me" +
- "ssages.commonB\026NormalizedNodeMessages"
+ "ntroller.mdsal.Attribute\0226\n\005child\030\006 \003(\0132" +
+ "\'.org.opendaylight.controller.mdsal.Node" +
+ "\022\r\n\005value\030\007 \001(\t\022\021\n\tvalueType\030\010 \001(\t\022\024\n\014in" +
+ "tValueType\030\t \001(\005\022V\n\027instanceIdentifierVa" +
+ "lue\030\n \001(\01325.org.opendaylight.controller." +
+ "mdsal.InstanceIdentifier\022\021\n\tbitsValue\030\013 ",
+ "\003(\t\022\014\n\004code\030\014 \003(\t\022\022\n\nbytesValue\030\r \001(\014\"`\n" +
+ "\tContainer\022\022\n\nparentPath\030\001 \002(\t\022?\n\016normal" +
+ "izedNode\030\002 \001(\0132\'.org.opendaylight.contro" +
+ "ller.mdsal.Node\"\246\001\n\014NodeMapEntry\022U\n\026inst" +
+ "anceIdentifierPath\030\001 \002(\01325.org.opendayli" +
+ "ght.controller.mdsal.InstanceIdentifier\022" +
+ "?\n\016normalizedNode\030\002 \001(\0132\'.org.opendaylig" +
+ "ht.controller.mdsal.Node\"N\n\007NodeMap\022C\n\nm" +
+ "apEntries\030\001 \003(\0132/.org.opendaylight.contr" +
+ "oller.mdsal.NodeMapEntryBO\n5org.opendayl",
+ "ight.controller.protobuff.messages.commo" +
+ "nB\026NormalizedNodeMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_PathArgumentAttribute_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_PathArgumentAttribute_descriptor,
- new java.lang.String[] { "Name", "Value", "Type", });
+ new java.lang.String[] { "Name", "Value", "Type", "InstanceIdentifierValue", "BitsValue", "BytesValue", });
internal_static_org_opendaylight_controller_mdsal_QName_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_org_opendaylight_controller_mdsal_QName_fieldAccessorTable = new
internal_static_org_opendaylight_controller_mdsal_Node_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_Node_descriptor,
- new java.lang.String[] { "Path", "Type", "PathArgument", "IntType", "Attributes", "Child", "Value", "ValueType", "IntValueType", "InstanceIdentifierValue", "BitsValue", "Code", });
+ new java.lang.String[] { "Path", "Type", "PathArgument", "IntType", "Attributes", "Child", "Value", "ValueType", "IntValueType", "InstanceIdentifierValue", "BitsValue", "Code", "BytesValue", });
internal_static_org_opendaylight_controller_mdsal_Container_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_org_opendaylight_controller_mdsal_Container_fieldAccessorTable = new
*/
com.google.protobuf.ByteString
getTransactionChainIdBytes();
+
+ // optional int32 messageVersion = 4;
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ boolean hasMessageVersion();
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ int getMessageVersion();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransaction}
transactionChainId_ = input.readBytes();
break;
}
+ case 32: {
+ bitField0_ |= 0x00000008;
+ messageVersion_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
}
}
+ // optional int32 messageVersion = 4;
+ public static final int MESSAGEVERSION_FIELD_NUMBER = 4;
+ private int messageVersion_;
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ public boolean hasMessageVersion() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ public int getMessageVersion() {
+ return messageVersion_;
+ }
+
private void initFields() {
transactionId_ = "";
transactionType_ = 0;
transactionChainId_ = "";
+ messageVersion_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getTransactionChainIdBytes());
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt32(4, messageVersion_);
+ }
getUnknownFields().writeTo(output);
}
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getTransactionChainIdBytes());
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, messageVersion_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
bitField0_ = (bitField0_ & ~0x00000002);
transactionChainId_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
+ messageVersion_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
to_bitField0_ |= 0x00000004;
}
result.transactionChainId_ = transactionChainId_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.messageVersion_ = messageVersion_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
transactionChainId_ = other.transactionChainId_;
onChanged();
}
+ if (other.hasMessageVersion()) {
+ setMessageVersion(other.getMessageVersion());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
return this;
}
+ // optional int32 messageVersion = 4;
+ private int messageVersion_ ;
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ public boolean hasMessageVersion() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ public int getMessageVersion() {
+ return messageVersion_;
+ }
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ public Builder setMessageVersion(int value) {
+ bitField0_ |= 0x00000008;
+ messageVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 messageVersion = 4;</code>
+ */
+ public Builder clearMessageVersion() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ messageVersion_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CreateTransaction)
}
*/
com.google.protobuf.ByteString
getTransactionIdBytes();
+
+ // optional int32 messageVersion = 3;
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ boolean hasMessageVersion();
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ int getMessageVersion();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransactionReply}
transactionId_ = input.readBytes();
break;
}
+ case 24: {
+ bitField0_ |= 0x00000004;
+ messageVersion_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
}
}
+ // optional int32 messageVersion = 3;
+ public static final int MESSAGEVERSION_FIELD_NUMBER = 3;
+ private int messageVersion_;
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ public boolean hasMessageVersion() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ public int getMessageVersion() {
+ return messageVersion_;
+ }
+
private void initFields() {
transactionActorPath_ = "";
transactionId_ = "";
+ messageVersion_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, getTransactionIdBytes());
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt32(3, messageVersion_);
+ }
getUnknownFields().writeTo(output);
}
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getTransactionIdBytes());
}
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, messageVersion_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
bitField0_ = (bitField0_ & ~0x00000001);
transactionId_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
+ messageVersion_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
to_bitField0_ |= 0x00000002;
}
result.transactionId_ = transactionId_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.messageVersion_ = messageVersion_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
transactionId_ = other.transactionId_;
onChanged();
}
+ if (other.hasMessageVersion()) {
+ setMessageVersion(other.getMessageVersion());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
return this;
}
+ // optional int32 messageVersion = 3;
+ private int messageVersion_ ;
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ public boolean hasMessageVersion() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ public int getMessageVersion() {
+ return messageVersion_;
+ }
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ public Builder setMessageVersion(int value) {
+ bitField0_ |= 0x00000004;
+ messageVersion_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 messageVersion = 3;</code>
+ */
+ public Builder clearMessageVersion() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ messageVersion_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CreateTransactionReply)
}
java.lang.String[] descriptorData = {
"\n\026ShardTransaction.proto\022!org.opendaylig" +
"ht.controller.mdsal\032\014Common.proto\"\022\n\020Clo" +
- "seTransaction\"\027\n\025CloseTransactionReply\"_" +
+ "seTransaction\"\027\n\025CloseTransactionReply\"w" +
"\n\021CreateTransaction\022\025\n\rtransactionId\030\001 \002" +
"(\t\022\027\n\017transactionType\030\002 \002(\005\022\032\n\022transacti" +
- "onChainId\030\003 \001(\t\"M\n\026CreateTransactionRepl" +
- "y\022\034\n\024transactionActorPath\030\001 \002(\t\022\025\n\rtrans" +
- "actionId\030\002 \002(\t\"\022\n\020ReadyTransaction\"*\n\025Re" +
- "adyTransactionReply\022\021\n\tactorPath\030\001 \002(\t\"l" +
- "\n\nDeleteData\022^\n\037instanceIdentifierPathAr",
+ "onChainId\030\003 \001(\t\022\026\n\016messageVersion\030\004 \001(\005\"" +
+ "e\n\026CreateTransactionReply\022\034\n\024transaction" +
+ "ActorPath\030\001 \002(\t\022\025\n\rtransactionId\030\002 \002(\t\022\026" +
+ "\n\016messageVersion\030\003 \001(\005\"\022\n\020ReadyTransacti" +
+ "on\"*\n\025ReadyTransactionReply\022\021\n\tactorPath",
+ "\030\001 \002(\t\"l\n\nDeleteData\022^\n\037instanceIdentifi" +
+ "erPathArguments\030\001 \002(\01325.org.opendaylight" +
+ ".controller.mdsal.InstanceIdentifier\"\021\n\017" +
+ "DeleteDataReply\"j\n\010ReadData\022^\n\037instanceI" +
+ "dentifierPathArguments\030\001 \002(\01325.org.opend" +
+ "aylight.controller.mdsal.InstanceIdentif" +
+ "ier\"P\n\rReadDataReply\022?\n\016normalizedNode\030\001" +
+ " \001(\0132\'.org.opendaylight.controller.mdsal" +
+ ".Node\"\254\001\n\tWriteData\022^\n\037instanceIdentifie" +
+ "rPathArguments\030\001 \002(\01325.org.opendaylight.",
+ "controller.mdsal.InstanceIdentifier\022?\n\016n" +
+ "ormalizedNode\030\002 \002(\0132\'.org.opendaylight.c" +
+ "ontroller.mdsal.Node\"\020\n\016WriteDataReply\"\254" +
+ "\001\n\tMergeData\022^\n\037instanceIdentifierPathAr" +
"guments\030\001 \002(\01325.org.opendaylight.control" +
- "ler.mdsal.InstanceIdentifier\"\021\n\017DeleteDa" +
- "taReply\"j\n\010ReadData\022^\n\037instanceIdentifie" +
- "rPathArguments\030\001 \002(\01325.org.opendaylight." +
- "controller.mdsal.InstanceIdentifier\"P\n\rR" +
- "eadDataReply\022?\n\016normalizedNode\030\001 \001(\0132\'.o" +
- "rg.opendaylight.controller.mdsal.Node\"\254\001" +
- "\n\tWriteData\022^\n\037instanceIdentifierPathArg" +
- "uments\030\001 \002(\01325.org.opendaylight.controll" +
- "er.mdsal.InstanceIdentifier\022?\n\016normalize",
- "dNode\030\002 \002(\0132\'.org.opendaylight.controlle" +
- "r.mdsal.Node\"\020\n\016WriteDataReply\"\254\001\n\tMerge" +
- "Data\022^\n\037instanceIdentifierPathArguments\030" +
- "\001 \002(\01325.org.opendaylight.controller.mdsa" +
- "l.InstanceIdentifier\022?\n\016normalizedNode\030\002" +
- " \002(\0132\'.org.opendaylight.controller.mdsal" +
- ".Node\"\020\n\016MergeDataReply\"l\n\nDataExists\022^\n" +
- "\037instanceIdentifierPathArguments\030\001 \002(\01325" +
- ".org.opendaylight.controller.mdsal.Insta" +
- "nceIdentifier\"!\n\017DataExistsReply\022\016\n\006exis",
- "ts\030\001 \002(\010BV\n:org.opendaylight.controller." +
- "protobuff.messages.transactionB\030ShardTra" +
- "nsactionMessages"
+ "ler.mdsal.InstanceIdentifier\022?\n\016normaliz" +
+ "edNode\030\002 \002(\0132\'.org.opendaylight.controll" +
+ "er.mdsal.Node\"\020\n\016MergeDataReply\"l\n\nDataE" +
+ "xists\022^\n\037instanceIdentifierPathArguments" +
+ "\030\001 \002(\01325.org.opendaylight.controller.mds",
+ "al.InstanceIdentifier\"!\n\017DataExistsReply" +
+ "\022\016\n\006exists\030\001 \002(\010BV\n:org.opendaylight.con" +
+ "troller.protobuff.messages.transactionB\030" +
+ "ShardTransactionMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_CreateTransaction_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CreateTransaction_descriptor,
- new java.lang.String[] { "TransactionId", "TransactionType", "TransactionChainId", });
+ new java.lang.String[] { "TransactionId", "TransactionType", "TransactionChainId", "MessageVersion", });
internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_descriptor,
- new java.lang.String[] { "TransactionActorPath", "TransactionId", });
+ new java.lang.String[] { "TransactionActorPath", "TransactionId", "MessageVersion", });
internal_static_org_opendaylight_controller_mdsal_ReadyTransaction_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_org_opendaylight_controller_mdsal_ReadyTransaction_fieldAccessorTable = new
message CanCommitTransaction{
- required string transactionId = 1;
+ // NOTE(review): relaxed from required to optional — presumably so peers that
+ // omit the field stay wire-compatible; confirm readers tolerate its absence.
+ optional string transactionId = 1;
}
message CanCommitTransactionReply{
}
message AbortTransaction{
- required string transactionId = 1;
+ // NOTE(review): same required -> optional relaxation as CanCommitTransaction.
+ optional string transactionId = 1;
}
message AbortTransactionReply {
}
message CommitTransaction{
- required string transactionId = 1;
+ // NOTE(review): same required -> optional relaxation as CanCommitTransaction.
+ optional string transactionId = 1;
}
message CommitTransactionReply{
optional QName name =1;
optional string value=2;
optional int32 type=3;
+ // Specific values
+ optional InstanceIdentifier instanceIdentifierValue = 4; // intValueType = YangInstanceIdentifier
+ repeated string bitsValue = 5; // intValueType = Bits
+ optional bytes bytesValue = 6;
+
}
repeated string bitsValue = 11; // intValueType = Bits
repeated string code = 12; // A list of string codes which can be used for any repeated strings in the NormalizedNode
+
+ optional bytes bytesValue = 13;
}
message Container{
required string transactionId = 1;
required int32 transactionType =2;
optional string transactionChainId = 3;
+ optional int32 messageVersion = 4;
}
message CreateTransactionReply{
-required string transactionActorPath = 1;
-required string transactionId = 2;
-
+ required string transactionActorPath = 1;
+ required string transactionId = 2;
+ // Version of the message protocol spoken by the sender; optional so
+ // pre-versioning peers (which omit it) still parse. Unset decodes as 0.
+ optional int32 messageVersion = 3;
}
message ReadyTransaction{
}
message ReadyTransactionReply{
-required string actorPath = 1;
+ required string actorPath = 1;
}
message DeleteData {
public static Props props(final ReentrantLock lock){
return Props.create(new Creator<PingPongActor>(){
+ private static final long serialVersionUID = 1L;
@Override
public PingPongActor create() throws Exception {
return new PingPongActor(lock);
import java.util.List;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class NormalizedNodeToNodeCodecTest {
PathUtils.toString(YangInstanceIdentifier.builder().build()), documentOne);
// Validate the value of id can be retrieved from the normalized node
- NormalizedNode output = normalizedNodeGetter.getOutput();
+ NormalizedNode<?, ?> output = normalizedNodeGetter.getOutput();
assertNotNull(output);
package org.opendaylight.controller.cluster.datastore.node.utils;
-import junit.framework.Assert;
+import static org.junit.Assert.assertTrue;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
NodeIdentifierFactory
.getArgument("AugmentationIdentifier{childNames=[(urn:opendaylight:flow:table:statistics?revision=2013-12-15)flow-table-statistics]}");
- Assert
- .assertTrue(argument instanceof YangInstanceIdentifier.AugmentationIdentifier);
+ assertTrue(argument instanceof YangInstanceIdentifier.AugmentationIdentifier);
}
package org.opendaylight.controller.cluster.datastore.node.utils;
+import com.google.common.collect.ImmutableSet;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Map;
import java.util.Set;
-
import static junit.framework.TestCase.assertEquals;
public class PathUtilsTest {
}
private YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier(){
- Set<QName> childNames = new HashSet();
- childNames.add(QNameFactory.create("(urn:opendaylight:flow:table:statistics?revision=2013-12-15)flow-table-statistics"));
+ Set<QName> childNames = ImmutableSet.of(QNameFactory.create("(urn:opendaylight:flow:table:statistics?revision=2013-12-15)flow-table-statistics"));
return new YangInstanceIdentifier.AugmentationIdentifier(childNames);
}
import org.opendaylight.controller.cluster.datastore.util.TestModel;
import org.opendaylight.yangtools.yang.common.QName;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.TestCase.assertEquals;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
public class QNameFactoryTest {
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+import com.google.common.base.Optional;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
public class NormalizedNodeSerializerTest {
start = System.nanoTime();
- NormalizedNode actualNode =
+ NormalizedNode<?, ?> actualNode =
NormalizedNodeSerializer.deSerialize(expected);
System.out.println("DeSerialize Time = " + (System.nanoTime() - start)/1000000);
// created by serializing the original node and deSerializing it back.
assertEquals(expectedNode, actualNode);
+ byte[] binaryData = new byte[5];
+ for(byte i=0;i<5;i++){
+ binaryData[i] = i;
+ }
+
+ ContainerNode node1 = TestModel.createBaseTestContainerBuilder()
+ .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATE_QNAME, binaryData))
+ .build();
+
+ NormalizedNodeMessages.Node serializedNode1 = NormalizedNodeSerializer
+ .serialize(node1);
+
+ ContainerNode node2 =
+ (ContainerNode) NormalizedNodeSerializer.deSerialize(serializedNode1);
+
+
+ // FIXME: This will not work due to BUG 2326. Once that is fixed we can uncomment this assertion
+ // assertEquals(node1, node2);
+
+ Optional<DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>> child = node2.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.SOME_BINARY_DATE_QNAME));
+
+ Object value = child.get().getValue();
+
+ assertTrue("value should be of type byte[]", value instanceof byte[]);
+
+ byte[] bytesValue = (byte[]) value;
+
+ for(byte i=0;i<5;i++){
+ assertEquals(i, bytesValue[i]);
+ }
+
}
@Test(expected = NullPointerException.class)
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
+import com.google.protobuf.ByteString;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Arrays;
+import java.util.Set;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.Set;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
public class ValueSerializerTest{
ImmutableSet.of("foo", "bar"));
assertEquals(ValueType.BITS_TYPE.ordinal(), builder1.getType());
- assertEquals("[foo, bar]", builder1.getValue());
+ assertTrue( "foo not in bits", builder1.getBitsValueList().contains("foo"));
+ assertTrue( "bar not in bits", builder1.getBitsValueList().contains("bar"));
}
assertEquals(1, serializedYangInstanceIdentifier.getArgumentsCount());
Mockito.verify(mockContext).addLocalName(TestModel.TEST_QNAME.getLocalName());
Mockito.verify(mockContext).addNamespace(TestModel.TEST_QNAME.getNamespace());
+
+ NormalizedNodeMessages.PathArgumentAttribute.Builder argumentBuilder
+ = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+
+ mockContext = mock(QNameSerializationContext.class);
+
+ ValueSerializer.serialize(argumentBuilder, mockContext, v1);
+
+ serializedYangInstanceIdentifier =
+ argumentBuilder.getInstanceIdentifierValue();
+
+ assertEquals(1, serializedYangInstanceIdentifier.getArgumentsCount());
+ Mockito.verify(mockContext).addLocalName(TestModel.TEST_QNAME.getLocalName());
+ Mockito.verify(mockContext).addNamespace(TestModel.TEST_QNAME.getNamespace());
+
}
@Test
}
+ // Byte-array values must serialize as BINARY_TYPE, with the payload stored
+ // in the new bytesValue field, for both the Node and the
+ // PathArgumentAttribute protobuf builders.
+ @Test
+ public void testSerializeBinary(){
+ NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
+ byte[] bytes = new byte[] {1,2,3,4};
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class),bytes);
+
+ assertEquals(ValueType.BINARY_TYPE.ordinal(), builder.getIntValueType());
+ assertEquals(ByteString.copyFrom(bytes), builder.getBytesValue());
+
+ // Same value serialized into a path-argument attribute.
+ NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class),bytes);
+
+ assertEquals(ValueType.BINARY_TYPE.ordinal(), builder1.getType());
+ assertEquals(ByteString.copyFrom(bytes), builder1.getBytesValue());
+
+ }
+
+
@Test
public void testDeSerializeShort(){
NormalizedNodeMessages.Node.Builder nodeBuilder = NormalizedNodeMessages.Node.newBuilder();
nodeBuilder.build());
assertTrue(o instanceof Set);
- assertTrue(((Set)o).contains("foo"));
- assertTrue(((Set) o).contains("bar"));
+ assertTrue(((Set<?>)o).contains("foo"));
+ assertTrue(((Set<?>) o).contains("bar"));
+
+ NormalizedNodeMessages.PathArgumentAttribute.Builder argumentBuilder
+ = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+
+ argumentBuilder.setType(ValueType.BITS_TYPE.ordinal());
+ argumentBuilder.addAllBitsValue(ImmutableList.of("foo", "bar"));
+
+ o = ValueSerializer
+ .deSerialize(mock(QNameDeSerializationContext.class),
+ argumentBuilder.build());
+
+ assertTrue(o instanceof Set);
+ assertTrue(((Set<?>)o).contains("foo"));
+ assertTrue(((Set<?>) o).contains("bar"));
}
assertTrue(o instanceof YangInstanceIdentifier);
assertEquals(TestModel.TEST_PATH, o);
+ NormalizedNodeMessages.PathArgumentAttribute.Builder argumentBuilder =
+ NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+
+ argumentBuilder.setType(ValueType.YANG_IDENTIFIER_TYPE.ordinal());
+ argumentBuilder.setInstanceIdentifierValue(idBuilder);
+
+ o = ValueSerializer.deSerialize(mockContext, argumentBuilder.build());
+
+ assertTrue(o instanceof YangInstanceIdentifier);
+ assertEquals(TestModel.TEST_PATH, o);
}
@Test
}
+
+ // Deserializing a BINARY_TYPE value must recover the original byte[] from
+ // the bytesValue field, for both Node and PathArgumentAttribute messages.
+ @Test
+ public void testDeSerializeBinaryType(){
+ NormalizedNodeMessages.Node.Builder nodeBuilder = NormalizedNodeMessages.Node.newBuilder();
+ nodeBuilder.setIntValueType(ValueType.BINARY_TYPE.ordinal());
+ byte[] bytes = new byte[] {1,2,3,4};
+ nodeBuilder.setBytesValue(ByteString.copyFrom(bytes));
+
+ Object o = ValueSerializer.deSerialize(mock(QNameDeSerializationContext.class),nodeBuilder.build());
+
+ assertTrue("not a byte array", o instanceof byte[]);
+ assertTrue("bytes value does not match" , Arrays.equals(bytes, (byte[]) o));
+
+ // Repeat via the path-argument attribute message.
+ NormalizedNodeMessages.PathArgumentAttribute.Builder argumentBuilder =
+ NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+ argumentBuilder.setType(ValueType.BINARY_TYPE.ordinal());
+ argumentBuilder.setBytesValue(ByteString.copyFrom(bytes));
+
+ o = ValueSerializer.deSerialize(mock(QNameDeSerializationContext.class), argumentBuilder.build());
+
+ assertTrue("not a byte array", o instanceof byte[]);
+ assertTrue("bytes value does not match" ,Arrays.equals(bytes, (byte[]) o));
+
+
+ }
+
+
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verifies that ValueType.getSerializableType maps a byte[] value to
+ * BINARY_TYPE.
+ */
+public class ValueTypeTest {
+
+ @Test
+ public void testGetSerializableType(){
+ byte[] b = new byte[10];
+ b[0] = 1;
+ b[2] = 2;
+
+ ValueType serializableType = ValueType.getSerializableType(b);
+ assertEquals(ValueType.BINARY_TYPE, serializableType);
+ }
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.util.TestModel;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+/**
+ * Round-trip tests for the custom NormalizedNode streaming code: a node
+ * written through NormalizedNodeOutputStreamWriter must be read back
+ * identical by NormalizedNodeInputStreamReader.
+ */
+public class NormalizedNodeStreamReaderWriterTest {
+
+ // Shared input document built by the test model.
+ final NormalizedNode<?, ?> input = TestModel.createTestContainer();
+
+ /**
+ * Writes the test container to a byte array via the stream writer, reads
+ * it back via the stream reader, and asserts equality with the original.
+ */
+ @Test
+ public void testNormalizedNodeStreamReaderWriter() throws IOException {
+
+ byte[] byteData = null;
+
+ try(ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream)) {
+
+ NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
+ normalizedNodeWriter.write(input);
+ byteData = byteArrayOutputStream.toByteArray();
+
+ }
+
+ try(NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteData))) {
+
+ NormalizedNode<?,?> node = reader.readNormalizedNode();
+ Assert.assertEquals(input, node);
+
+ }
+ }
+
+ /**
+ * Same round-trip, but through Java serialization of
+ * SampleNormalizedNodeSerializable, whose readObject/writeObject hooks
+ * delegate to the stream reader/writer.
+ */
+ @Test
+ public void testWithSerializable() {
+ SampleNormalizedNodeSerializable serializable = new SampleNormalizedNodeSerializable(input);
+ SampleNormalizedNodeSerializable clone = (SampleNormalizedNodeSerializable)SerializationUtils.clone(serializable);
+
+ Assert.assertEquals(input, clone.getInput());
+
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.net.URISyntaxException;
+
+/**
+ * Test fixture: a Serializable wrapper around a NormalizedNode that replaces
+ * default Java field serialization with the custom NormalizedNode stream
+ * writer/reader via its writeObject/readObject hooks.
+ */
+public class SampleNormalizedNodeSerializable implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ // Wrapped node; re-populated by readObject on deserialization.
+ private NormalizedNode<?, ?> input;
+
+ public SampleNormalizedNodeSerializable(NormalizedNode<?, ?> input) {
+ this.input = input;
+ }
+
+ public NormalizedNode<?, ?> getInput() {
+ return input;
+ }
+
+ // Custom deserialization: read the node back via the stream reader.
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException, URISyntaxException {
+ NormalizedNodeStreamReader reader = new NormalizedNodeInputStreamReader(stream);
+ this.input = reader.readNormalizedNode();
+ }
+
+ // Custom serialization: write the node via the stream writer.
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(stream);
+ NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
+
+ normalizedNodeWriter.write(this.input);
+ }
+
+}
package org.opendaylight.controller.cluster.datastore.util;
+import com.google.common.collect.ImmutableSet;
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameDeSerializationContext;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.HashSet;
import java.util.List;
public class InstanceIdentifierUtilsTest {
@Test
public void testAugmentationIdentifier() {
- YangInstanceIdentifier.PathArgument p1 = new YangInstanceIdentifier.AugmentationIdentifier(new HashSet(
- Arrays.asList(TEST_QNAME)));
+ YangInstanceIdentifier.PathArgument p1 = new YangInstanceIdentifier.AugmentationIdentifier(
+ ImmutableSet.of(TEST_QNAME));
List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
public static final QName DESC_QNAME = QName.create(TEST_QNAME, "desc");
public static final QName POINTER_QNAME = QName.create(TEST_QNAME, "pointer");
+ public static final QName SOME_BINARY_DATE_QNAME = QName.create(TEST_QNAME, "some-binary-data");
public static final QName SOME_REF_QNAME = QName.create(TEST_QNAME,
"some-ref");
public static final QName MYIDENTITY_QNAME = QName.create(TEST_QNAME,
}
- public static ContainerNode createTestContainer() {
-
-
- // Create a list of shoes
- // This is to test leaf list entry
- final LeafSetEntryNode<Object> nike =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeWithValue(QName.create(
- TEST_QNAME, "shoe"), "nike")).withValue("nike").build();
-
- final LeafSetEntryNode<Object> puma =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeWithValue(QName.create(
- TEST_QNAME, "shoe"), "puma")).withValue("puma").build();
-
- final LeafSetNode<Object> shoes =
- ImmutableLeafSetNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(QName.create(
- TEST_QNAME, "shoe"))).withChild(nike).withChild(puma)
- .build();
-
-
- // Test a leaf-list where each entry contains an identity
- final LeafSetEntryNode<Object> cap1 =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeWithValue(QName.create(
- TEST_QNAME, "capability"), DESC_QNAME))
- .withValue(DESC_QNAME).build();
-
- final LeafSetNode<Object> capabilities =
- ImmutableLeafSetNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(QName.create(
- TEST_QNAME, "capability"))).withChild(cap1).build();
-
- ContainerNode switchFeatures =
- ImmutableContainerNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(SWITCH_FEATURES_QNAME))
- .withChild(capabilities).build();
-
- // Create a leaf list with numbers
- final LeafSetEntryNode<Object> five =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- (new YangInstanceIdentifier.NodeWithValue(QName.create(
- TEST_QNAME, "number"), 5))).withValue(5).build();
- final LeafSetEntryNode<Object> fifteen =
- ImmutableLeafSetEntryNodeBuilder
- .create()
- .withNodeIdentifier(
- (new YangInstanceIdentifier.NodeWithValue(QName.create(
- TEST_QNAME, "number"), 15))).withValue(15).build();
- final LeafSetNode<Object> numbers =
- ImmutableLeafSetNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(QName.create(
- TEST_QNAME, "number"))).withChild(five).withChild(fifteen)
- .build();
-
-
- // Create augmentations
- MapEntryNode mapEntry = createAugmentedListEntry(1, "First Test");
-
- // Create a bits leaf
+ public static DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> createBaseTestContainerBuilder() {
+ // Create a list of shoes
+ // This is to test leaf list entry
+ final LeafSetEntryNode<Object> nike =
+ ImmutableLeafSetEntryNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeWithValue(QName.create(
+ TEST_QNAME, "shoe"), "nike")).withValue("nike").build();
+
+ final LeafSetEntryNode<Object> puma =
+ ImmutableLeafSetEntryNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeWithValue(QName.create(
+ TEST_QNAME, "shoe"), "puma")).withValue("puma").build();
+
+ final LeafSetNode<Object> shoes =
+ ImmutableLeafSetNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(QName.create(
+ TEST_QNAME, "shoe"))).withChild(nike).withChild(puma)
+ .build();
+
+
+ // Test a leaf-list where each entry contains an identity
+ final LeafSetEntryNode<Object> cap1 =
+ ImmutableLeafSetEntryNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeWithValue(QName.create(
+ TEST_QNAME, "capability"), DESC_QNAME))
+ .withValue(DESC_QNAME).build();
+
+ final LeafSetNode<Object> capabilities =
+ ImmutableLeafSetNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(QName.create(
+ TEST_QNAME, "capability"))).withChild(cap1).build();
+
+ ContainerNode switchFeatures =
+ ImmutableContainerNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(SWITCH_FEATURES_QNAME))
+ .withChild(capabilities).build();
+
+ // Create a leaf list with numbers
+ final LeafSetEntryNode<Object> five =
+ ImmutableLeafSetEntryNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ (new YangInstanceIdentifier.NodeWithValue(QName.create(
+ TEST_QNAME, "number"), 5))).withValue(5).build();
+ final LeafSetEntryNode<Object> fifteen =
+ ImmutableLeafSetEntryNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ (new YangInstanceIdentifier.NodeWithValue(QName.create(
+ TEST_QNAME, "number"), 15))).withValue(15).build();
+ final LeafSetNode<Object> numbers =
+ ImmutableLeafSetNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(QName.create(
+ TEST_QNAME, "number"))).withChild(five).withChild(fifteen)
+ .build();
+
+
+ // Create augmentations
+ MapEntryNode mapEntry = createAugmentedListEntry(1, "First Test");
+
+ // Create a bits leaf
NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, Object, LeafNode<Object>>
- myBits = Builders.leafBuilder().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(
- QName.create(TEST_QNAME, "my-bits"))).withValue(
- ImmutableSet.of("foo", "bar"));
+ myBits = Builders.leafBuilder().withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(
+ QName.create(TEST_QNAME, "my-bits"))).withValue(
+ ImmutableSet.of("foo", "bar"));
// Create the document
- return ImmutableContainerNodeBuilder
- .create()
- .withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME))
- .withChild(myBits.build())
- .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC))
- .withChild(ImmutableNodes.leafNode(POINTER_QNAME, "pointer"))
- .withChild(
- ImmutableNodes.leafNode(SOME_REF_QNAME, YangInstanceIdentifier
- .builder().build()))
- .withChild(ImmutableNodes.leafNode(MYIDENTITY_QNAME, DESC_QNAME))
-
- // .withChild(augmentationNode)
- .withChild(shoes)
- .withChild(numbers)
- .withChild(switchFeatures)
- .withChild(
- mapNodeBuilder(AUGMENTED_LIST_QNAME).withChild(mapEntry).build())
- .withChild(
- mapNodeBuilder(OUTER_LIST_QNAME)
- .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
- .withChild(BAR_NODE).build()).build();
+ return ImmutableContainerNodeBuilder
+ .create()
+ .withNodeIdentifier(
+ new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME))
+ .withChild(myBits.build())
+ .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC))
+ .withChild(ImmutableNodes.leafNode(POINTER_QNAME, "pointer"))
+ .withChild(
+ ImmutableNodes.leafNode(SOME_REF_QNAME, YangInstanceIdentifier
+ .builder().build()))
+ .withChild(ImmutableNodes.leafNode(MYIDENTITY_QNAME, DESC_QNAME))
+
+ // .withChild(augmentationNode)
+ .withChild(shoes)
+ .withChild(numbers)
+ .withChild(switchFeatures)
+ .withChild(
+ mapNodeBuilder(AUGMENTED_LIST_QNAME).withChild(mapEntry).build())
+ .withChild(
+ mapNodeBuilder(OUTER_LIST_QNAME)
+ .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+ .withChild(BAR_NODE).build());
+ }
+ public static ContainerNode createTestContainer() {
+ return createBaseTestContainerBuilder().build();
}
public static MapEntryNode createAugmentedListEntry(int id, String name) {
package org.opendaylight.controller.xml.codec;
-
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.io.ByteSource;
-import junit.framework.Assert;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import javax.xml.parsers.DocumentBuilderFactory;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import javax.xml.parsers.DocumentBuilderFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-
public class XmlUtilsTest {
private static final DocumentBuilderFactory BUILDERFACTORY;
@Test
public void testInputXmlToCompositeNode() {
CompositeNode node = XmlUtils.inputXmlToCompositeNode(testRpc.getQName(), XML_CONTENT, schema);
- ImmutableList<SimpleNode> input = (ImmutableList)node.getValue().get(0).getValue();
- SimpleNode firstNode = input.get(0);
+ ImmutableList<SimpleNode<?>> input = (ImmutableList<SimpleNode<?>>)node.getValue().get(0).getValue();
+ SimpleNode<?> firstNode = input.get(0);
Assert.assertEquals("id", firstNode.getNodeType().getLocalName());
Assert.assertEquals("flowid", firstNode.getValue());
- SimpleNode secondNode = input.get(1);
+ SimpleNode<?> secondNode = input.get(1);
Assert.assertEquals("flow", secondNode.getNodeType().getLocalName());
YangInstanceIdentifier instance = (YangInstanceIdentifier) secondNode.getValue();
Iterable<YangInstanceIdentifier.PathArgument> iterable = instance.getPathArguments();
- Iterator it = iterable.iterator();
+ Iterator<YangInstanceIdentifier.PathArgument> it = iterable.iterator();
YangInstanceIdentifier.NodeIdentifier firstPath = (YangInstanceIdentifier.NodeIdentifier) it.next();
Assert.assertEquals("node", firstPath.getNodeType().getLocalName());
YangInstanceIdentifier.NodeIdentifierWithPredicates secondPath = (YangInstanceIdentifier.NodeIdentifierWithPredicates)it.next();
@Test
public void testInputCompositeNodeToXML() {
CompositeNode input = XmlUtils.inputXmlToCompositeNode(testRpc.getQName(), XML_CONTENT, schema);
- List<Node<?>> childNodes = new ArrayList();
+ List<Node<?>> childNodes = new ArrayList<>();
childNodes.add(input);
QName rpcQName = schema.getOperations().iterator().next().getQName();
CompositeNode node = new ImmutableCompositeNode(rpcQName, input.getValue(), ModifyAction.REPLACE);
}
}
+ leaf some-binary-data {
+ type binary;
+ }
+
}
}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-inmemory-datastore</artifactId>
- <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-commons</artifactId>
- <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
+import akka.actor.Address;
public interface ClusterWrapper {
void subscribeToMemberEvents(ActorRef actorRef);
String getCurrentMemberName();
+ Address getSelfAddress();
}
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import akka.actor.Address;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent;
import com.google.common.base.Preconditions;
public class ClusterWrapperImpl implements ClusterWrapper {
private final Cluster cluster;
private final String currentMemberName;
+ private final Address selfAddress;
public ClusterWrapperImpl(ActorSystem actorSystem){
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
);
currentMemberName = (String) cluster.getSelfRoles().toArray()[0];
+ selfAddress = cluster.selfAddress();
}
public String getCurrentMemberName() {
return currentMemberName;
}
+
+ public Address getSelfAddress() {
+ return selfAddress;
+ }
}
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigObject;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ConfigurationImpl implements Configuration {
// Look up maps to speed things up
// key = memberName, value = list of shardNames
- private Map<String, List<String>> memberShardNames = new HashMap<>();
+ private final Map<String, List<String>> memberShardNames = new HashMap<>();
// key = shardName, value = list of replicaNames (replicaNames are the same as memberNames)
- private Map<String, List<String>> shardReplicaNames = new HashMap<>();
+ private final Map<String, List<String>> shardReplicaNames = new HashMap<>();
- public ConfigurationImpl(String moduleShardsConfigPath,
+ public ConfigurationImpl(final String moduleShardsConfigPath,
- String modulesConfigPath){
+ final String modulesConfigPath){
Preconditions.checkNotNull(moduleShardsConfigPath, "moduleShardsConfigPath should not be null");
Preconditions.checkNotNull(modulesConfigPath, "modulesConfigPath should not be null");
readModules(modulesConfig);
}
- @Override public List<String> getMemberShardNames(String memberName){
+ @Override public List<String> getMemberShardNames(final String memberName){
Preconditions.checkNotNull(memberName, "memberName should not be null");
return memberShardNames.get(memberName);
}
- List<String> shards = new ArrayList();
+ List<String> shards = new ArrayList<>();
for(ModuleShard ms : moduleShards){
for(Shard s : ms.getShards()){
for(String m : s.getReplicas()){
}
- @Override public Optional<String> getModuleNameFromNameSpace(String nameSpace) {
+ @Override public Optional<String> getModuleNameFromNameSpace(final String nameSpace) {
Preconditions.checkNotNull(nameSpace, "nameSpace should not be null");
return map;
}
- @Override public List<String> getShardNamesFromModuleName(String moduleName) {
+ @Override public List<String> getShardNamesFromModuleName(final String moduleName) {
Preconditions.checkNotNull(moduleName, "moduleName should not be null");
}
}
- return Collections.EMPTY_LIST;
+ return Collections.emptyList();
}
- @Override public List<String> getMembersFromShardName(String shardName) {
+ @Override public List<String> getMembersFromShardName(final String shardName) {
Preconditions.checkNotNull(shardName, "shardName should not be null");
}
}
}
- shardReplicaNames.put(shardName, Collections.EMPTY_LIST);
- return Collections.EMPTY_LIST;
+ shardReplicaNames.put(shardName, Collections.<String>emptyList());
+ return Collections.emptyList();
}
@Override public Set<String> getAllShardNames() {
- private void readModules(Config modulesConfig) {
+ private void readModules(final Config modulesConfig) {
List<? extends ConfigObject> modulesConfigObjectList =
modulesConfig.getObjectList("modules");
}
}
- private void readModuleShards(Config moduleShardsConfig) {
+ private void readModuleShards(final Config moduleShardsConfig) {
List<? extends ConfigObject> moduleShardsConfigObjectList =
moduleShardsConfig.getObjectList("module-shards");
private final String moduleName;
private final List<Shard> shards;
- public ModuleShard(String moduleName, List<Shard> shards) {
+ public ModuleShard(final String moduleName, final List<Shard> shards) {
this.moduleName = moduleName;
this.shards = shards;
}
private final String name;
private final List<String> replicas;
- Shard(String name, List<String> replicas) {
+ Shard(final String name, final List<String> replicas) {
this.name = name;
this.replicas = replicas;
}
private final String nameSpace;
private final ShardStrategy shardStrategy;
- Module(String name, String nameSpace, String shardStrategy) {
+ Module(final String name, final String nameSpace, final String shardStrategy) {
this.name = name;
this.nameSpace = nameSpace;
if(ModuleShardStrategy.NAME.equals(shardStrategy)){
private final ConfigObject configObject;
- ConfigObjectWrapper(ConfigObject configObject){
+ ConfigObjectWrapper(final ConfigObject configObject){
this.configObject = configObject;
}
- public String stringValue(String name){
+ public String stringValue(final String name){
return configObject.get(name).unwrapped().toString();
}
}
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class DataChangeListener extends AbstractUntypedActor {
+ private static final Logger LOG = LoggerFactory.getLogger(DataChangeListener.class);
+
private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener;
private boolean notificationsEnabled = false;
this.listener = Preconditions.checkNotNull(listener, "listener should not be null");
}
- @Override public void handleReceive(Object message) throws Exception {
+ @Override
+ public void handleReceive(Object message) throws Exception {
if(message instanceof DataChanged){
dataChanged(message);
} else if(message instanceof EnableNotification){
private void enableNotification(EnableNotification message) {
notificationsEnabled = message.isEnabled();
+ LOG.debug("{} notifications for listener {}", (notificationsEnabled ? "Enabled" : "Disabled"),
+ listener);
}
private void dataChanged(Object message) {
// Do nothing if notifications are not enabled
- if(!notificationsEnabled){
+ if(!notificationsEnabled) {
+ LOG.debug("Notifications not enabled for listener {} - dropping change notification",
+ listener);
return;
}
DataChanged reply = (DataChanged) message;
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>
- change = reply.getChange();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = reply.getChange();
+
+ LOG.debug("Sending change notification {} to listener {}", change, listener);
+
this.listener.onDataChanged(change);
// It seems the sender is never null but it doesn't hurt to check. If the caller passes in
private static class DataChangeListenerRegistrationCreator
implements Creator<DataChangeListenerRegistration> {
+ private static final long serialVersionUID = 1L;
final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
NormalizedNode<?, ?>>> registration;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
+import akka.dispatch.OnComplete;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
+import scala.concurrent.Future;
/**
* ListenerRegistrationProxy acts as a proxy for a ListenerRegistration that was done on a remote shard
* The ListenerRegistrationProxy talks to a remote ListenerRegistration actor.
* </p>
*/
+@SuppressWarnings("rawtypes")
public class DataChangeListenerRegistrationProxy implements ListenerRegistration {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DataChangeListenerRegistrationProxy.class);
+
private volatile ActorSelection listenerRegistrationActor;
- private final AsyncDataChangeListener listener;
- private final ActorRef dataChangeListenerActor;
+ private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener;
+ private ActorRef dataChangeListenerActor;
+ private final String shardName;
+ private final ActorContext actorContext;
private boolean closed = false;
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
- DataChangeListenerRegistrationProxy(
- ActorSelection listenerRegistrationActor,
- L listener, ActorRef dataChangeListenerActor) {
- this.listenerRegistrationActor = listenerRegistrationActor;
+ DataChangeListenerRegistrationProxy (
+ String shardName, ActorContext actorContext, L listener) {
+ this.shardName = shardName;
+ this.actorContext = actorContext;
this.listener = listener;
- this.dataChangeListenerActor = dataChangeListenerActor;
}
- public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
- DataChangeListenerRegistrationProxy(
- L listener, ActorRef dataChangeListenerActor) {
- this(null, listener, dataChangeListenerActor);
+ @VisibleForTesting
+ ActorSelection getListenerRegistrationActor() {
+ return listenerRegistrationActor;
+ }
+
+ @VisibleForTesting
+ ActorRef getDataChangeListenerActor() {
+ return dataChangeListenerActor;
}
@Override
return listener;
}
- public void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) {
+ private void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) {
+ if(listenerRegistrationActor == null) {
+ return;
+ }
+
boolean sendCloseMessage = false;
synchronized(this) {
if(closed) {
this.listenerRegistrationActor = listenerRegistrationActor;
}
}
+
if(sendCloseMessage) {
listenerRegistrationActor.tell(new
CloseDataChangeListenerRegistration().toSerializable(), null);
}
+ }
+
+ public void init(final YangInstanceIdentifier path, final AsyncDataBroker.DataChangeScope scope) {
- this.listenerRegistrationActor = listenerRegistrationActor;
+ dataChangeListenerActor = actorContext.getActorSystem().actorOf(
+ DataChangeListener.props(listener));
+
+ Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+ findFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(Throwable failure, ActorRef shard) {
+ if(failure instanceof LocalShardNotFoundException) {
+ LOG.debug("No local shard found for {} - DataChangeListener {} at path {} " +
+ "cannot be registered", shardName, listener, path);
+ } else if(failure != null) {
+ LOG.error("Failed to find local shard {} - DataChangeListener {} at path {} " +
+ "cannot be registered: {}", shardName, listener, path, failure);
+ } else {
+ doRegistration(shard, path, scope);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
}
- public ActorSelection getListenerRegistrationActor() {
- return listenerRegistrationActor;
+ private void doRegistration(ActorRef shard, final YangInstanceIdentifier path,
+ DataChangeScope scope) {
+
+ Future<Object> future = actorContext.executeOperationAsync(shard,
+ new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
+ actorContext.getDatastoreContext().getShardInitializationTimeout());
+
+ future.onComplete(new OnComplete<Object>(){
+ @Override
+ public void onComplete(Throwable failure, Object result) {
+ if(failure != null) {
+ LOG.error("Failed to register DataChangeListener {} at path {}",
+ listener, path.toString(), failure);
+ } else {
+ RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
+ setListenerRegistrationActor(actorContext.actorSelection(
+ reply.getListenerRegistrationPath()));
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
}
@Override
sendCloseMessage = !closed && listenerRegistrationActor != null;
closed = true;
}
+
if(sendCloseMessage) {
- listenerRegistrationActor.tell(new
- CloseDataChangeListenerRegistration().toSerializable(), null);
+ listenerRegistrationActor.tell(new CloseDataChangeListenerRegistration().toSerializable(),
+ ActorRef.noSender());
+ listenerRegistrationActor = null;
}
- dataChangeListenerActor.tell(PoisonPill.getInstance(), null);
+ if(dataChangeListenerActor != null) {
+ dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ dataChangeListenerActor = null;
+ }
}
}
package org.opendaylight.controller.cluster.datastore;
+import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
+import org.opendaylight.controller.cluster.datastore.config.FileConfigurationReader;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+import akka.util.Timeout;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
+
import java.util.concurrent.TimeUnit;
/**
private final ConfigParams shardRaftConfig;
private final int shardTransactionCommitTimeoutInSeconds;
private final int shardTransactionCommitQueueCapacity;
+ private final Timeout shardInitializationTimeout;
+ private final Timeout shardLeaderElectionTimeout;
+ private final boolean persistent;
+ private final ConfigurationReader configurationReader;
private DatastoreContext(InMemoryDOMDataStoreConfigProperties dataStoreProperties,
ConfigParams shardRaftConfig, String dataStoreMXBeanType, int operationTimeoutInSeconds,
Duration shardTransactionIdleTimeout, int shardTransactionCommitTimeoutInSeconds,
- int shardTransactionCommitQueueCapacity) {
+ int shardTransactionCommitQueueCapacity, Timeout shardInitializationTimeout,
+ Timeout shardLeaderElectionTimeout,
+ boolean persistent, ConfigurationReader configurationReader) {
this.dataStoreProperties = dataStoreProperties;
this.shardRaftConfig = shardRaftConfig;
this.dataStoreMXBeanType = dataStoreMXBeanType;
this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
+ this.shardInitializationTimeout = shardInitializationTimeout;
+ this.shardLeaderElectionTimeout = shardLeaderElectionTimeout;
+ this.persistent = persistent;
+ this.configurationReader = configurationReader;
}
public static Builder newBuilder() {
return shardTransactionCommitQueueCapacity;
}
+ public Timeout getShardInitializationTimeout() {
+ return shardInitializationTimeout;
+ }
+
+ public Timeout getShardLeaderElectionTimeout() {
+ return shardLeaderElectionTimeout;
+ }
+
+ public boolean isPersistent() {
+ return persistent;
+ }
+
+ public ConfigurationReader getConfigurationReader() {
+ return configurationReader;
+ }
+
public static class Builder {
private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
private Duration shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
private int shardSnapshotBatchCount = 20000;
private int shardHeartbeatIntervalInMillis = 500;
private int shardTransactionCommitQueueCapacity = 20000;
+ private Timeout shardInitializationTimeout = new Timeout(5, TimeUnit.MINUTES);
+ private Timeout shardLeaderElectionTimeout = new Timeout(30, TimeUnit.SECONDS);
+ private boolean persistent = true;
+ private ConfigurationReader configurationReader = new FileConfigurationReader();
+ private int shardIsolatedLeaderCheckIntervalInMillis = shardHeartbeatIntervalInMillis * 10;
public Builder shardTransactionIdleTimeout(Duration shardTransactionIdleTimeout) {
this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
return this;
}
+ public Builder shardInitializationTimeout(long timeout, TimeUnit unit) {
+ this.shardInitializationTimeout = new Timeout(timeout, unit);
+ return this;
+ }
+
+ public Builder shardLeaderElectionTimeout(long timeout, TimeUnit unit) {
+ this.shardLeaderElectionTimeout = new Timeout(timeout, unit);
+ return this;
+ }
+
+ public Builder configurationReader(ConfigurationReader configurationReader){
+ this.configurationReader = configurationReader;
+ return this;
+ }
+
+ public Builder persistent(boolean persistent){
+ this.persistent = persistent;
+ return this;
+ }
+
+ public Builder shardIsolatedLeaderCheckIntervalInMillis(int shardIsolatedLeaderCheckIntervalInMillis) {
+ this.shardIsolatedLeaderCheckIntervalInMillis = shardIsolatedLeaderCheckIntervalInMillis;
+ return this;
+ }
+
public DatastoreContext build() {
DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
TimeUnit.MILLISECONDS));
raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
+ raftConfig.setIsolatedLeaderCheckInterval(
+ new FiniteDuration(shardIsolatedLeaderCheckIntervalInMillis, TimeUnit.MILLISECONDS));
return new DatastoreContext(dataStoreProperties, raftConfig, dataStoreMXBeanType,
operationTimeoutInSeconds, shardTransactionIdleTimeout,
- shardTransactionCommitTimeoutInSeconds, shardTransactionCommitQueueCapacity);
+ shardTransactionCommitTimeoutInSeconds, shardTransactionCommitQueueCapacity,
+ shardInitializationTimeout, shardLeaderElectionTimeout,
+ persistent, configurationReader);
}
}
}
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorRef;
import akka.actor.ActorSystem;
-import akka.dispatch.OnComplete;
-import akka.util.Timeout;
-import com.google.common.base.Optional;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
/**
*
actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
ShardManager.props(type, cluster, configuration, datastoreContext)
- .withMailbox(ActorContext.MAILBOX), shardManagerId ), cluster, configuration);
-
- actorContext.setOperationTimeout(datastoreContext.getOperationTimeoutInSeconds());
+ .withMailbox(ActorContext.MAILBOX), shardManagerId ),
+ cluster, configuration, datastoreContext);
}
public DistributedDataStore(ActorContext actorContext) {
String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
- Optional<ActorRef> shard = actorContext.findLocalShard(shardName);
-
- //if shard is NOT local
- if (!shard.isPresent()) {
- LOG.debug("No local shard for shardName {} was found so returning a noop registration", shardName);
- return new NoOpDataChangeListenerRegistration(listener);
- }
- //if shard is local
- ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(DataChangeListener.props(listener));
- Future future = actorContext.executeOperationAsync(shard.get(),
- new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
- new Timeout(actorContext.getOperationDuration().$times(REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR)));
-
final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
- new DataChangeListenerRegistrationProxy(listener, dataChangeListenerActor);
-
- future.onComplete(new OnComplete() {
-
- @Override
- public void onComplete(Throwable failure, Object result)
- throws Throwable {
- if (failure != null) {
- LOG.error("Failed to register listener at path " + path.toString(), failure);
- return;
- }
- RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
- listenerRegistrationProxy.setListenerRegistrationActor(actorContext
- .actorSelection(reply.getListenerRegistrationPath()));
- }
- }, actorContext.getActorSystem().dispatcher());
+ new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
+ listenerRegistrationProxy.init(path, scope);
return listenerRegistrationProxy;
-
}
@Override
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.osgi.BundleDelegatingClassLoader;
-import com.google.common.base.Preconditions;
-import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
+import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.osgi.framework.BundleContext;
-import java.io.File;
import java.util.concurrent.atomic.AtomicReference;
public class DistributedDataStoreFactory {
- public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
+
public static final String CONFIGURATION_NAME = "odl-cluster-data";
- private static AtomicReference<ActorSystem> actorSystem = new AtomicReference<>();
+
+ private static AtomicReference<ActorSystem> persistentActorSystem = new AtomicReference<>();
public static DistributedDataStore createInstance(String name, SchemaService schemaService,
DatastoreContext datastoreContext, BundleContext bundleContext) {
- ActorSystem actorSystem = getOrCreateInstance(bundleContext);
+ ActorSystem actorSystem = getOrCreateInstance(bundleContext, datastoreContext.getConfigurationReader());
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
final DistributedDataStore dataStore =
new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
return dataStore;
}
- synchronized private static final ActorSystem getOrCreateInstance(final BundleContext bundleContext) {
+ synchronized private static final ActorSystem getOrCreateInstance(final BundleContext bundleContext, ConfigurationReader configurationReader) {
+
+ AtomicReference<ActorSystem> actorSystemReference = persistentActorSystem;
+ String configurationName = CONFIGURATION_NAME;
+ String actorSystemName = ACTOR_SYSTEM_NAME;
- if (actorSystem.get() != null){
- return actorSystem.get();
+ if (actorSystemReference.get() != null){
+ return actorSystemReference.get();
}
+
// Create an OSGi bundle classloader for actor system
BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
Thread.currentThread().getContextClassLoader());
- ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
- ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
+ ActorSystem system = ActorSystem.create(actorSystemName,
+ ConfigFactory.load(configurationReader.read()).getConfig(configurationName), classLoader);
system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
- actorSystem.set(system);
+ actorSystemReference.set(system);
return system;
}
-
- private static final Config readAkkaConfiguration() {
- File defaultConfigFile = new File(AKKA_CONF_PATH);
- Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
- return ConfigFactory.parseFile(defaultConfigFile);
- }
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.UntypedActor;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-
-public class NoOpCohort extends UntypedActor {
-
- @Override public void onReceive(Object message) throws Exception {
- if (message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(new CanCommitTransactionReply(false).toSerializable(), getSelf());
- } else if (message.getClass().equals(PreCommitTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(
- new PreCommitTransactionReply().toSerializable(),
- getSelf());
- } else if (message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(new CommitTransactionReply().toSerializable(), getSelf());
- } else if (message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(new AbortTransactionReply().toSerializable(), getSelf());
- } else {
- throw new Exception ("Not recognized message received,message="+message);
- }
-
- }
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * When a consumer registers a data change listener and no local shard is
- * available to register that listener with then we return an instance of
- * NoOpDataChangeListenerRegistration
- *
- * <p>
- *
- * The NoOpDataChangeListenerRegistration as it's name suggests does
- * nothing when an operation is invoked on it
- */
-public class NoOpDataChangeListenerRegistration
- implements ListenerRegistration {
-
- private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>
- listener;
-
- public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> NoOpDataChangeListenerRegistration(
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener) {
-
- this.listener = listener;
- }
-
- @Override
- public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
- return listener;
- }
-
- @Override public void close() {
-
- }
-}
import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
+import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import javax.annotation.Nonnull;
/**
* A Shard represents a portion of the logical data tree <br/>
private final LoggingAdapter LOG =
Logging.getLogger(getContext().system(), this);
- // By default persistent will be true and can be turned off using the system
- // property shard.persistent
- private final boolean persistent;
-
/// The name of this shard
private final ShardIdentifier name;
private final ShardStats shardMBean;
- private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
+ private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();
+
+ private final List<DelayedListenerRegistration> delayedListenerRegistrations =
+ Lists.newArrayList();
private final DatastoreContext datastoreContext;
+ private final DataPersistenceProvider dataPersistenceProvider;
+
private SchemaContext schemaContext;
private ActorRef createSnapshotTransaction;
this.name = name;
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
+ this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
- String setting = System.getProperty("shard.persistent");
-
- this.persistent = !"false".equals(setting);
-
- LOG.info("Shard created : {} persistent : {}", name, persistent);
+ LOG.info("Shard created : {} persistent : {}", name, datastoreContext.isPersistent());
store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
datastoreContext.getDataStoreProperties());
}
@Override
- public void onReceiveRecover(Object message) {
+ public void onReceiveRecover(Object message) throws Exception {
if(LOG.isDebugEnabled()) {
LOG.debug("onReceiveRecover: Received message {} from {}",
message.getClass().toString(),
if (message instanceof RecoveryFailure){
LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
+
+ // Even though recovery failed, we still need to finish our recovery, eg send the
+ // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
+ onRecoveryComplete();
} else {
super.onReceiveRecover(message);
}
}
@Override
- public void onReceiveCommand(Object message) {
+ public void onReceiveCommand(Object message) throws Exception {
if(LOG.isDebugEnabled()) {
LOG.debug("onReceiveCommand: Received message {} from {}", message, getSender());
}
// currently uses a same thread executor anyway.
cohortEntry.getCohort().preCommit().get();
- if(persistent) {
- Shard.this.persistData(getSender(), transactionID,
- new CompositeModificationPayload(cohortEntry.getModification().toSerializable()));
- } else {
- Shard.this.finishCommit(getSender(), transactionID);
- }
+ Shard.this.persistData(getSender(), transactionID,
+ new CompositeModificationPayload(cohortEntry.getModification().toSerializable()));
} catch (InterruptedException | ExecutionException e) {
LOG.error(e, "An exception occurred while preCommitting transaction {}",
cohortEntry.getTransactionID());
}
private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
- LOG.debug("Readying transaction {}", ready.getTransactionID());
+ LOG.debug("Readying transaction {}, client version {}", ready.getTransactionID(),
+ ready.getTxnClientVersion());
// This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
// commitCoordinator in preparation for the subsequent three phase commit initiated by
commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
ready.getModification());
- // Return our actor path as we'll handle the three phase commit.
- getSender().tell(new ReadyTransactionReply(Serialization.serializedActorPath(self())).
- toSerializable(), getSelf());
+ // Return our actor path as we'll handle the three phase commit, except if the Tx client
+ // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
+ // node. In that case, the subsequent 3-phase commit messages won't contain the
+ // transactionId so to maintain backwards compatibility, we create a separate cohort actor
+ // to provide the compatible behavior.
+ ActorRef replyActorPath = self();
+ if(ready.getTxnClientVersion() < CreateTransaction.HELIUM_1_VERSION) {
+ LOG.debug("Creating BackwardsCompatibleThreePhaseCommitCohort");
+ replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
+ ready.getTransactionID()));
+ }
+
+ ReadyTransactionReply readyTransactionReply = new ReadyTransactionReply(
+ Serialization.serializedActorPath(replyActorPath));
+ getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
+ readyTransactionReply, getSelf());
}
private void handleAbortTransaction(AbortTransaction abort) {
} else if (getLeader() != null) {
getLeader().forward(message, getContext());
} else {
- getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
+ getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
"Could not find shard leader so transaction cannot be created. This typically happens" +
- " when system is coming up or recovering and a leader is being elected. Try again" +
+ " when the system is coming up or recovering and a leader is being elected. Try again" +
" later.")), getSelf());
}
}
}
}
- private ActorRef createTypedTransactionActor(
- int transactionType,
- ShardTransactionIdentifier transactionId,
- String transactionChainId ) {
+ private ActorRef createTypedTransactionActor(int transactionType,
+ ShardTransactionIdentifier transactionId, String transactionChainId, int clientVersion ) {
DOMStoreTransactionFactory factory = store;
}
}
- if(this.schemaContext == null){
- throw new NullPointerException("schemaContext should not be null");
+ if(this.schemaContext == null) {
+ throw new IllegalStateException("SchemaContext is not set");
}
if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
schemaContext,datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId()), transactionId.toString());
+ transactionId.getRemoteTransactionId(), clientVersion),
+ transactionId.toString());
} else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
return getContext().actorOf(
ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId()), transactionId.toString());
+ transactionId.getRemoteTransactionId(), clientVersion),
+ transactionId.toString());
} else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
schemaContext, datastoreContext, shardMBean,
- transactionId.getRemoteTransactionId()), transactionId.toString());
+ transactionId.getRemoteTransactionId(), clientVersion),
+ transactionId.toString());
} else {
throw new IllegalArgumentException(
"Shard="+name + ":CreateTransaction message has unidentified transaction type="
}
private void createTransaction(CreateTransaction createTransaction) {
- createTransaction(createTransaction.getTransactionType(),
- createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
+ try {
+ ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
+ createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
+ createTransaction.getVersion());
+
+ getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
+ createTransaction.getTransactionId()).toSerializable(), getSelf());
+ } catch (Exception e) {
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
}
- private ActorRef createTransaction(int transactionType, String remoteTransactionId, String transactionChainId) {
+ private ActorRef createTransaction(int transactionType, String remoteTransactionId,
+ String transactionChainId, int clientVersion) {
ShardTransactionIdentifier transactionId =
ShardTransactionIdentifier.builder()
.remoteTransactionId(remoteTransactionId)
.build();
+
if(LOG.isDebugEnabled()) {
LOG.debug("Creating transaction : {} ", transactionId);
}
- ActorRef transactionActor =
- createTypedTransactionActor(transactionType, transactionId, transactionChainId);
- getSender()
- .tell(new CreateTransactionReply(
- Serialization.serializedActorPath(transactionActor),
- remoteTransactionId).toSerializable(),
- getSelf());
+ ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
+ transactionChainId, clientVersion);
return transactionActor;
}
store.onGlobalContextUpdated(message.getSchemaContext());
}
- @VisibleForTesting void updateSchemaContext(SchemaContext schemaContext) {
+ @VisibleForTesting
+ void updateSchemaContext(SchemaContext schemaContext) {
store.onGlobalContextUpdated(schemaContext);
}
- private void registerChangeListener(
- RegisterChangeListener registerChangeListener) {
+ private void registerChangeListener(RegisterChangeListener registerChangeListener) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("registerDataChangeListener for {}", registerChangeListener
- .getPath());
+ LOG.debug("registerDataChangeListener for {}", registerChangeListener.getPath());
+
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration;
+ if(isLeader()) {
+ registration = doChangeListenerRegistration(registerChangeListener);
+ } else {
+ LOG.debug("Shard is not the leader - delaying registration");
+
+ DelayedListenerRegistration delayedReg =
+ new DelayedListenerRegistration(registerChangeListener);
+ delayedListenerRegistrations.add(delayedReg);
+ registration = delayedReg;
}
+ ActorRef listenerRegistration = getContext().actorOf(
+ DataChangeListenerRegistration.props(registration));
- ActorSelection dataChangeListenerPath = getContext()
- .system().actorSelection(
- registerChangeListener.getDataChangeListenerPath());
+ LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+ listenerRegistration.path());
+
+ getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf());
+ }
+ private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> doChangeListenerRegistration(
+ RegisterChangeListener registerChangeListener) {
+
+ ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
+ registerChangeListener.getDataChangeListenerPath());
// Notify the listener if notifications should be enabled or not
// If this shard is the leader then it will enable notifications else
// it will not
- dataChangeListenerPath
- .tell(new EnableNotification(isLeader()), getSelf());
+ dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
// Now store a reference to the data change listener so it can be notified
// at a later point if notifications should be enabled or disabled
dataChangeListeners.add(dataChangeListenerPath);
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>
- listener = new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
+ AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
+ new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
- registration = store.registerChangeListener(registerChangeListener.getPath(),
- listener, registerChangeListener.getScope());
- ActorRef listenerRegistration =
- getContext().actorOf(
- DataChangeListenerRegistration.props(registration));
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
- , listenerRegistration.path().toString());
- }
+ LOG.debug("Registering for path {}", registerChangeListener.getPath());
- getSender()
- .tell(new RegisterChangeListenerReply(listenerRegistration.path()),
- getSelf());
+ return store.registerChangeListener(registerChangeListener.getPath(), listener,
+ registerChangeListener.getScope());
}
private boolean isMetricsCaptureEnabled(){
//notify shard manager
getContext().parent().tell(new ActorInitialized(), getSelf());
- // Schedule a message to be periodically sent to check if the current in-progress
- // transaction should be expired and aborted.
- FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
- txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
- period, period, getSelf(),
- TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
+ // Being paranoid here - this method should only be called once but just in case...
+ if(txCommitTimeoutCheckSchedule == null) {
+ // Schedule a message to be periodically sent to check if the current in-progress
+ // transaction should be expired and aborted.
+ FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
+ txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
+ period, period, getSelf(),
+ TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
+ }
}
@Override
// so that this actor does not get block building the snapshot
createSnapshotTransaction = createTransaction(
TransactionProxy.TransactionType.READ_ONLY.ordinal(),
- "createSnapshot" + ++createSnapshotTransactionCounter, "");
+ "createSnapshot" + ++createSnapshotTransactionCounter, "",
+ CreateTransaction.CURRENT_VERSION);
createSnapshotTransaction.tell(
new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
}
}
- @Override protected void onStateChanged() {
+ @Override
+ protected void onStateChanged() {
+ boolean isLeader = isLeader();
for (ActorSelection dataChangeListener : dataChangeListeners) {
- dataChangeListener
- .tell(new EnableNotification(isLeader()), getSelf());
+ dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
+ }
+
+ if(isLeader) {
+ for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
+ if(!reg.isClosed()) {
+ reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
+ }
+ }
+
+ delayedListenerRegistrations.clear();
}
shardMBean.setRaftState(getRaftState().name());
shardMBean.setCurrentTerm(getCurrentTerm());
// If this actor is no longer the leader close all the transaction chains
- if(!isLeader()){
+ if(!isLeader){
for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
if(LOG.isDebugEnabled()) {
LOG.debug(
}
}
+ @Override
+ protected DataPersistenceProvider persistence() {
+ return dataPersistenceProvider;
+ }
+
@Override protected void onLeaderChanged(String oldLeader, String newLeader) {
shardMBean.setLeader(newLeader);
}
return this.name.toString();
}
+ @VisibleForTesting
+ DataPersistenceProvider getDataPersistenceProvider() {
+ return dataPersistenceProvider;
+ }
+
private static class ShardCreator implements Creator<Shard> {
private static final long serialVersionUID = 1L;
ShardStats getShardMBean() {
return shardMBean;
}
+
+ /**
+ * ListenerRegistration returned to a client when this Shard is not yet the
+ * leader and the data-store registration therefore cannot be done immediately.
+ * The real registration is installed later via setDelegate() when leadership
+ * is gained; until then getInstance() returns null. If the client closes the
+ * registration before that happens, the pending registration is simply
+ * skipped (see the isClosed() check where the delegate is set).
+ */
+ private static class DelayedListenerRegistration implements
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
+
+ // Set once close() is called; volatile since close() may be invoked from a
+ // different thread than the shard actor that later calls setDelegate().
+ private volatile boolean closed;
+
+ // The original registration request, replayed once this shard becomes leader.
+ private final RegisterChangeListener registerChangeListener;
+
+ // The actual data-store registration; null until setDelegate() is called.
+ private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> delegate;
+
+ DelayedListenerRegistration(RegisterChangeListener registerChangeListener) {
+ this.registerChangeListener = registerChangeListener;
+ }
+
+ // Installs the real registration obtained from the data store.
+ void setDelegate( ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration) {
+ this.delegate = registration;
+ }
+
+ boolean isClosed() {
+ return closed;
+ }
+
+ RegisterChangeListener getRegisterChangeListener() {
+ return registerChangeListener;
+ }
+
+ /** Returns the registered listener, or null if registration is still pending. */
+ @Override
+ public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
+ return delegate != null ? delegate.getInstance() : null;
+ }
+
+ /** Marks this registration closed and closes the delegate if one was installed. */
+ @Override
+ public void close() {
+ closed = true;
+ if(delegate != null) {
+ delegate.close();
+ }
+ }
+ }
}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
+
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
private final Collection<String> knownModules = new HashSet<>(128);
+ private final DataPersistenceProvider dataPersistenceProvider;
+
/**
* @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
* configuration or operational
this.cluster = Preconditions.checkNotNull(cluster, "cluster should not be null");
this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
this.datastoreContext = datastoreContext;
+ this.dataPersistenceProvider = createDataPersistenceProvider(datastoreContext.isPersistent());
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
createLocalShards();
}
+ protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
+ return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
+ }
+
public static Props props(final String type,
final ClusterWrapper cluster,
final Configuration configuration,
LOG.debug("Initializing shard [{}]", shardName);
ShardInformation shardInformation = localShards.get(shardName);
if (shardInformation != null) {
- shardInformation.setShardInitialized(true);
+ shardInformation.setActorInitialized();
}
}
@Override
protected void handleRecover(Object message) throws Exception {
- if(message instanceof SchemaContextModules){
- SchemaContextModules msg = (SchemaContextModules) message;
- knownModules.clear();
- knownModules.addAll(msg.getModules());
- } else if(message instanceof RecoveryFailure){
- RecoveryFailure failure = (RecoveryFailure) message;
- LOG.error(failure.cause(), "Recovery failed");
- } else if(message instanceof RecoveryCompleted){
- LOG.info("Recovery complete : {}", persistenceId());
-
- // Delete all the messages from the akka journal except the last one
- deleteMessages(lastSequenceNr() - 1);
+ if(dataPersistenceProvider.isRecoveryApplicable()) {
+ if (message instanceof SchemaContextModules) {
+ SchemaContextModules msg = (SchemaContextModules) message;
+ knownModules.clear();
+ knownModules.addAll(msg.getModules());
+ } else if (message instanceof RecoveryFailure) {
+ RecoveryFailure failure = (RecoveryFailure) message;
+ LOG.error(failure.cause(), "Recovery failed");
+ } else if (message instanceof RecoveryCompleted) {
+ LOG.info("Recovery complete : {}", persistenceId());
+
+ // Delete all the messages from the akka journal except the last one
+ deleteMessages(lastSequenceNr() - 1);
+ }
+ } else {
+ if (message instanceof RecoveryCompleted) {
+ LOG.info("Recovery complete : {}", persistenceId());
+
+ // Delete all the messages from the akka journal
+ deleteMessages(lastSequenceNr());
+ }
}
}
return;
}
- sendResponse(shardInformation, new Supplier<Object>() {
+ sendResponse(shardInformation, message.isWaitUntilInitialized(), new Supplier<Object>() {
@Override
public Object get() {
return new LocalShardFound(shardInformation.getActor());
});
}
- private void sendResponse(ShardInformation shardInformation, Supplier<Object> messageSupplier) {
- if (shardInformation.getActor() == null || !shardInformation.isShardInitialized()) {
- getSender().tell(new ActorNotInitialized(), getSelf());
+ private void sendResponse(ShardInformation shardInformation, boolean waitUntilInitialized,
+ final Supplier<Object> messageSupplier) {
+ if (!shardInformation.isShardInitialized()) {
+ if(waitUntilInitialized) {
+ final ActorRef sender = getSender();
+ final ActorRef self = self();
+ shardInformation.addRunnableOnInitialized(new Runnable() {
+ @Override
+ public void run() {
+ sender.tell(messageSupplier.get(), self);
+ }
+ });
+ } else {
+ getSender().tell(new ActorNotInitialized(), getSelf());
+ }
+
return;
}
knownModules.clear();
knownModules.addAll(newModules);
- persist(new SchemaContextModules(newModules), new Procedure<SchemaContextModules>() {
+ dataPersistenceProvider.persist(new SchemaContextModules(newModules), new Procedure<SchemaContextModules>() {
@Override
public void apply(SchemaContextModules param) throws Exception {
LOG.info("Sending new SchemaContext to Shards");
for (ShardInformation info : localShards.values()) {
- if(info.getActor() == null) {
+ if (info.getActor() == null) {
info.setActor(getContext().actorOf(Shard.props(info.getShardId(),
- info.getPeerAddresses(), datastoreContext, schemaContext),
+ info.getPeerAddresses(), datastoreContext, schemaContext),
info.getShardId().toString()));
} else {
info.getActor().tell(message, getSelf());
// First see if the there is a local replica for the shard
final ShardInformation info = localShards.get(shardName);
if (info != null) {
- sendResponse(info, new Supplier<Object>() {
+ sendResponse(info, message.isWaitUntilInitialized(), new Supplier<Object>() {
@Override
public Object get() {
return new PrimaryFound(info.getActorPath().toString()).toSerializable();
return knownModules;
}
+ @VisibleForTesting
+ DataPersistenceProvider getDataPersistenceProvider() {
+ return dataPersistenceProvider;
+ }
+
private class ShardInformation {
private final ShardIdentifier shardId;
private final String shardName;
private ActorRef actor;
private ActorPath actorPath;
private final Map<ShardIdentifier, String> peerAddresses;
- private boolean shardInitialized = false; // flag that determines if the actor is ready for business
+
+ // flag that determines if the actor is ready for business
+ private boolean actorInitialized = false;
+
+ private final List<Runnable> runnablesOnInitialized = Lists.newArrayList();
private ShardInformation(String shardName, ShardIdentifier shardId,
Map<ShardIdentifier, String> peerAddresses) {
}
boolean isShardInitialized() {
- return shardInitialized;
+ return getActor() != null && actorInitialized;
}
- void setShardInitialized(boolean shardInitialized) {
- this.shardInitialized = shardInitialized;
+ void setActorInitialized() {
+ this.actorInitialized = true;
+
+ for(Runnable runnable: runnablesOnInitialized) {
+ runnable.run();
+ }
+
+ runnablesOnInitialized.clear();
+ }
+
+ void addRunnableOnInitialized(Runnable runnable) {
+ runnablesOnInitialized.add(runnable);
}
}
static class SchemaContextModules implements Serializable {
private static final long serialVersionUID = 1L;
-
private final Set<String> modules;
SchemaContextModules(Set<String> modules){
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
private final DOMStoreReadTransaction transaction;
public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID) {
- super(shardActor, schemaContext, shardStats, transactionID);
+ SchemaContext schemaContext, ShardStats shardStats, String transactionID,
+ int txnClientVersion) {
+ super(shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
this.transaction = transaction;
}
@Override
public void handleReceive(Object message) throws Exception {
- if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction, ReadData.fromSerializable(message));
+ if(message instanceof ReadData) {
+ readData(transaction, (ReadData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof DataExists) {
+ dataExists(transaction, (DataExists) message, !SERIALIZED_REPLY);
+
+ } else if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ readData(transaction, ReadData.fromSerializable(message), SERIALIZED_REPLY);
+
} else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
- dataExists(transaction, DataExists.fromSerializable(message));
+ dataExists(transaction, DataExists.fromSerializable(message), SERIALIZED_REPLY);
+
} else {
super.handleReceive(message);
}
private final DOMStoreReadWriteTransaction transaction;
public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID) {
- super(transaction, shardActor, schemaContext, shardStats, transactionID);
+ SchemaContext schemaContext, ShardStats shardStats, String transactionID,
+ int txnClientVersion) {
+ super(transaction, shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
this.transaction = transaction;
}
@Override
public void handleReceive(Object message) throws Exception {
- if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction, ReadData.fromSerializable(message));
+ if (message instanceof ReadData) {
+ readData(transaction, (ReadData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof DataExists) {
+ dataExists(transaction, (DataExists) message, !SERIALIZED_REPLY);
+
+ } else if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ readData(transaction, ReadData.fromSerializable(message), SERIALIZED_REPLY);
+
} else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
- dataExists(transaction, DataExists.fromSerializable(message));
+ dataExists(transaction, DataExists.fromSerializable(message), SERIALIZED_REPLY);
+
} else {
super.handleReceive(message);
}
import akka.japi.Creator;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
* <li> {@link org.opendaylight.controller.cluster.datastore.messages.CloseTransaction}
* </p>
*/
-public abstract class ShardTransaction extends AbstractUntypedActor {
+public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
+
+ protected static final boolean SERIALIZED_REPLY = true;
private final ActorRef shardActor;
private final SchemaContext schemaContext;
private final ShardStats shardStats;
private final String transactionID;
+ private final int txnClientVersion;
protected ShardTransaction(ActorRef shardActor, SchemaContext schemaContext,
- ShardStats shardStats, String transactionID) {
+ ShardStats shardStats, String transactionID, int txnClientVersion) {
+ super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
this.shardActor = shardActor;
this.schemaContext = schemaContext;
this.shardStats = shardStats;
this.transactionID = transactionID;
+ this.txnClientVersion = txnClientVersion;
}
public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats,
- String transactionID) {
+ String transactionID, int txnClientVersion) {
return Props.create(new ShardTransactionCreator(transaction, shardActor, schemaContext,
- datastoreContext, shardStats, transactionID));
+ datastoreContext, shardStats, transactionID, txnClientVersion));
}
protected abstract DOMStoreTransaction getDOMStoreTransaction();
return schemaContext;
}
+ protected int getTxnClientVersion() {
+ return txnClientVersion;
+ }
+
@Override
public void handleReceive(Object message) throws Exception {
if (message.getClass().equals(CloseTransaction.SERIALIZABLE_CLASS)) {
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
// Asynchronously reads the node at message.getPath() and replies to the requester.
// returnSerialized selects between the serialized wire form and the plain ReadDataReply
// (plain form presumably used for local/same-JVM clients — TODO confirm).
- protected void readData(DOMStoreReadTransaction transaction,ReadData message) {
+ protected void readData(DOMStoreReadTransaction transaction, ReadData message, final boolean returnSerialized) {
// Capture sender/self now: getSender()/getSelf() are not valid inside the async callback.
final ActorRef sender = getSender();
final ActorRef self = getSelf();
final YangInstanceIdentifier path = message.getPath();
final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
transaction.read(path);
+
future.addListener(new Runnable() {
@Override
public void run() {
try {
Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
- if (optional.isPresent()) {
- sender.tell(new ReadDataReply(schemaContext,optional.get()).toSerializable(), self);
- } else {
- sender.tell(new ReadDataReply(schemaContext,null).toSerializable(), self);
- }
// optional.orNull() collapses the old present/absent branches into one reply path.
+ ReadDataReply readDataReply = new ReadDataReply(schemaContext, optional.orNull());
+
+ sender.tell((returnSerialized ? readDataReply.toSerializable():
+ readDataReply), self);
+
} catch (Exception e) {
// Read failed: bump the failure counter and propagate the cause to the requester.
shardStats.incrementFailedReadTransactionsCount();
sender.tell(new akka.actor.Status.Failure(e), self);
}, getContext().dispatcher());
}
// Synchronously checks existence of the node at message.getPath() and replies with
// either the serialized or the plain DataExistsReply, per returnSerialized.
- protected void dataExists(DOMStoreReadTransaction transaction, DataExists message) {
+ protected void dataExists(DOMStoreReadTransaction transaction, DataExists message,
+ final boolean returnSerialized) {
final YangInstanceIdentifier path = message.getPath();
try {
// Blocking checkedGet: unlike readData, this path does not use an async listener.
Boolean exists = transaction.exists(path).checkedGet();
- getSender().tell(new DataExistsReply(exists).toSerializable(), getSelf());
+ DataExistsReply dataExistsReply = new DataExistsReply(exists);
+ getSender().tell(returnSerialized ? dataExistsReply.toSerializable() :
+ dataExistsReply, getSelf());
} catch (ReadFailedException e) {
getSender().tell(new akka.actor.Status.Failure(e),getSelf());
}
final DatastoreContext datastoreContext;
final ShardStats shardStats;
final String transactionID;
+ final int txnClientVersion;
// Creator captures everything needed to instantiate the correct ShardTransaction
// subclass inside the actor system; txnClientVersion is forwarded unchanged.
ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
SchemaContext schemaContext, DatastoreContext datastoreContext,
- ShardStats shardStats, String transactionID) {
+ ShardStats shardStats, String transactionID, int txnClientVersion) {
this.transaction = transaction;
this.shardActor = shardActor;
this.shardStats = shardStats;
this.schemaContext = schemaContext;
this.datastoreContext = datastoreContext;
this.transactionID = transactionID;
+ this.txnClientVersion = txnClientVersion;
}
@Override
ShardTransaction tx;
// Dispatch on the DOMStoreTransaction subtype. Order matters: a read-write
// transaction is also a read transaction, so it must be tested first.
if(transaction instanceof DOMStoreReadWriteTransaction) {
tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID);
+ shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
} else if(transaction instanceof DOMStoreReadTransaction) {
tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- schemaContext, shardStats, transactionID);
+ schemaContext, shardStats, transactionID, txnClientVersion);
} else {
// Fallback: anything else is treated as write-only (unchecked cast).
tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, schemaContext, shardStats, transactionID);
+ shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
}
// Idle transactions self-expire after the configured shard Tx idle timeout.
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
// Creates the shard-side transaction actor for a CreateTransaction request, picking
// the chain's read-only / read-write / write-only factory by the requested type ordinal.
// createTransaction.getVersion() is now passed through so replies can match the client.
return getContext().actorOf(
ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId()), transactionName);
+ createTransaction.getTransactionId(),
+ createTransaction.getVersion()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId()), transactionName);
+ createTransaction.getTransactionId(),
+ createTransaction.getVersion()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
schemaContext, datastoreContext, shardStats,
- createTransaction.getTransactionId()), transactionName);
+ createTransaction.getTransactionId(),
+ createTransaction.getVersion()), transactionName);
} else {
// Unknown ordinal: fail fast rather than silently defaulting a type.
throw new IllegalArgumentException (
"CreateTransaction message has unidentified transaction type=" +
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
+import akka.actor.PoisonPill;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
private final DOMStoreWriteTransaction transaction;
// Write-only shard transaction actor; delegates all mutations to the wrapped
// DOMStoreWriteTransaction and forwards txnClientVersion to the base class.
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats, String transactionID) {
- super(shardActor, schemaContext, shardStats, transactionID);
+ SchemaContext schemaContext, ShardStats shardStats, String transactionID,
+ int txnClientVersion) {
+ super(shardActor, schemaContext, shardStats, transactionID, txnClientVersion);
this.transaction = transaction;
}
@Override
// Message dispatch. Plain (instanceof) message forms get plain replies
// (!SERIALIZED_REPLY); serialized wire forms get serialized replies (SERIALIZED_REPLY).
// The instanceof checks are tested first so local messages skip deserialization.
public void handleReceive(Object message) throws Exception {
- if(WriteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- writeData(transaction, WriteData.fromSerializable(message, getSchemaContext()));
+
+ if (message instanceof WriteData) {
+ writeData(transaction, (WriteData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof MergeData) {
+ mergeData(transaction, (MergeData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof DeleteData) {
+ deleteData(transaction, (DeleteData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof ReadyTransaction) {
+ readyTransaction(transaction, new ReadyTransaction(), !SERIALIZED_REPLY);
+
+ } else if(WriteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ writeData(transaction, WriteData.fromSerializable(message, getSchemaContext()), SERIALIZED_REPLY);
+
} else if(MergeData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- mergeData(transaction, MergeData.fromSerializable(message, getSchemaContext()));
+ mergeData(transaction, MergeData.fromSerializable(message, getSchemaContext()), SERIALIZED_REPLY);
+
} else if(DeleteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- deleteData(transaction, DeleteData.fromSerializable(message));
+ deleteData(transaction, DeleteData.fromSerializable(message), SERIALIZED_REPLY);
+
} else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, new ReadyTransaction());
+ readyTransaction(transaction, new ReadyTransaction(), SERIALIZED_REPLY);
+
} else if (message instanceof GetCompositedModification) {
// This is here for testing only
getSender().tell(new GetCompositeModificationReply(
}
}
// Records a WriteModification and applies the write to the backing transaction,
// replying with WriteDataReply (serialized or plain per returnSerialized).
- private void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
+ private void writeData(DOMStoreWriteTransaction transaction, WriteData message,
+ boolean returnSerialized) {
// SLF4J {} placeholder makes the old isDebugEnabled() guard unnecessary.
+ LOG.debug("writeData at path : {}", message.getPath());
+
// Modification is recorded first; note it is kept even if transaction.write() below
// throws — presumably harmless since a failed Tx won't be readied, but TODO confirm.
modification.addModification(
new WriteModification(message.getPath(), message.getData(), getSchemaContext()));
- if(LOG.isDebugEnabled()) {
- LOG.debug("writeData at path : " + message.getPath().toString());
- }
try {
transaction.write(message.getPath(), message.getData());
- getSender().tell(new WriteDataReply().toSerializable(), getSelf());
+ WriteDataReply writeDataReply = new WriteDataReply();
+ getSender().tell(returnSerialized ? writeDataReply.toSerializable() : writeDataReply,
+ getSelf());
}catch(Exception e){
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
}
// Records a MergeModification and applies the merge to the backing transaction,
// replying with MergeDataReply (serialized or plain per returnSerialized).
- private void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
+ private void mergeData(DOMStoreWriteTransaction transaction, MergeData message,
+ boolean returnSerialized) {
+ LOG.debug("mergeData at path : {}", message.getPath());
+
modification.addModification(
new MergeModification(message.getPath(), message.getData(), getSchemaContext()));
- if(LOG.isDebugEnabled()) {
- LOG.debug("mergeData at path : " + message.getPath().toString());
- }
+
try {
transaction.merge(message.getPath(), message.getData());
- getSender().tell(new MergeDataReply().toSerializable(), getSelf());
+ MergeDataReply mergeDataReply = new MergeDataReply();
+ getSender().tell(returnSerialized ? mergeDataReply.toSerializable() : mergeDataReply ,
+ getSelf());
}catch(Exception e){
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
}
// Records a DeleteModification and applies the delete to the backing transaction,
// replying with DeleteDataReply (serialized or plain per returnSerialized).
- private void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("deleteData at path : " + message.getPath().toString());
- }
+ private void deleteData(DOMStoreWriteTransaction transaction, DeleteData message,
+ boolean returnSerialized) {
+ LOG.debug("deleteData at path : {}", message.getPath());
+
modification.addModification(new DeleteModification(message.getPath()));
try {
transaction.delete(message.getPath());
- getSender().tell(new DeleteDataReply().toSerializable(), getSelf());
+ DeleteDataReply deleteDataReply = new DeleteDataReply();
+ getSender().tell(returnSerialized ? deleteDataReply.toSerializable() : deleteDataReply,
+ getSelf());
}catch(Exception e){
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
}
// Readies the transaction: obtains the 3PC cohort and forwards it (with the client
// version, accumulated modifications and the reply-serialization flag) to the shard,
// which takes over the commit. This actor then terminates itself.
- private void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message) {
+ private void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message,
+ boolean returnSerialized) {
+ String transactionID = getTransactionID();
+
+ LOG.debug("readyTransaction : {}", transactionID);
+
DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
// forward() preserves the original sender so the shard replies directly to the client.
- getShardActor().forward(new ForwardedReadyTransaction(getTransactionID(), cohort, modification),
- getContext());
+ getShardActor().forward(new ForwardedReadyTransaction(transactionID, getTxnClientVersion(),
+ cohort, modification, returnSerialized), getContext());
+
+ // The shard will handle the commit from here so we're no longer needed - self-destruct.
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
}
// These classes are in here for test purposes only
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Preconditions;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainClosedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import scala.concurrent.Await;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-
-import java.util.Collections;
-import java.util.List;
+import scala.concurrent.Promise;
/**
* TransactionChainProxy acts as a proxy for a DOMStoreTransactionChain created on a remote shard
*/
// Replaces the old cohort-futures field with an explicit state machine:
// IDLE (no Tx yet) -> Allocated (a chained Tx outstanding) -> CLOSED.
-public class TransactionChainProxy implements DOMStoreTransactionChain{
+public class TransactionChainProxy implements DOMStoreTransactionChain {
+
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
+
+ // State contract: isReady() gates allocation of the next Tx in the chain;
+ // getPreviousReadyFutures() supplies the prior Tx's ready futures to the next Tx.
+ private interface State {
+ boolean isReady();
+
+ List<Future<ActorSelection>> getPreviousReadyFutures();
+ }
+
+ // Allocated: a chained write transaction is outstanding; readiness is delegated to it.
+ private static class Allocated implements State {
+ private final ChainedTransactionProxy transaction;
+
+ Allocated(ChainedTransactionProxy transaction) {
+ this.transaction = transaction;
+ }
+
+ @Override
+ public boolean isReady() {
+ return transaction.isReady();
+ }
+
+ @Override
+ public List<Future<ActorSelection>> getPreviousReadyFutures() {
+ return transaction.getReadyFutures();
+ }
+ }
+
+ // Shared base for the stateless IDLE/CLOSED singletons: no previous ready futures.
+ private static abstract class AbstractDefaultState implements State {
+ @Override
+ public List<Future<ActorSelection>> getPreviousReadyFutures() {
+ return Collections.emptyList();
+ }
+ }
+
+ // IDLE: nothing outstanding, always ready to allocate.
+ private static final State IDLE_STATE = new AbstractDefaultState() {
+ @Override
+ public boolean isReady() {
+ return true;
+ }
+ };
+
+ // CLOSED: any further allocation attempt fails fast.
+ private static final State CLOSED_STATE = new AbstractDefaultState() {
+ @Override
+ public boolean isReady() {
+ throw new TransactionChainClosedException("Transaction chain has been closed");
+ }
+ };
+
+ // Monotonic suffix for chain ids (replaces the old System.currentTimeMillis() suffix,
+ // which could collide for chains created within the same millisecond).
+ private static final AtomicInteger counter = new AtomicInteger(0);
+
private final ActorContext actorContext;
private final String transactionChainId;
// volatile: read by allocation paths without locking; single-writer assumed — TODO confirm.
- private volatile List<Future<ActorSelection>> cohortFutures = Collections.emptyList();
+ private volatile State currentState = IDLE_STATE;
public TransactionChainProxy(ActorContext actorContext) {
this.actorContext = actorContext;
// Chain id is member-name scoped plus a process-wide counter for uniqueness.
- transactionChainId = actorContext.getCurrentMemberName() + "-" + System.currentTimeMillis();
+ transactionChainId = actorContext.getCurrentMemberName() + "-txn-chain-" + counter.incrementAndGet();
+ }
+
+ public String getTransactionChainId() {
+ return transactionChainId;
}
@Override
// Read-only Txs also pass through the ready gate and carry the previous Tx's ready
// futures so reads are ordered after the prior write Tx in the chain.
public DOMStoreReadTransaction newReadOnlyTransaction() {
- return new TransactionProxy(actorContext,
- TransactionProxy.TransactionType.READ_ONLY, this);
+ State localState = currentState;
+ checkReadyState(localState);
+
+ return new ChainedTransactionProxy(actorContext, TransactionProxy.TransactionType.READ_ONLY,
+ transactionChainId, localState.getPreviousReadyFutures());
}
@Override
public DOMStoreReadWriteTransaction newReadWriteTransaction() {
- return new TransactionProxy(actorContext,
- TransactionProxy.TransactionType.READ_WRITE, this);
+ return allocateWriteTransaction(TransactionProxy.TransactionType.READ_WRITE);
}
@Override
public DOMStoreWriteTransaction newWriteOnlyTransaction() {
- return new TransactionProxy(actorContext,
- TransactionProxy.TransactionType.WRITE_ONLY, this);
+ return allocateWriteTransaction(TransactionProxy.TransactionType.WRITE_ONLY);
}
@Override
public void close() {
// Transition first so concurrent allocators fail fast, then tell every shard.
+ currentState = CLOSED_STATE;
+
// Send a close transaction chain request to each and every shard
actorContext.broadcast(new CloseTransactionChain(transactionChainId));
}
- public String getTransactionChainId() {
- return transactionChainId;
// Allocates the next write Tx in the chain: snapshot the state, verify the previous
// Tx is ready, hand its ready futures to the new Tx, then mark the chain Allocated.
+ private ChainedTransactionProxy allocateWriteTransaction(TransactionProxy.TransactionType type) {
+ State localState = currentState;
+
+ checkReadyState(localState);
+
+ // Pass the ready Futures from the previous Tx.
+ ChainedTransactionProxy txProxy = new ChainedTransactionProxy(actorContext, type,
+ transactionChainId, localState.getPreviousReadyFutures());
+
+ currentState = new Allocated(txProxy);
+
+ return txProxy;
}
- public void onTransactionReady(List<Future<ActorSelection>> cohortFutures){
- this.cohortFutures = cohortFutures;
// Throws IllegalStateException (via Preconditions) if the previous Tx isn't ready,
// or TransactionChainClosedException when the chain is CLOSED.
+ private void checkReadyState(State state) {
+ Preconditions.checkState(state.isReady(), "Previous transaction is not ready yet");
}
- public void waitTillCurrentTransactionReady(){
- try {
- Await.result(Futures
- .sequence(this.cohortFutures, actorContext.getActorSystem().dispatcher()),
- actorContext.getOperationDuration());
- } catch (Exception e) {
- throw new IllegalStateException("Failed when waiting for transaction on a chain to become ready", e);
// TransactionProxy specialization for chained Txs: defers sending CreateTransaction
// until the previous Tx's ready futures complete, replacing the old blocking
// waitTillCurrentTransactionReady() with a fully asynchronous Promise-based scheme.
+ private static class ChainedTransactionProxy extends TransactionProxy {
+
+ /**
+ * Stores the ready Futures from the previous Tx in the chain.
+ */
+ private final List<Future<ActorSelection>> previousReadyFutures;
+
+ /**
+ * Stores the ready Futures from this transaction when it is readied.
+ */
+ private volatile List<Future<ActorSelection>> readyFutures;
+
+ private ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
+ String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
+ super(actorContext, transactionType, transactionChainId);
+ this.previousReadyFutures = previousReadyFutures;
+ }
+
+ List<Future<ActorSelection>> getReadyFutures() {
+ return readyFutures;
+ }
+
+ // Ready iff onTransactionReady has been invoked (readyFutures is volatile).
+ boolean isReady() {
+ return readyFutures != null;
+ }
+
+ @Override
+ protected void onTransactionReady(List<Future<ActorSelection>> readyFutures) {
+ LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
+ readyFutures.size(), getTransactionChainId());
+ this.readyFutures = readyFutures;
+ }
+
+ /**
+ * This method is overridden to ensure the previous Tx's ready operations complete
+ * before we create the next shard Tx in the chain to avoid creation failures if the
+ * previous Tx's ready operations haven't completed yet.
+ */
+ @Override
+ protected Future<Object> sendCreateTransaction(final ActorSelection shard,
+ final Object serializedCreateMessage) {
+
+ // Check if there are any previous ready Futures, otherwise let the super class handle it.
+ if(previousReadyFutures.isEmpty()) {
+ return super.sendCreateTransaction(shard, serializedCreateMessage);
+ }
+
+ // Combine the ready Futures into 1.
+ Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
+ previousReadyFutures, getActorContext().getActorSystem().dispatcher());
+
+ // Add a callback for completion of the combined Futures.
+ final Promise<Object> createTxPromise = akka.dispatch.Futures.promise();
+ OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
+ @Override
+ public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
+ if(failure != null) {
+ // A Ready Future failed so fail the returned Promise.
+ createTxPromise.failure(failure);
+ } else {
+ LOG.debug("Previous Tx readied - sending CreateTransaction for {} on chain {}",
+ getIdentifier(), getTransactionChainId());
+
+ // Send the CreateTx message and use the resulting Future to complete the
+ // returned Promise.
+ createTxPromise.completeWith(getActorContext().executeOperationAsync(shard,
+ serializedCreateMessage));
+ }
+ }
+ };
+
+ combinedFutures.onComplete(onComplete, getActorContext().getActorSystem().dispatcher());
+
+ return createTxPromise.future();
}
}
}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorSelection;
+import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
-
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.FinalizablePhantomReference;
import com.google.common.base.FinalizableReferenceQueue;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;
-
-import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-import scala.Function1;
import scala.concurrent.Future;
-import scala.runtime.AbstractFunction1;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
+import scala.concurrent.Promise;
+import scala.concurrent.duration.FiniteDuration;
/**
* TransactionProxy acts as a proxy for one or more transactions that were created on a remote shard
*/
public class TransactionProxy implements DOMStoreReadWriteTransaction {
// The TransactionChainProxy back-reference is replaced by a plain chain-id string,
// decoupling TransactionProxy from the chain implementation.
- private final TransactionChainProxy transactionChainProxy;
-
-
-
- public enum TransactionType {
+ public static enum TransactionType {
READ_ONLY,
WRITE_ONLY,
READ_WRITE
}
// Identity mapper: scala Function1 replaced by Akka's Java-friendly Mapper.
- static Function1<Throwable, Throwable> SAME_FAILURE_TRANSFORMER = new AbstractFunction1<
- Throwable, Throwable>() {
+ static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER =
+ new Mapper<Throwable, Throwable>() {
@Override
public Throwable apply(Throwable failure) {
return failure;
private static final AtomicLong counter = new AtomicLong();
- private static final Logger
- LOG = LoggerFactory.getLogger(TransactionProxy.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
+ /**
+ * Time interval in between transaction create retries.
+ */
+ private static final FiniteDuration CREATE_TX_TRY_INTERVAL =
+ FiniteDuration.create(1, TimeUnit.SECONDS);
/**
* Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
private List<ActorSelection> remoteTransactionActors;
private AtomicBoolean remoteTransactionActorsMB;
// Per-shard TransactionContext map replaced by per-shard TransactionFutureCallback,
// which buffers operations until the shard Tx is asynchronously created.
- private final Map<String, TransactionContext> remoteTransactionPaths = new HashMap<>();
+ /**
+ * Stores the create transaction results per shard.
+ */
+ private final Map<String, TransactionFutureCallback> txFutureCallbackMap = new HashMap<>();
private final TransactionType transactionType;
private final ActorContext actorContext;
private final TransactionIdentifier identifier;
+ private final String transactionChainId;
private final SchemaContext schemaContext;
private boolean inReadyState;
// Non-chained convenience constructor: empty chain id means "not part of a chain".
public TransactionProxy(ActorContext actorContext, TransactionType transactionType) {
- this(actorContext, transactionType, null);
- }
-
- @VisibleForTesting
- List<Future<Object>> getRecordedOperationFutures() {
- List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
- }
-
- return recordedOperationFutures;
+ this(actorContext, transactionType, "");
}
- public TransactionProxy(ActorContext actorContext, TransactionType transactionType, TransactionChainProxy transactionChainProxy) {
+ public TransactionProxy(ActorContext actorContext, TransactionType transactionType,
+ String transactionChainId) {
this.actorContext = Preconditions.checkNotNull(actorContext,
"actorContext should not be null");
this.transactionType = Preconditions.checkNotNull(transactionType,
"transactionType should not be null");
this.schemaContext = Preconditions.checkNotNull(actorContext.getSchemaContext(),
"schemaContext should not be null");
- this.transactionChainProxy = transactionChainProxy;
+ this.transactionChainId = transactionChainId;
String memberName = actorContext.getCurrentMemberName();
if(memberName == null){
new TransactionProxyCleanupPhantomReference(this);
phantomReferenceCache.put(cleanup, cleanup);
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("Created txn {} of type {}", identifier, transactionType);
+
+ LOG.debug("Created txn {} of type {} on chain {}", identifier, transactionType, transactionChainId);
+ }
+
+ @VisibleForTesting
+ // Collects recorded operation Futures from every shard whose TransactionContext has
+ // already been created; shards still pending creation contribute nothing.
+ List<Future<Object>> getRecordedOperationFutures() {
+ List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+ }
+ }
+
+ return recordedOperationFutures;
+ }
+
+ @VisibleForTesting
+ // True if at least one shard TransactionContext has been created.
+ boolean hasTransactionContext() {
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ return true;
+ }
}
+
+ return false;
}
@Override
// Read path: if the shard Tx already exists, read directly; otherwise queue a
// TransactionOperation to run once the shard Tx is created, bridging the result
// through a SettableFuture wrapped as a CheckedFuture.
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} read {}", identifier, path);
+ LOG.debug("Tx {} read {}", identifier, path);
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future;
+ if(transactionContext != null) {
+ future = transactionContext.readData(path);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture = SettableFuture.create();
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ Futures.addCallback(transactionContext.readData(path),
+ new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
+ @Override
+ public void onSuccess(Optional<NormalizedNode<?, ?>> data) {
+ proxyFuture.set(data);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ proxyFuture.setException(t);
+ }
+ });
+ }
+ });
+
+ future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
- createTransactionIfMissing(actorContext, path);
- return transactionContext(path).readData(path);
+ return future;
}
@Override
// Existence check: same deferred-operation pattern as read(), but for a Boolean result.
- public CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path) {
+ public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Exists operation on write-only transaction is not allowed");
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} exists {}", identifier, path);
+ LOG.debug("Tx {} exists {}", identifier, path);
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+
+ CheckedFuture<Boolean, ReadFailedException> future;
+ if(transactionContext != null) {
+ future = transactionContext.dataExists(path);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ final SettableFuture<Boolean> proxyFuture = SettableFuture.create();
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ Futures.addCallback(transactionContext.dataExists(path),
+ new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean exists) {
+ proxyFuture.set(exists);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ proxyFuture.setException(t);
+ }
+ });
+ }
+ });
+
+ future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
- createTransactionIfMissing(actorContext, path);
- return transactionContext(path).dataExists(path);
+ return future;
}
// Body elided by the diff; presumably rejects modifications after ready() — TODO confirm.
private void checkModificationState() {
}
@Override
// Write: execute immediately when the shard Tx exists, otherwise queue the operation
// (fire-and-forget — no result Future needed, unlike read/exists).
- public void write(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkModificationState();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} write {}", identifier, path);
- }
- createTransactionIfMissing(actorContext, path);
+ LOG.debug("Tx {} write {}", identifier, path);
- transactionContext(path).writeData(path, data);
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.writeData(path, data);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.writeData(path, data);
+ }
+ });
+ }
}
@Override
// Merge: same immediate-or-deferred pattern as write().
- public void merge(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkModificationState();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} merge {}", identifier, path);
- }
- createTransactionIfMissing(actorContext, path);
+ LOG.debug("Tx {} merge {}", identifier, path);
- transactionContext(path).mergeData(path, data);
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.mergeData(path, data);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.mergeData(path, data);
+ }
+ });
+ }
}
@Override
// Delete: same immediate-or-deferred pattern as write()/merge().
- public void delete(YangInstanceIdentifier path) {
+ public void delete(final YangInstanceIdentifier path) {
checkModificationState();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} delete {}", identifier, path);
- }
- createTransactionIfMissing(actorContext, path);
- transactionContext(path).deleteData(path);
+ LOG.debug("Tx {} delete {}", identifier, path);
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.deleteData(path);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.deleteData(path);
+ }
+ });
+ }
}
@Override
// Readies every per-shard transaction. Shards whose Tx isn't created yet get a
// deferred ready via an Akka Promise completed when the shard Tx materializes.
// The cohort Futures are handed to the subclass hook (onTransactionReady) and to
// the returned ThreePhaseCommitCohortProxy.
inReadyState = true;
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
- remoteTransactionPaths.size());
- }
+ LOG.debug("Tx {} Readying {} transactions for commit", identifier,
+ txFutureCallbackMap.size());
+
List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+
+ LOG.debug("Tx {} Readying transaction for shard {} chain {}", identifier,
+ txFutureCallback.getShardName(), transactionChainId);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Readying transaction for shard {}", identifier,
- transactionContext.getShardName());
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ cohortFutures.add(transactionContext.readyTransaction());
+ } else {
+ // The shard Tx hasn't been created yet so create a promise to ready the Tx later
+ // after it's created.
+ final Promise<ActorSelection> cohortPromise = akka.dispatch.Futures.promise();
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ cohortPromise.completeWith(transactionContext.readyTransaction());
+ }
+ });
+
+ cohortFutures.add(cohortPromise.future());
}
- cohortFutures.add(transactionContext.readyTransaction());
}
// Subclass hook replaces the old hard dependency on TransactionChainProxy.
- if(transactionChainProxy != null){
- transactionChainProxy.onTransactionReady(cohortFutures);
- }
+ onTransactionReady(cohortFutures);
return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures,
identifier.toString());
}
+ /**
+ * Method for derived classes to be notified when the transaction has been readied.
+ * Default implementation is a no-op; ChainedTransactionProxy overrides it.
+ *
+ * @param cohortFutures the cohort Futures for each shard transaction.
+ */
+ protected void onTransactionReady(List<Future<ActorSelection>> cohortFutures) {
+ }
+
+ /**
+ * Method called to send a CreateTransaction message to a shard. Overridable so a
+ * chained Tx can delay sending until the previous Tx's ready futures complete.
+ *
+ * @param shard the shard actor to send to
+ * @param serializedCreateMessage the serialized message to send
+ * @return the response Future
+ */
+ protected Future<Object> sendCreateTransaction(ActorSelection shard,
+ Object serializedCreateMessage) {
+ return actorContext.executeOperationAsync(shard, serializedCreateMessage);
+ }
+
@Override
public Object getIdentifier() {
return this.identifier;
@Override
public void close() {
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- transactionContext.closeTransaction();
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.closeTransaction();
+ } else {
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.closeTransaction();
+ }
+ });
+ }
}
- remoteTransactionPaths.clear();
+ txFutureCallbackMap.clear();
if(transactionType == TransactionType.READ_ONLY) {
remoteTransactionActors.clear();
}
}
- private TransactionContext transactionContext(YangInstanceIdentifier path){
+ private String shardNameFromIdentifier(YangInstanceIdentifier path){
+ return ShardStrategyFactory.getStrategy(path).findShard(path);
+ }
+
+ private TransactionFutureCallback getOrCreateTxFutureCallback(YangInstanceIdentifier path) {
String shardName = shardNameFromIdentifier(path);
- return remoteTransactionPaths.get(shardName);
+ TransactionFutureCallback txFutureCallback = txFutureCallbackMap.get(shardName);
+ if(txFutureCallback == null) {
+ Future<ActorSelection> findPrimaryFuture = actorContext.findPrimaryShardAsync(shardName);
+
+ final TransactionFutureCallback newTxFutureCallback =
+ new TransactionFutureCallback(shardName);
+
+ txFutureCallback = newTxFutureCallback;
+ txFutureCallbackMap.put(shardName, txFutureCallback);
+
+ findPrimaryFuture.onComplete(new OnComplete<ActorSelection>() {
+ @Override
+ public void onComplete(Throwable failure, ActorSelection primaryShard) {
+ if(failure != null) {
+ newTxFutureCallback.onComplete(failure, null);
+ } else {
+ newTxFutureCallback.setPrimaryShard(primaryShard);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ }
+
+ return txFutureCallback;
}
- private String shardNameFromIdentifier(YangInstanceIdentifier path){
- return ShardStrategyFactory.getStrategy(path).findShard(path);
+ public String getTransactionChainId() {
+ return transactionChainId;
+ }
+
+ protected ActorContext getActorContext() {
+ return actorContext;
+ }
+
+ /**
+ * Interface for a transaction operation to be invoked later.
+ */
+ private static interface TransactionOperation {
+ void invoke(TransactionContext transactionContext);
}
- private void createTransactionIfMissing(ActorContext actorContext,
- YangInstanceIdentifier path) {
+ /**
+ * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
+ * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
+ * retry task after a short delay.
+ * <p>
+ * The end result from a completed CreateTransaction message is a TransactionContext that is
+ * used to perform transaction operations. Transaction operations that occur before the
+ * CreateTransaction completes are cached and executed once the CreateTransaction completes,
+ * successfully or not.
+ */
+ private class TransactionFutureCallback extends OnComplete<Object> {
+
+ /**
+ * The list of transaction operations to execute once the CreateTransaction completes.
+ */
+ @GuardedBy("txOperationsOnComplete")
+ private final List<TransactionOperation> txOperationsOnComplete = Lists.newArrayList();
+
+ /**
+ * The TransactionContext resulting from the CreateTransaction reply.
+ */
+ private volatile TransactionContext transactionContext;
+
+ /**
+ * The target primary shard.
+ */
+ private volatile ActorSelection primaryShard;
+
+ private volatile int createTxTries = (int) (actorContext.getDatastoreContext().
+ getShardLeaderElectionTimeout().duration().toMillis() /
+ CREATE_TX_TRY_INTERVAL.toMillis());
+
+ private final String shardName;
+
+ TransactionFutureCallback(String shardName) {
+ this.shardName = shardName;
+ }
+
+ String getShardName() {
+ return shardName;
+ }
- if(transactionChainProxy != null){
- transactionChainProxy.waitTillCurrentTransactionReady();
+ TransactionContext getTransactionContext() {
+ return transactionContext;
}
- String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
- TransactionContext transactionContext =
- remoteTransactionPaths.get(shardName);
+ /**
+ * Sets the target primary shard and initiates a CreateTransaction try.
+ */
+ void setPrimaryShard(ActorSelection primaryShard) {
+ LOG.debug("Tx {} Primary shard found - trying create transaction", identifier);
- if (transactionContext != null) {
- // A transaction already exists with that shard
- return;
+ this.primaryShard = primaryShard;
+ tryCreateTransaction();
}
- try {
- Optional<ActorSelection> primaryShard = actorContext.findPrimaryShard(shardName);
- if (!primaryShard.isPresent()) {
- throw new PrimaryNotFoundException("Primary could not be found for shard " + shardName);
+        /**
+         * Adds a TransactionOperation to be executed after the CreateTransaction completes.
+         * If the TransactionContext is already available, the operation is invoked
+         * immediately instead of being cached.
+         *
+         * @param operation the operation to cache or invoke
+         */
+        void addTxOperationOnComplete(TransactionOperation operation) {
+            synchronized(txOperationsOnComplete) {
+                if(transactionContext == null) {
+                    // Fix: the format string had a trailing "{}" placeholder with no
+                    // matching argument, which logged a literal "{}".
+                    LOG.debug("Tx {} Adding operation on complete", identifier);
+
+                    txOperationsOnComplete.add(operation);
+                } else {
+                    operation.invoke(transactionContext);
+                }
            }
+        }
- Object response = actorContext.executeOperation(primaryShard.get(),
- new CreateTransaction(identifier.toString(), this.transactionType.ordinal(),
+ /**
+ * Performs a CreateTransaction try async.
+ */
+ private void tryCreateTransaction() {
+ Future<Object> createTxFuture = sendCreateTransaction(primaryShard,
+ new CreateTransaction(identifier.toString(),
+ TransactionProxy.this.transactionType.ordinal(),
getTransactionChainId()).toSerializable());
- if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
- CreateTransactionReply reply =
- CreateTransactionReply.fromSerializable(response);
- String transactionPath = reply.getTransactionPath();
+ createTxFuture.onComplete(this, actorContext.getActorSystem().dispatcher());
+ }
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
+ @Override
+ public void onComplete(Throwable failure, Object response) {
+ if(failure instanceof NoShardLeaderException) {
+ // There's no leader for the shard yet - schedule and try again, unless we're out
+ // of retries. Note: createTxTries is volatile as it may be written by different
+ // threads however not concurrently, therefore decrementing it non-atomically here
+ // is ok.
+ if(--createTxTries > 0) {
+ LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
+ identifier, shardName);
+
+ actorContext.getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
+ new Runnable() {
+ @Override
+ public void run() {
+ tryCreateTransaction();
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ return;
}
- ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
+ }
- if (transactionType == TransactionType.READ_ONLY) {
- // Add the actor to the remoteTransactionActors list for access by the
- // cleanup PhantonReference.
- remoteTransactionActors.add(transactionActor);
+ // Create the TransactionContext from the response or failure and execute delayed
+ // TransactionOperations. This entire section is done atomically (ie synchronized) with
+ // respect to #addTxOperationOnComplete to handle timing issues and ensure no
+ // TransactionOperation is missed and that they are processed in the order they occurred.
+ synchronized(txOperationsOnComplete) {
+ // Store the new TransactionContext locally until we've completed invoking the
+ // TransactionOperations. This avoids thread timing issues which could cause
+ // out-of-order TransactionOperations. Eg, on a modification operation, if the
+ // TransactionContext is non-null, then we directly call the TransactionContext.
+ // However, at the same time, the code may be executing the cached
+                // TransactionOperations. So to avoid this timing issue, we don't publish the
+ // TransactionContext until after we've executed all cached TransactionOperations.
+ TransactionContext localTransactionContext;
+ if(failure != null) {
+ LOG.debug("Tx {} Creating NoOpTransaction because of error: {}", identifier,
+ failure.getMessage());
+
+ localTransactionContext = new NoOpTransactionContext(failure, identifier);
+ } else if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
+ localTransactionContext = createValidTransactionContext(
+ CreateTransactionReply.fromSerializable(response));
+ } else {
+ IllegalArgumentException exception = new IllegalArgumentException(String.format(
+ "Invalid reply type %s for CreateTransaction", response.getClass()));
+
+ localTransactionContext = new NoOpTransactionContext(exception, identifier);
+ }
- // Write to the memory barrier volatile to publish the above update to the
- // remoteTransactionActors list for thread visibility.
- remoteTransactionActorsMB.set(true);
+ for(TransactionOperation oper: txOperationsOnComplete) {
+ oper.invoke(localTransactionContext);
}
- transactionContext = new TransactionContextImpl(shardName, transactionPath,
- transactionActor, identifier, actorContext, schemaContext);
+ txOperationsOnComplete.clear();
- remoteTransactionPaths.put(shardName, transactionContext);
- } else {
- throw new IllegalArgumentException(String.format(
- "Invalid reply type {} for CreateTransaction", response.getClass()));
- }
- } catch (Exception e) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+ // We're done invoking the TransactionOperations so we can now publish the
+ // TransactionContext.
+ transactionContext = localTransactionContext;
}
- remoteTransactionPaths
- .put(shardName, new NoOpTransactionContext(shardName, e, identifier));
}
- }
- public String getTransactionChainId() {
- if(transactionChainProxy == null){
- return "";
+ private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
+ String transactionPath = reply.getTransactionPath();
+
+ LOG.debug("Tx {} Received transaction actor path {}", identifier, transactionPath);
+
+ ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
+
+ if (transactionType == TransactionType.READ_ONLY) {
+ // Add the actor to the remoteTransactionActors list for access by the
+            // cleanup PhantomReference.
+ remoteTransactionActors.add(transactionActor);
+
+ // Write to the memory barrier volatile to publish the above update to the
+ // remoteTransactionActors list for thread visibility.
+ remoteTransactionActorsMB.set(true);
+ }
+
+ // TxActor is always created where the leader of the shard is.
+ // Check if TxActor is created in the same node
+ boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
+
+ return new TransactionContextImpl(transactionPath, transactionActor, identifier,
+ actorContext, schemaContext, isTxActorLocal, reply.getVersion());
}
- return transactionChainProxy.getTransactionChainId();
}
-
private interface TransactionContext {
- String getShardName();
-
void closeTransaction();
Future<ActorSelection> readyTransaction();
private static abstract class AbstractTransactionContext implements TransactionContext {
protected final TransactionIdentifier identifier;
- protected final String shardName;
protected final List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- AbstractTransactionContext(String shardName, TransactionIdentifier identifier) {
- this.shardName = shardName;
+ AbstractTransactionContext(TransactionIdentifier identifier) {
this.identifier = identifier;
}
- @Override
- public String getShardName() {
- return shardName;
- }
-
@Override
public List<Future<Object>> getRecordedOperationFutures() {
return recordedOperationFutures;
private final ActorContext actorContext;
private final SchemaContext schemaContext;
- private final String actorPath;
+ private final String transactionPath;
private final ActorSelection actor;
-
- private TransactionContextImpl(String shardName, String actorPath,
- ActorSelection actor, TransactionIdentifier identifier, ActorContext actorContext,
- SchemaContext schemaContext) {
- super(shardName, identifier);
- this.actorPath = actorPath;
+ private final boolean isTxActorLocal;
+ private final int remoteTransactionVersion;
+
+ private TransactionContextImpl(String transactionPath, ActorSelection actor, TransactionIdentifier identifier,
+ ActorContext actorContext, SchemaContext schemaContext,
+ boolean isTxActorLocal, int remoteTransactionVersion) {
+ super(identifier);
+ this.transactionPath = transactionPath;
this.actor = actor;
this.actorContext = actorContext;
this.schemaContext = schemaContext;
+ this.isTxActorLocal = isTxActorLocal;
+ this.remoteTransactionVersion = remoteTransactionVersion;
}
private ActorSelection getActor() {
@Override
public void closeTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} closeTransaction called", identifier);
- }
+ LOG.debug("Tx {} closeTransaction called", identifier);
+
actorContext.sendOperationAsync(getActor(), new CloseTransaction().toSerializable());
}
@Override
public Future<ActorSelection> readyTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
+ LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
identifier, recordedOperationFutures.size());
- }
+
// Send the ReadyTransaction message to the Tx actor.
+ ReadyTransaction readyTransaction = new ReadyTransaction();
final Future<Object> replyFuture = actorContext.executeOperationAsync(getActor(),
- new ReadyTransaction().toSerializable());
+ isTxActorLocal ? readyTransaction : readyTransaction.toSerializable());
// Combine all the previously recorded put/merge/delete operation reply Futures and the
// ReadyTransactionReply Future into one Future. If any one fails then the combined
// Transform the combined Future into a Future that returns the cohort actor path from
// the ReadyTransactionReply. That's the end result of the ready operation.
- return combinedFutures.transform(new AbstractFunction1<Iterable<Object>, ActorSelection>() {
+ return combinedFutures.transform(new Mapper<Iterable<Object>, ActorSelection>() {
@Override
- public ActorSelection apply(Iterable<Object> notUsed) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
+ public ActorSelection checkedApply(Iterable<Object> notUsed) {
+ LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
identifier);
- }
+
// At this point all the Futures succeeded and we need to extract the cohort
// actor path from the ReadyTransactionReply. For the recorded operations, they
// don't return any data so we're only interested that they completed
// Note the Future get call here won't block as it's complete.
Object serializedReadyReply = replyFuture.value().get().get();
- if(serializedReadyReply.getClass().equals(
- ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(
- serializedReadyReply);
+ if (serializedReadyReply instanceof ReadyTransactionReply) {
+ return actorContext.actorSelection(((ReadyTransactionReply)serializedReadyReply).getCohortPath());
+
+ } else if(serializedReadyReply.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
+ ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
+ String cohortPath = reply.getCohortPath();
+
+ // In Helium we used to return the local path of the actor which represented
+ // a remote ThreePhaseCommitCohort. The local path would then be converted to
+ // a remote path using this resolvePath method. To maintain compatibility with
+ // a Helium node we need to continue to do this conversion.
+ // At some point in the future when upgrades from Helium are not supported
+ // we could remove this code to resolvePath and just use the cohortPath as the
+ // resolved cohortPath
+ if(TransactionContextImpl.this.remoteTransactionVersion < CreateTransaction.HELIUM_1_VERSION) {
+ cohortPath = actorContext.resolvePath(transactionPath, cohortPath);
+ }
+
+ return actorContext.actorSelection(cohortPath);
- return actorContext.actorSelection(reply.getCohortPath());
} else {
// Throwing an exception here will fail the Future.
-
throw new IllegalArgumentException(String.format("Invalid reply type {}",
serializedReadyReply.getClass()));
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+
+ DeleteData deleteData = new DeleteData(path);
recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
- new DeleteData(path).toSerializable()));
+ isTxActorLocal ? deleteData : deleteData.toSerializable()));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+
+ MergeData mergeData = new MergeData(path, data, schemaContext);
recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
- new MergeData(path, data, schemaContext).toSerializable()));
+ isTxActorLocal ? mergeData : mergeData.toSerializable()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
+
+ WriteData writeData = new WriteData(path, data, schemaContext);
recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
- new WriteData(path, data, schemaContext).toSerializable()));
+ isTxActorLocal ? writeData : writeData.toSerializable()));
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
final YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
+
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishReadData(path, returnFuture);
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData: verifying {} previous recorded operations",
+ LOG.debug("Tx {} readData: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
- }
+
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
Lists.newArrayList(recordedOperationFutures),
actorContext.getActorSystem().dispatcher());
+
OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData: a recorded operation failed: {}",
+ LOG.debug("Tx {} readData: a recorded operation failed: {}",
identifier, failure);
- }
returnFuture.setException(new ReadFailedException(
"The read could not be performed because a previous put, merge,"
+ "or delete operation failed", failure));
private void finishReadData(final YangInstanceIdentifier path,
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
+
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object readResponse) throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} read operation failed: {}", identifier, failure);
- }
+ LOG.debug("Tx {} read operation failed: {}", identifier, failure);
returnFuture.setException(new ReadFailedException(
"Error reading data for path " + path, failure));
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} read operation succeeded", identifier, failure);
- }
- if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,
- path, readResponse);
- if (reply.getNormalizedNode() == null) {
- returnFuture.set(Optional.<NormalizedNode<?, ?>>absent());
- } else {
- returnFuture.set(Optional.<NormalizedNode<?, ?>>of(
- reply.getNormalizedNode()));
- }
+ LOG.debug("Tx {} read operation succeeded", identifier, failure);
+
+ if (readResponse instanceof ReadDataReply) {
+ ReadDataReply reply = (ReadDataReply) readResponse;
+ returnFuture.set(Optional.<NormalizedNode<?, ?>>fromNullable(reply.getNormalizedNode()));
+
+ } else if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+ ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext, path, readResponse);
+ returnFuture.set(Optional.<NormalizedNode<?, ?>>fromNullable(reply.getNormalizedNode()));
+
} else {
returnFuture.setException(new ReadFailedException(
- "Invalid response reading data for path " + path));
+ "Invalid response reading data for path " + path));
}
}
}
};
+ ReadData readData = new ReadData(path);
Future<Object> readFuture = actorContext.executeOperationAsync(getActor(),
- new ReadData(path).toSerializable());
+ isTxActorLocal ? readData : readData.toSerializable());
+
readFuture.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
}
public CheckedFuture<Boolean, ReadFailedException> dataExists(
final YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+
final SettableFuture<Boolean> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishDataExists(path, returnFuture);
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
+ LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
- }
+
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
+ LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
identifier, failure);
- }
returnFuture.setException(new ReadFailedException(
"The data exists could not be performed because a previous "
+ "put, merge, or delete operation failed", failure));
private void finishDataExists(final YangInstanceIdentifier path,
final SettableFuture<Boolean> returnFuture) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
+
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object response) throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
- }
+ LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
returnFuture.setException(new ReadFailedException(
"Error checking data exists for path " + path, failure));
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
- }
- if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
- returnFuture.set(Boolean.valueOf(DataExistsReply.
- fromSerializable(response).exists()));
+ LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+
+ if (response instanceof DataExistsReply) {
+ returnFuture.set(Boolean.valueOf(((DataExistsReply) response).exists()));
+
+ } else if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
+ returnFuture.set(Boolean.valueOf(DataExistsReply.fromSerializable(response).exists()));
+
} else {
returnFuture.setException(new ReadFailedException(
"Invalid response checking exists for path " + path));
}
};
+ DataExists dataExists = new DataExists(path);
Future<Object> future = actorContext.executeOperationAsync(getActor(),
- new DataExists(path).toSerializable());
+ isTxActorLocal ? dataExists : dataExists.toSerializable());
+
future.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
}
}
private final Logger LOG = LoggerFactory.getLogger(NoOpTransactionContext.class);
- private final Exception failure;
+ private final Throwable failure;
- public NoOpTransactionContext(String shardName, Exception failure,
- TransactionIdentifier identifier){
- super(shardName, identifier);
+ public NoOpTransactionContext(Throwable failure, TransactionIdentifier identifier){
+ super(identifier);
this.failure = failure;
}
@Override
public void closeTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
- }
+ LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
}
@Override
public Future<ActorSelection> readyTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction called", identifier);
- }
+ LOG.debug("Tx {} readyTransaction called", identifier);
return akka.dispatch.Futures.failed(failure);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
- YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
- }
+ YangInstanceIdentifier path) {
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error reading data for path " + path, failure));
}
@Override
public CheckedFuture<Boolean, ReadFailedException> dataExists(
- YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
- }
+ YangInstanceIdentifier path) {
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error checking exists for path " + path, failure));
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.compat;
+
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.japi.Creator;
+
+/**
+ * An actor to maintain backwards compatibility for the base Helium version where the 3-phase commit
+ * messages don't contain the transactionId. This actor just forwards a new message containing the
+ * transactionId to the parent Shard.
+ *
+ * @author Thomas Pantelis
+ */
+public class BackwardsCompatibleThreePhaseCommitCohort extends AbstractUntypedActor {
+
+    private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
+
+    // The transactionId stamped onto every 3-phase commit message forwarded to the parent Shard.
+    private final String transactionId;
+
+    private BackwardsCompatibleThreePhaseCommitCohort(String transactionId) {
+        this.transactionId = transactionId;
+    }
+
+    @Override
+    public void handleReceive(Object message) throws Exception {
+        if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
+            LOG.debug("BackwardsCompatibleThreePhaseCommitCohort CanCommitTransaction");
+
+            getContext().parent().forward(new CanCommitTransaction(transactionId).toSerializable(),
+                    getContext());
+        } else if(message.getClass().equals(PreCommitTransaction.SERIALIZABLE_CLASS)) {
+            LOG.debug("BackwardsCompatibleThreePhaseCommitCohort PreCommitTransaction");
+
+            // The Shard doesn't need the PreCommitTransaction message so just return the reply here.
+            getSender().tell(new PreCommitTransactionReply().toSerializable(), self());
+        } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
+            LOG.debug("BackwardsCompatibleThreePhaseCommitCohort CommitTransaction");
+
+            getContext().parent().forward(new CommitTransaction(transactionId).toSerializable(),
+                    getContext());
+
+            // We're done now - we can self-destruct
+            self().tell(PoisonPill.getInstance(), self());
+        } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
+            LOG.debug("BackwardsCompatibleThreePhaseCommitCohort AbortTransaction");
+
+            getContext().parent().forward(new AbortTransaction(transactionId).toSerializable(),
+                    getContext());
+            self().tell(PoisonPill.getInstance(), self());
+        }
+        // NOTE(review): any other message type falls through unhandled here - presumably
+        // AbstractUntypedActor logs/handles unknown messages; confirm.
+    }
+
+    /**
+     * Creates the Akka Props for instantiating this actor.
+     *
+     * @param transactionId the ID to stamp onto the forwarded 3-phase commit messages
+     */
+    public static Props props(String transactionId) {
+        return Props.create(new BackwardsCompatibleThreePhaseCommitCohortCreator(transactionId));
+    }
+
+    // Serializable Creator required by the Props.create(Creator) contract.
+    private static class BackwardsCompatibleThreePhaseCommitCohortCreator
+            implements Creator<BackwardsCompatibleThreePhaseCommitCohort> {
+        private static final long serialVersionUID = 1L;
+
+        private final String transactionId;
+
+        BackwardsCompatibleThreePhaseCommitCohortCreator(String transactionId) {
+            this.transactionId = transactionId;
+        }
+
+        @Override
+        public BackwardsCompatibleThreePhaseCommitCohort create() throws Exception {
+            return new BackwardsCompatibleThreePhaseCommitCohort(transactionId);
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.typesafe.config.Config;
+
+/**
+ * Strategy interface for obtaining a Typesafe {@link Config} for the datastore.
+ */
+public interface ConfigurationReader {
+    /**
+     * Reads and returns the configuration.
+     *
+     * @return the parsed Config
+     */
+    Config read();
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+import java.io.File;
+
+/**
+ * ConfigurationReader that parses the akka configuration from a file on disk.
+ */
+public class FileConfigurationReader implements ConfigurationReader {
+
+    // Default location of the akka configuration file, relative to the working directory.
+    public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+
+    /**
+     * Reads and parses the file at {@link #AKKA_CONF_PATH}.
+     *
+     * @return the parsed Config
+     * @throws IllegalStateException if the file does not exist
+     */
+    @Override
+    public Config read() {
+        File defaultConfigFile = new File(AKKA_CONF_PATH);
+        // Include the expected path in the failure message to aid diagnosis.
+        Preconditions.checkState(defaultConfigFile.exists(), "%s is missing", AKKA_CONF_PATH);
+        return ConfigFactory.parseFile(defaultConfigFile);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+/**
+ * ConfigurationReader that obtains the configuration via {@link ConfigFactory#load()},
+ * i.e. the standard Typesafe Config classpath resources and system properties.
+ */
+public class ResourceConfigurationReader implements ConfigurationReader {
+    @Override
+    public Config read() {
+        return ConfigFactory.load();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+/**
+ * Exception thrown when attempting to find a local shard but it doesn't exist.
+ *
+ * @author Thomas Pantelis
+ */
+public class LocalShardNotFoundException extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * @param message detail message identifying the shard that was not found
+     */
+    public LocalShardNotFoundException(String message){
+        super(message);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+/**
+ * Exception indicating a shard has no current leader.
+ *
+ * @author Thomas Pantelis
+ */
+public class NoShardLeaderException extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * @param message detail message identifying the leaderless shard
+     */
+    public NoShardLeaderException(String message){
+        super(message);
+    }
+}
package org.opendaylight.controller.cluster.datastore.exceptions;
public class NotInitializedException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
public NotInitializedException(String message) {
super(message);
}
package org.opendaylight.controller.cluster.datastore.exceptions;
public class PrimaryNotFoundException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
public PrimaryNotFoundException(String message){
super(message);
}
package org.opendaylight.controller.cluster.datastore.exceptions;
public class TimeoutException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
public TimeoutException(String message, Exception e){
super(message, e);
}
package org.opendaylight.controller.cluster.datastore.exceptions;
public class UnknownMessageException extends Exception {
+ private static final long serialVersionUID = 1L;
private final Object message;
public UnknownMessageException(Object message) {
import java.io.Serializable;
public class ActorInitialized implements Serializable {
+ private static final long serialVersionUID = 1L;
}
import java.io.Serializable;
public class ActorNotInitialized implements Serializable {
+ private static final long serialVersionUID = 1L;
}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CanCommitTransactionReply implements SerializableMessage {
- public static Class<ThreePhaseCommitCohortMessages.CanCommitTransactionReply> SERIALIZABLE_CLASS =
+ public static final Class<ThreePhaseCommitCohortMessages.CanCommitTransactionReply> SERIALIZABLE_CLASS =
ThreePhaseCommitCohortMessages.CanCommitTransactionReply.class;
private final Boolean canCommit;
- public CanCommitTransactionReply(Boolean canCommit) {
+ public CanCommitTransactionReply(final Boolean canCommit) {
this.canCommit = canCommit;
}
return ThreePhaseCommitCohortMessages.CanCommitTransactionReply.newBuilder().setCanCommit(canCommit).build();
}
- public static CanCommitTransactionReply fromSerializable(Object message) {
+ public static CanCommitTransactionReply fromSerializable(final Object message) {
return new CanCommitTransactionReply(
((ThreePhaseCommitCohortMessages.CanCommitTransactionReply) message).getCanCommit());
}
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
public class CloseDataChangeListenerRegistration implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.CloseDataChangeListenerRegistration.class;
+ public static final Class<ListenerRegistrationMessages.CloseDataChangeListenerRegistration> SERIALIZABLE_CLASS =
+ ListenerRegistrationMessages.CloseDataChangeListenerRegistration.class;
@Override
public Object toSerializable() {
return ListenerRegistrationMessages.CloseDataChangeListenerRegistration.newBuilder().build();
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
public class CloseDataChangeListenerRegistrationReply implements SerializableMessage{
- public static Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.CloseDataChangeListenerRegistrationReply.class;
+ public static final Class<ListenerRegistrationMessages.CloseDataChangeListenerRegistrationReply> SERIALIZABLE_CLASS =
+ ListenerRegistrationMessages.CloseDataChangeListenerRegistrationReply.class;
@Override
public Object toSerializable() {
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class CloseTransaction implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CloseTransaction.class;
+ public static final Class<ShardTransactionMessages.CloseTransaction> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.CloseTransaction.class;
@Override
public Object toSerializable() {
return ShardTransactionMessages.CloseTransaction.newBuilder().build();
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
public class CloseTransactionChain implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS =
+ public static final Class<ShardTransactionChainMessages.CloseTransactionChain> SERIALIZABLE_CLASS =
ShardTransactionChainMessages.CloseTransactionChain.class;
private final String transactionChainId;
- public CloseTransactionChain(String transactionChainId){
+ public CloseTransactionChain(final String transactionChainId){
this.transactionChainId = transactionChainId;
}
.setTransactionChainId(transactionChainId).build();
}
- public static CloseTransactionChain fromSerializable(Object message){
+ public static CloseTransactionChain fromSerializable(final Object message){
ShardTransactionChainMessages.CloseTransactionChain closeTransactionChain
= (ShardTransactionChainMessages.CloseTransactionChain) message;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
public class CloseTransactionChainReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CloseTransactionChainReply.class;
+ public static final Class<ShardTransactionChainMessages.CloseTransactionChainReply> SERIALIZABLE_CLASS =
+ ShardTransactionChainMessages.CloseTransactionChainReply.class;
@Override
public Object toSerializable() {
return ShardTransactionChainMessages.CloseTransactionChainReply.newBuilder().build();
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class CloseTransactionReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CloseTransactionReply.class;
+ public static final Class<ShardTransactionMessages.CloseTransactionReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.CloseTransactionReply.class;
@Override
public Object toSerializable() {
return ShardTransactionMessages.CloseTransactionReply.newBuilder().build();
public class CreateTransaction implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
+ public static final Class<ShardTransactionMessages.CreateTransaction> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.CreateTransaction.class;
+
+ public static final int HELIUM_1_VERSION = 1;
+ public static final int CURRENT_VERSION = HELIUM_1_VERSION;
+
private final String transactionId;
private final int transactionType;
private final String transactionChainId;
+ private final int version;
public CreateTransaction(String transactionId, int transactionType) {
this(transactionId, transactionType, "");
}
public CreateTransaction(String transactionId, int transactionType, String transactionChainId) {
+ this(transactionId, transactionType, transactionChainId, CURRENT_VERSION);
+ }
+ private CreateTransaction(String transactionId, int transactionType, String transactionChainId,
+ int version) {
this.transactionId = transactionId;
this.transactionType = transactionType;
this.transactionChainId = transactionChainId;
-
+ this.version = version;
}
-
public String getTransactionId() {
return transactionId;
}
return transactionType;
}
+ public int getVersion() {
+ return version;
+ }
+
@Override
public Object toSerializable() {
return ShardTransactionMessages.CreateTransaction.newBuilder()
.setTransactionId(transactionId)
.setTransactionType(transactionType)
- .setTransactionChainId(transactionChainId).build();
+ .setTransactionChainId(transactionChainId)
+ .setMessageVersion(version).build();
}
public static CreateTransaction fromSerializable(Object message) {
ShardTransactionMessages.CreateTransaction createTransaction =
(ShardTransactionMessages.CreateTransaction) message;
return new CreateTransaction(createTransaction.getTransactionId(),
- createTransaction.getTransactionType(), createTransaction.getTransactionChainId());
+ createTransaction.getTransactionType(), createTransaction.getTransactionChainId(),
+ createTransaction.getMessageVersion());
}
public String getTransactionChainId() {
public class CreateTransactionReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransactionReply.class;
+ public static final Class<ShardTransactionMessages.CreateTransactionReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.CreateTransactionReply.class;
private final String transactionPath;
private final String transactionId;
+ private final int version;
- public CreateTransactionReply(String transactionPath,
- String transactionId) {
+ public CreateTransactionReply(final String transactionPath,
+ final String transactionId) {
+ this(transactionPath, transactionId, CreateTransaction.CURRENT_VERSION);
+ }
+
+ public CreateTransactionReply(final String transactionPath,
+ final String transactionId, final int version) {
this.transactionPath = transactionPath;
this.transactionId = transactionId;
+ this.version = version;
}
+
public String getTransactionPath() {
return transactionPath;
}
return transactionId;
}
+ public int getVersion() {
+ return version;
+ }
+
+ @Override
public Object toSerializable(){
return ShardTransactionMessages.CreateTransactionReply.newBuilder()
.setTransactionActorPath(transactionPath)
.setTransactionId(transactionId)
+ .setMessageVersion(version)
.build();
}
- public static CreateTransactionReply fromSerializable(Object serializable){
+ public static CreateTransactionReply fromSerializable(final Object serializable){
ShardTransactionMessages.CreateTransactionReply o = (ShardTransactionMessages.CreateTransactionReply) serializable;
- return new CreateTransactionReply(o.getTransactionActorPath(), o.getTransactionId());
+ return new CreateTransactionReply(o.getTransactionActorPath(), o.getTransactionId(), o.getMessageVersion());
}
}
import org.opendaylight.controller.protobuff.messages.datachange.notification.DataChangeListenerMessages;
public class DataChangedReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = DataChangeListenerMessages.DataChangedReply.class;
+ public static final Class<DataChangeListenerMessages.DataChangedReply> SERIALIZABLE_CLASS =
+ DataChangeListenerMessages.DataChangedReply.class;
@Override
public Object toSerializable() {
return DataChangeListenerMessages.DataChangedReply.newBuilder().build();
public class DataExists implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DataExists.class;
+ public static final Class<ShardTransactionMessages.DataExists> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.DataExists.class;
private final YangInstanceIdentifier path;
- public DataExists(YangInstanceIdentifier path) {
+ public DataExists(final YangInstanceIdentifier path) {
this.path = path;
}
InstanceIdentifierUtils.toSerializable(path)).build();
}
- public static DataExists fromSerializable(Object serializable){
+ public static DataExists fromSerializable(final Object serializable){
ShardTransactionMessages.DataExists o = (ShardTransactionMessages.DataExists) serializable;
return new DataExists(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class DataExistsReply implements SerializableMessage{
-
-
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DataExistsReply.class;
+ public static final Class<ShardTransactionMessages.DataExistsReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.DataExistsReply.class;
private final boolean exists;
- public DataExistsReply(boolean exists) {
+ public DataExistsReply(final boolean exists) {
this.exists = exists;
}
.setExists(exists).build();
}
- public static DataExistsReply fromSerializable(Object serializable){
+ public static DataExistsReply fromSerializable(final Object serializable){
ShardTransactionMessages.DataExistsReply o = (ShardTransactionMessages.DataExistsReply) serializable;
return new DataExistsReply(o.getExists());
}
public class DeleteData implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DeleteData.class;
+ public static final Class<ShardTransactionMessages.DeleteData> SERIALIZABLE_CLASS = ShardTransactionMessages.DeleteData.class;
private final YangInstanceIdentifier path;
- public DeleteData(YangInstanceIdentifier path) {
+ public DeleteData(final YangInstanceIdentifier path) {
this.path = path;
}
.setInstanceIdentifierPathArguments(InstanceIdentifierUtils.toSerializable(path)).build();
}
- public static DeleteData fromSerializable(Object serializable){
+ public static DeleteData fromSerializable(final Object serializable){
ShardTransactionMessages.DeleteData o = (ShardTransactionMessages.DeleteData) serializable;
return new DeleteData(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class DeleteDataReply implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DeleteDataReply.class;
+ public static final Class<ShardTransactionMessages.DeleteDataReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.DeleteDataReply.class;
@Override
public Object toSerializable() {
return ShardTransactionMessages.DeleteDataReply.newBuilder().build();
*/
public class FindLocalShard {
private final String shardName;
+ private final boolean waitUntilInitialized;
- public FindLocalShard(String shardName) {
+ public FindLocalShard(String shardName, boolean waitUntilInitialized) {
this.shardName = shardName;
+ this.waitUntilInitialized = waitUntilInitialized;
}
public String getShardName() {
return shardName;
}
+
+ public boolean isWaitUntilInitialized() {
+ return waitUntilInitialized;
+ }
}
*
*/
public class FindPrimary implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = FindPrimary.class;
+ public static final Class<FindPrimary> SERIALIZABLE_CLASS = FindPrimary.class;
+
private final String shardName;
+ private final boolean waitUntilInitialized;
- public FindPrimary(String shardName){
+ public FindPrimary(String shardName, boolean waitUntilInitialized){
Preconditions.checkNotNull(shardName, "shardName should not be null");
this.shardName = shardName;
+ this.waitUntilInitialized = waitUntilInitialized;
}
public String getShardName() {
return shardName;
}
- @Override
- public Object toSerializable() {
- return this;
- }
+ public boolean isWaitUntilInitialized() {
+ return waitUntilInitialized;
+ }
- public static FindPrimary fromSerializable(Object message){
- return (FindPrimary) message;
- }
+ @Override
+ public Object toSerializable() {
+ return this;
+ }
+
+ public static FindPrimary fromSerializable(Object message){
+ return (FindPrimary) message;
+ }
}
private final String transactionID;
private final DOMStoreThreePhaseCommitCohort cohort;
private final Modification modification;
+ private final boolean returnSerialized;
+ private final int txnClientVersion;
- public ForwardedReadyTransaction(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
- Modification modification) {
+ public ForwardedReadyTransaction(String transactionID, int txnClientVersion,
+ DOMStoreThreePhaseCommitCohort cohort, Modification modification,
+ boolean returnSerialized) {
this.transactionID = transactionID;
this.cohort = cohort;
this.modification = modification;
+ this.returnSerialized = returnSerialized;
+ this.txnClientVersion = txnClientVersion;
}
public String getTransactionID() {
public Modification getModification() {
return modification;
}
+
+ public boolean isReturnSerialized() {
+ return returnSerialized;
+ }
+
+ public int getTxnClientVersion() {
+ return txnClientVersion;
+ }
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class MergeDataReply implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.MergeDataReply.class;
+ public static final Class<ShardTransactionMessages.MergeDataReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.MergeDataReply.class;
@Override
public Object toSerializable() {
public class PreCommitTransaction implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransaction.class;
+ public static final Class<ThreePhaseCommitCohortMessages.PreCommitTransaction> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.PreCommitTransaction.class;
@Override
public Object toSerializable() {
public class PreCommitTransactionReply implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransactionReply.class;
+ public static final Class<ThreePhaseCommitCohortMessages.PreCommitTransactionReply> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.PreCommitTransactionReply.class;
@Override
public Object toSerializable() {
public class PrimaryFound implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = PrimaryFound.class;
+ public static final Class<PrimaryFound> SERIALIZABLE_CLASS = PrimaryFound.class;
private final String primaryPath;
- public PrimaryFound(String primaryPath) {
+ public PrimaryFound(final String primaryPath) {
this.primaryPath = primaryPath;
}
}
@Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
+ public boolean equals(final Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
PrimaryFound that = (PrimaryFound) o;
- if (!primaryPath.equals(that.primaryPath)) return false;
+ if (!primaryPath.equals(that.primaryPath)) {
+ return false;
+ }
return true;
}
return this;
}
- public static PrimaryFound fromSerializable(Object message){
+ public static PrimaryFound fromSerializable(final Object message){
return (PrimaryFound) message;
}
}
import com.google.common.base.Preconditions;
public class PrimaryNotFound implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = PrimaryNotFound.class;
+ public static final Class<PrimaryNotFound> SERIALIZABLE_CLASS = PrimaryNotFound.class;
private final String shardName;
- public PrimaryNotFound(String shardName){
+ public PrimaryNotFound(final String shardName){
Preconditions.checkNotNull(shardName, "shardName should not be null");
}
@Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
+ public boolean equals(final Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
PrimaryNotFound that = (PrimaryNotFound) o;
- if (shardName != null ? !shardName.equals(that.shardName) : that.shardName != null) return false;
+ if (shardName != null ? !shardName.equals(that.shardName) : that.shardName != null) {
+ return false;
+ }
return true;
}
return this;
}
- public static PrimaryNotFound fromSerializable(Object message){
+ public static PrimaryNotFound fromSerializable(final Object message){
return (PrimaryNotFound) message;
}
}
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class ReadData {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.ReadData.class;
+ public static final Class<ShardTransactionMessages.ReadData> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.ReadData.class;
private final YangInstanceIdentifier path;
- public ReadData(YangInstanceIdentifier path) {
+ public ReadData(final YangInstanceIdentifier path) {
this.path = path;
}
.build();
}
- public static ReadData fromSerializable(Object serializable){
+ public static ReadData fromSerializable(final Object serializable){
ShardTransactionMessages.ReadData o = (ShardTransactionMessages.ReadData) serializable;
return new ReadData(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
}
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class ReadyTransaction implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.ReadyTransaction.class;
+ public static final Class<ShardTransactionMessages.ReadyTransaction> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.ReadyTransaction.class;
@Override
public Object toSerializable() {
private final String cohortPath;
public ReadyTransactionReply(String cohortPath) {
-
this.cohortPath = cohortPath;
}
@Override
public ShardTransactionMessages.ReadyTransactionReply toSerializable() {
- return ShardTransactionMessages.ReadyTransactionReply.newBuilder().
- setActorPath(cohortPath).build();
+ return ShardTransactionMessages.ReadyTransactionReply.newBuilder()
+ .setActorPath(cohortPath)
+ .build();
}
public static ReadyTransactionReply fromSerializable(Object serializable) {
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
public class RegisterChangeListenerReply implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.RegisterChangeListenerReply.class;
+ public static final Class<ListenerRegistrationMessages.RegisterChangeListenerReply> SERIALIZABLE_CLASS =
+ ListenerRegistrationMessages.RegisterChangeListenerReply.class;
private final ActorPath listenerRegistrationPath;
- public RegisterChangeListenerReply(ActorPath listenerRegistrationPath) {
+ public RegisterChangeListenerReply(final ActorPath listenerRegistrationPath) {
this.listenerRegistrationPath = listenerRegistrationPath;
}
.setListenerRegistrationPath(listenerRegistrationPath.toString()).build();
}
- public static RegisterChangeListenerReply fromSerializable(ActorSystem actorSystem,Object serializable){
+ public static RegisterChangeListenerReply fromSerializable(final ActorSystem actorSystem,final Object serializable){
ListenerRegistrationMessages.RegisterChangeListenerReply o = (ListenerRegistrationMessages.RegisterChangeListenerReply) serializable;
return new RegisterChangeListenerReply(
actorSystem.actorFor(o.getListenerRegistrationPath()).path()
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class WriteDataReply implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.WriteDataReply.class;
+ public static final Class<ShardTransactionMessages.WriteDataReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.WriteDataReply.class;
@Override
public Object toSerializable() {
return ShardTransactionMessages.WriteDataReply.newBuilder().build();
* MergeModification stores all the parameters required to merge data into the specified path
*/
public class MergeModification extends WriteModification {
+ private static final long serialVersionUID = 1L;
- public MergeModification(YangInstanceIdentifier path, NormalizedNode data,
- SchemaContext schemaContext) {
+ public MergeModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ final SchemaContext schemaContext) {
super(path, data, schemaContext);
}
@Override
- public void apply(DOMStoreWriteTransaction transaction) {
+ public void apply(final DOMStoreWriteTransaction transaction) {
transaction.merge(path, data);
}
- public static MergeModification fromSerializable(Object serializable, SchemaContext schemaContext) {
+ public static MergeModification fromSerializable(final Object serializable, final SchemaContext schemaContext) {
PersistentMessages.Modification o = (PersistentMessages.Modification) serializable;
Decoded decoded = new NormalizedNodeToNodeCodec(schemaContext).decode(o.getPath(), o.getData());
return new MergeModification(decoded.getDecodedPath(), decoded.getDecodedNode(), schemaContext);
* WriteModification stores all the parameters required to write data to the specified path
*/
public class WriteModification extends AbstractModification {
-
- protected final NormalizedNode data;
+ private static final long serialVersionUID = 1L;
+ protected final NormalizedNode<?, ?> data;
private final SchemaContext schemaContext;
- public WriteModification(YangInstanceIdentifier path, NormalizedNode data, SchemaContext schemaContext) {
+ public WriteModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final SchemaContext schemaContext) {
super(path);
this.data = data;
this.schemaContext = schemaContext;
}
@Override
- public void apply(DOMStoreWriteTransaction transaction) {
+ public void apply(final DOMStoreWriteTransaction transaction) {
transaction.write(path, data);
}
- public NormalizedNode getData() {
+ public NormalizedNode<?, ?> getData() {
return data;
}
.build();
}
- public static WriteModification fromSerializable(Object serializable, SchemaContext schemaContext) {
+ public static WriteModification fromSerializable(final Object serializable, final SchemaContext schemaContext) {
PersistentMessages.Modification o = (PersistentMessages.Modification) serializable;
Decoded decoded = new NormalizedNodeToNodeCodec(schemaContext).decode(o.getPath(), o.getData());
return new WriteModification(decoded.getDecodedPath(), decoded.getDecodedNode(), schemaContext);
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.datastore.Configuration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.cluster.datastore.Configuration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class ShardStrategyFactory {
private static Map<String, ShardStrategy> moduleNameToStrategyMap =
- new ConcurrentHashMap();
+ new ConcurrentHashMap<>();
private static final String UNKNOWN_MODULE_NAME = "unknown";
private static Configuration configuration;
- public static void setConfiguration(Configuration configuration){
+ public static void setConfiguration(final Configuration configuration){
ShardStrategyFactory.configuration = configuration;
moduleNameToStrategyMap = configuration.getModuleNameToShardStrategyMap();
}
- public static ShardStrategy getStrategy(YangInstanceIdentifier path) {
+ public static ShardStrategy getStrategy(final YangInstanceIdentifier path) {
Preconditions.checkState(configuration != null, "configuration should not be missing");
Preconditions.checkNotNull(path, "path should not be null");
}
- private static String getModuleName(YangInstanceIdentifier path) {
+ private static String getModuleName(final YangInstanceIdentifier path) {
String namespace = path.getPathArguments().iterator().next().getNodeType().getNamespace().toASCIIString();
Optional<String> optional =
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
+import akka.actor.Address;
import akka.actor.PoisonPill;
+import akka.dispatch.Mapper;
+import akka.pattern.AskTimeoutException;
import akka.util.Timeout;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.Configuration;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
+import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
-import java.util.concurrent.TimeUnit;
-
import static akka.pattern.Patterns.ask;
/**
private static final Logger
LOG = LoggerFactory.getLogger(ActorContext.class);
- private static final FiniteDuration DEFAULT_OPER_DURATION = Duration.create(5, TimeUnit.SECONDS);
-
public static final String MAILBOX = "bounded-mailbox";
+ private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
+ new Mapper<Throwable, Throwable>() {
+ @Override
+ public Throwable apply(Throwable failure) {
+ Throwable actualFailure = failure;
+ if(failure instanceof AskTimeoutException) {
+ // A timeout exception most likely means the shard isn't initialized.
+ actualFailure = new NotInitializedException(
+ "Timed out trying to find the primary shard. Most likely cause is the " +
+ "shard is not initialized yet.");
+ }
+
+ return actualFailure;
+ }
+ };
+
private final ActorSystem actorSystem;
private final ActorRef shardManager;
private final ClusterWrapper clusterWrapper;
private final Configuration configuration;
+ private final DatastoreContext datastoreContext;
private volatile SchemaContext schemaContext;
- private FiniteDuration operationDuration = DEFAULT_OPER_DURATION;
- private Timeout operationTimeout = new Timeout(operationDuration);
+ private final FiniteDuration operationDuration;
+ private final Timeout operationTimeout;
+ private final String selfAddressHostPort;
public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
- ClusterWrapper clusterWrapper,
- Configuration configuration) {
+ ClusterWrapper clusterWrapper, Configuration configuration) {
+ this(actorSystem, shardManager, clusterWrapper, configuration,
+ DatastoreContext.newBuilder().build());
+ }
+
+ public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
+ ClusterWrapper clusterWrapper, Configuration configuration,
+ DatastoreContext datastoreContext) {
this.actorSystem = actorSystem;
this.shardManager = shardManager;
this.clusterWrapper = clusterWrapper;
this.configuration = configuration;
+ this.datastoreContext = datastoreContext;
+
+ operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(),
+ TimeUnit.SECONDS);
+ operationTimeout = new Timeout(operationDuration);
+
+ Address selfAddress = clusterWrapper.getSelfAddress();
+ if (selfAddress != null && !selfAddress.host().isEmpty()) {
+ selfAddressHostPort = selfAddress.host().get() + ":" + selfAddress.port().get();
+ } else {
+ selfAddressHostPort = null;
+ }
+ }
+
+ public DatastoreContext getDatastoreContext() {
+ return datastoreContext;
}
public ActorSystem getActorSystem() {
}
}
- public void setOperationTimeout(int timeoutInSeconds) {
- operationDuration = Duration.create(timeoutInSeconds, TimeUnit.SECONDS);
- operationTimeout = new Timeout(operationDuration);
- }
-
public SchemaContext getSchemaContext() {
return schemaContext;
}
return Optional.of(actorSystem.actorSelection(path));
}
+ public Future<ActorSelection> findPrimaryShardAsync(final String shardName) {
+ Future<Object> future = executeOperationAsync(shardManager,
+ new FindPrimary(shardName, true).toSerializable(),
+ datastoreContext.getShardInitializationTimeout());
+
+ return future.transform(new Mapper<Object, ActorSelection>() {
+ @Override
+ public ActorSelection checkedApply(Object response) throws Exception {
+ if(response.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
+ PrimaryFound found = PrimaryFound.fromSerializable(response);
+
+ LOG.debug("Primary found {}", found.getPrimaryPath());
+ return actorSystem.actorSelection(found.getPrimaryPath());
+ } else if(response instanceof ActorNotInitialized) {
+ throw new NotInitializedException(
+ String.format("Found primary shard %s but it's not initialized yet. " +
+ "Please try again later", shardName));
+ } else if(response instanceof PrimaryNotFound) {
+ throw new PrimaryNotFoundException(
+ String.format("No primary shard found for %s.", shardName));
+ }
+
+ throw new UnknownMessageException(String.format(
+ "FindPrimary returned unknown response: %s", response));
+ }
+ }, FIND_PRIMARY_FAILURE_TRANSFORMER, getActorSystem().dispatcher());
+ }
+
/**
- * Finds a local shard given it's shard name and return it's ActorRef
+ * Finds a local shard given its shard name and returns its ActorRef
*
* @param shardName the name of the local shard that needs to be found
* @return a reference to a local shard actor which represents the shard
* specified by the shardName
*/
public Optional<ActorRef> findLocalShard(String shardName) {
- Object result = executeOperation(shardManager, new FindLocalShard(shardName));
+ Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
if (result instanceof LocalShardFound) {
LocalShardFound found = (LocalShardFound) result;
return Optional.absent();
}
+ /**
+ * Finds a local shard async given its shard name and return a Future from which to obtain the
+ * ActorRef.
+ *
+ * @param shardName the name of the local shard that needs to be found
+ */
+ public Future<ActorRef> findLocalShardAsync(final String shardName) {
+ Future<Object> future = executeOperationAsync(shardManager,
+ new FindLocalShard(shardName, true), datastoreContext.getShardInitializationTimeout());
+
+ return future.map(new Mapper<Object, ActorRef>() {
+ @Override
+ public ActorRef checkedApply(Object response) throws Throwable {
+ if(response instanceof LocalShardFound) {
+ LocalShardFound found = (LocalShardFound)response;
+ LOG.debug("Local shard found {}", found.getPath());
+ return found.getPath();
+ } else if(response instanceof ActorNotInitialized) {
+ throw new NotInitializedException(
+ String.format("Found local shard for %s but it's not initialized yet.",
+ shardName));
+ } else if(response instanceof LocalShardNotFound) {
+ throw new LocalShardNotFoundException(
+ String.format("Local shard for %s does not exist.", shardName));
+ }
+
+ throw new UnknownMessageException(String.format(
+ "FindLocalShard returned unknown response: %s", response));
+ }
+ }, getActorSystem().dispatcher());
+ }
private String findPrimaryPathOrNull(String shardName) {
- Object result = executeOperation(shardManager, new FindPrimary(shardName).toSerializable());
+ Object result = executeOperation(shardManager, new FindPrimary(shardName, false).toSerializable());
if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
PrimaryFound found = PrimaryFound.fromSerializable(result);
*
* @param actor the ActorSelection
* @param message the message to send
+ * @param timeout the operation timeout
* @return a Future containing the eventual result
*/
- public Future<Object> executeOperationAsync(ActorSelection actor, Object message) {
+ public Future<Object> executeOperationAsync(ActorSelection actor, Object message,
+ Timeout timeout) {
Preconditions.checkArgument(actor != null, "actor must not be null");
Preconditions.checkArgument(message != null, "message must not be null");
LOG.debug("Sending message {} to {}", message.getClass().toString(), actor.toString());
- return ask(actor, message, operationTimeout);
+ return ask(actor, message, timeout);
+ }
+
+ /**
+ * Execute an operation on a remote actor asynchronously.
+ *
+ * @param actor the ActorSelection
+ * @param message the message to send
+ * @return a Future containing the eventual result
+ */
+ public Future<Object> executeOperationAsync(ActorSelection actor, Object message) {
+ return executeOperationAsync(actor, message, operationTimeout);
}
/**
actorSystem.shutdown();
}
+ public ClusterWrapper getClusterWrapper() {
+ return clusterWrapper;
+ }
+
public String getCurrentMemberName(){
return clusterWrapper.getCurrentMemberName();
}
public FiniteDuration getOperationDuration() {
return operationDuration;
}
+
+ public boolean isPathLocal(String path) {
+ if (Strings.isNullOrEmpty(path)) {
+ return false;
+ }
+
+ int pathAtIndex = path.indexOf("@");
+ if (pathAtIndex == -1) {
+ //if the path is of local format, then it's local and is co-located
+ return true;
+
+ } else if (selfAddressHostPort != null) {
+ // self-address and tx actor path, both are of remote path format
+ int slashIndex = path.indexOf("/", pathAtIndex);
+
+ if (slashIndex == -1) {
+ return false;
+ }
+
+ String hostPort = path.substring(pathAtIndex + 1, slashIndex);
+ return hostPort.equals(selfAddressHostPort);
+
+ } else {
+ // self address is local format and tx actor path is remote format
+ return false;
+ }
+ }
+
+ /**
+ * Resolves the given local-format actor path against the primary shard's remote address.
+ *
+ * @deprecated This method is present only to support backward compatibility with Helium and should not be
+ * used any further.
+ *
+ * @param primaryPath the remote-format path of the primary shard actor
+ * @param localPathOfRemoteActor the local-format path of the remote actor to resolve
+ * @return the remote actor path built from the primary's address and the local path's elements
+ */
+ @Deprecated
+ public String resolvePath(final String primaryPath,
+ final String localPathOfRemoteActor) {
+ StringBuilder builder = new StringBuilder();
+ String[] primaryPathElements = primaryPath.split("/");
+ builder.append(primaryPathElements[0]).append("//")
+ .append(primaryPathElements[1]).append(primaryPathElements[2]);
+ String[] remotePathElements = localPathOfRemoteActor.split("/");
+ for (int i = 3; i < remotePathElements.length; i++) {
+ builder.append("/").append(remotePathElements[i]);
+ }
+
+ return builder.toString();
+ }
}
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
+ .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
+ .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
props.getShardTransactionCommitQueueCapacity().getValue().intValue())
+ .persistent(props.getPersistent().booleanValue())
+ .shardIsolatedLeaderCheckIntervalInMillis(
+ props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.build();
return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
getValue().intValue())
.shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
.shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
+ .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
+ .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
.shardTransactionCommitTimeoutInSeconds(
props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
.shardTransactionCommitQueueCapacity(
props.getShardTransactionCommitQueueCapacity().getValue().intValue())
+ .persistent(props.getPersistent().booleanValue())
+ .shardIsolatedLeaderCheckIntervalInMillis(
+ props.getShardIsolatedLeaderCheckIntervalInMillis().getValue())
.build();
return DistributedDataStoreFactory.createInstance("operational",
description "The maximum allowed capacity for each shard's transaction commit queue.";
}
+ leaf shard-initialization-timeout-in-seconds {
+ default 300; // 5 minutes
+ type non-zero-uint32-type;
+ description "The maximum amount of time to wait for a shard to initialize from persistence
+ on startup before failing an operation (e.g. transaction create and change
+ listener registration).";
+ }
+
+ leaf shard-leader-election-timeout-in-seconds {
+ default 30;
+ type non-zero-uint32-type;
+ description "The maximum amount of time to wait for a shard to elect a leader before failing
+ an operation (e.g. transaction create).";
+ }
+
leaf enable-metric-capture {
default false;
type boolean;
type non-zero-uint32-type;
description "Max queue size that an actor's mailbox can reach";
}
+
+ leaf persistent {
+ default true;
+ type boolean;
+ description "Enable or disable data persistence";
+ }
+
+ leaf shard-isolated-leader-check-interval-in-millis {
+ default 5000;
+ type heartbeat-interval-type;
+ description "The interval at which the shard leader will check whether a majority of its
+ followers are active and, if not, deem itself isolated";
+ }
}
// Augments the 'configuration' choice node under modules/module.
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import com.typesafe.config.ConfigFactory;
-import junit.framework.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
import java.io.File;
import java.util.List;
import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
public class ConfigurationImplTest {
import akka.actor.ActorRef;
import akka.actor.Props;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
public class DataChangeListenerProxyTest extends AbstractActorTest {
private static class MockDataChangedEvent implements AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- Map<YangInstanceIdentifier,NormalizedNode<?,?>> createdData = new HashMap();
- Map<YangInstanceIdentifier,NormalizedNode<?,?>> updatedData = new HashMap();
- Map<YangInstanceIdentifier,NormalizedNode<?,?>> originalData = new HashMap();
+ Map<YangInstanceIdentifier,NormalizedNode<?,?>> createdData = new HashMap<>();
+ Map<YangInstanceIdentifier,NormalizedNode<?,?>> updatedData = new HashMap<>();
+ Map<YangInstanceIdentifier,NormalizedNode<?,?>> originalData = new HashMap<>();
Assert.assertTrue(messages instanceof List);
- List<Object> listMessages = (List<Object>) messages;
+ List<?> listMessages = (List<?>) messages;
Assert.assertEquals(1, listMessages.size());
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.cluster.datastore;
+import java.util.concurrent.TimeUnit;
import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
import akka.actor.Props;
-import junit.framework.Assert;
+import akka.actor.Terminated;
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.Futures;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import org.junit.Assert;
import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
-import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
-import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
+import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.eq;
-import java.util.List;
+/**
+ * Unit tests for DataChangeListenerRegistrationProxy.
+ *
+ * @author Thomas Pantelis
+ */
+public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest {
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.assertNotNull;
-import static junit.framework.TestCase.assertTrue;
+ @SuppressWarnings("unchecked")
+ private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> mockListener =
+ Mockito.mock(AsyncDataChangeListener.class);
-public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{
+ @Test
+ public void testGetInstance() throws Exception {
+ DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard", Mockito.mock(ActorContext.class), mockListener);
+
+ Assert.assertEquals(mockListener, proxy.getInstance());
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test(timeout=10000)
+ public void testSuccessfulRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
+
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard-1", actorContext, mockListener);
+
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final DataChangeScope scope = AsyncDataBroker.DataChangeScope.ONE;
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init(path, scope);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new LocalShardFound(getRef()));
+
+ RegisterChangeListener registerMsg = expectMsgClass(timeout, RegisterChangeListener.class);
+ Assert.assertEquals("getPath", path, registerMsg.getPath());
+ Assert.assertEquals("getScope", scope, registerMsg.getScope());
+
+ reply(new RegisterChangeListenerReply(getRef().path()));
+
+ for(int i = 0; (i < 20 * 5) && proxy.getListenerRegistrationActor() == null; i++) {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.assertEquals("getListenerRegistrationActor", getSystem().actorSelection(getRef().path()),
+ proxy.getListenerRegistrationActor());
+
+ watch(proxy.getDataChangeListenerActor());
- private ActorRef dataChangeListenerActor = getSystem().actorOf(Props.create(DoNothingActor.class));
+ proxy.close();
- private static class MockDataChangeListener implements
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> {
+ // The listener registration actor should get a Close message
+ expectMsgClass(timeout, CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS);
- @Override public void onDataChanged(
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- throw new UnsupportedOperationException("onDataChanged");
- }
+ // The DataChangeListener actor should be terminated
+ expectMsgClass(timeout, Terminated.class);
+
+ proxy.close();
+
+ expectNoMsg();
+ }};
}
- @Test
- public void testGetInstance() throws Exception {
- final Props props = Props.create(MessageCollectorActor.class);
- final ActorRef actorRef = getSystem().actorOf(props);
+ @Test(timeout=10000)
+ public void testLocalShardNotFound() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
- MockDataChangeListener listener =
- new MockDataChangeListener();
- DataChangeListenerRegistrationProxy proxy =
- new DataChangeListenerRegistrationProxy(
- getSystem().actorSelection(actorRef.path()),
- listener, dataChangeListenerActor);
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard-1", actorContext, mockListener);
- Assert.assertEquals(listener, proxy.getInstance());
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final DataChangeScope scope = AsyncDataBroker.DataChangeScope.ONE;
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init(path, scope);
+ }
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new LocalShardNotFound("shard-1"));
+
+ expectNoMsg(duration("1 seconds"));
+ }};
}
- @Test
- public void testClose() throws Exception {
- final Props props = Props.create(MessageCollectorActor.class);
- final ActorRef actorRef = getSystem().actorOf(props);
+ @Test(timeout=10000)
+ public void testLocalShardNotInitialized() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
- DataChangeListenerRegistrationProxy proxy =
- new DataChangeListenerRegistrationProxy(
- getSystem().actorSelection(actorRef.path()),
- new MockDataChangeListener(), dataChangeListenerActor);
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard-1", actorContext, mockListener);
- proxy.close();
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final DataChangeScope scope = AsyncDataBroker.DataChangeScope.ONE;
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init(path, scope);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new ActorNotInitialized());
+
+ new Within(duration("1 seconds")) {
+ @Override
+ protected void run() {
+ expectNoMsg();
+ }
+ };
+ }};
+ }
+
+ @Test
+ public void testFailedRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorSystem mockActorSystem = mock(ActorSystem.class);
- //Check if it was received by the remote actor
- ActorContext
- testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration());
- Object messages = testContext
- .executeOperation(actorRef, "messages");
+ ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class),
+ "testFailedRegistration");
+ doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
+ ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(
+ MoreExecutors.sameThreadExecutor());
+ doReturn(executor).when(mockActorSystem).dispatcher();
- assertNotNull(messages);
+ ActorContext actorContext = mock(ActorContext.class);
- assertTrue(messages instanceof List);
+ String shardName = "shard-1";
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ shardName, actorContext, mockListener);
- List<Object> listMessages = (List<Object>) messages;
+ doReturn(mockActorSystem).when(actorContext).getActorSystem();
+ doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+ doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+ doReturn(Futures.failed(new RuntimeException("mock"))).
+ when(actorContext).executeOperationAsync(any(ActorRef.class),
+ any(Object.class), any(Timeout.class));
- assertEquals(1, listMessages.size());
+ proxy.init(YangInstanceIdentifier.of(TestModel.TEST_QNAME),
+ AsyncDataBroker.DataChangeScope.ONE);
- assertTrue(listMessages.get(0).getClass()
- .equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS));
+ Assert.assertEquals("getListenerRegistrationActor", null,
+ proxy.getListenerRegistrationActor());
+ }};
}
+ @SuppressWarnings("unchecked")
@Test
- public void testCloseWhenRegistrationIsNull() throws Exception {
- final Props props = Props.create(MessageCollectorActor.class);
- final ActorRef actorRef = getSystem().actorOf(props);
+ public void testCloseBeforeRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = mock(ActorContext.class);
- DataChangeListenerRegistrationProxy proxy =
- new DataChangeListenerRegistrationProxy(
- new MockDataChangeListener(), dataChangeListenerActor);
+ String shardName = "shard-1";
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ shardName, actorContext, mockListener);
- proxy.close();
+ doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+ doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(getSystem().actorSelection(getRef().path())).
+ when(actorContext).actorSelection(getRef().path());
+ doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+ doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
- //Check if it was received by the remote actor
- ActorContext
- testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration());
- Object messages = testContext
- .executeOperation(actorRef, "messages");
+ Answer<Future<Object>> answer = new Answer<Future<Object>>() {
+ @Override
+ public Future<Object> answer(InvocationOnMock invocation) {
+ proxy.close();
+ return Futures.successful((Object)new RegisterChangeListenerReply(getRef().path()));
+ }
+ };
- assertNotNull(messages);
+ doAnswer(answer).when(actorContext).executeOperationAsync(any(ActorRef.class),
+ any(Object.class), any(Timeout.class));
- assertTrue(messages instanceof List);
+ proxy.init(YangInstanceIdentifier.of(TestModel.TEST_QNAME),
+ AsyncDataBroker.DataChangeScope.ONE);
- List<Object> listMessages = (List<Object>) messages;
+ expectMsgClass(duration("5 seconds"), CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS);
- assertEquals(0, listMessages.size());
+ Assert.assertEquals("getListenerRegistrationActor", null,
+ proxy.getListenerRegistrationActor());
+ }};
}
}
import akka.actor.DeadLetter;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
+import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
final Props props = DataChangeListener.props(mockListener);
final ActorRef subject = getSystem().actorOf(props, "testDataChangedWithNoSender");
- // Let the DataChangeListener know that notifications should be enabled
- subject.tell(new EnableNotification(true), ActorRef.noSender());
+ getSystem().eventStream().subscribe(getRef(), DeadLetter.class);
subject.tell(new DataChanged(CompositeModel.createTestContext(), mockChangeEvent),
ActorRef.noSender());
- getSystem().eventStream().subscribe(getRef(), DeadLetter.class);
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
- expectNoMsg();
+ // Make sure no DataChangedReply is sent to DeadLetters.
+ while(true) {
+ DeadLetter deadLetter;
+ try {
+ deadLetter = expectMsgClass(duration("1 seconds"), DeadLetter.class);
+ } catch (AssertionError e) {
+ // Timed out - got no DeadLetter - this is good
+ break;
}
- };
+
+ // We may get DeadLetters for other messages we don't care about.
+ Assert.assertFalse("Unexpected DataChangedReply",
+ deadLetter.message() instanceof DataChangedReply);
+ }
}};
}
}
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Uninterruptibles;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
+import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainClosedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import java.util.concurrent.TimeUnit;
public class DistributedDataStoreIntegrationTest extends AbstractActorTest {
+ private final DatastoreContext.Builder datastoreContextBuilder =
+ DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100);
+
@Test
public void testWriteTransactionWithSingleShard() throws Exception{
- System.setProperty("shard.persistent", "true");
new IntegrationTestKit(getSystem()) {{
DistributedDataStore dataStore =
setupDistributedDataStore("transactionIntegrationTest", "test-1");
@Test
public void testWriteTransactionWithMultipleShards() throws Exception{
- System.setProperty("shard.persistent", "true");
new IntegrationTestKit(getSystem()) {{
DistributedDataStore dataStore =
setupDistributedDataStore("testWriteTransactionWithMultipleShards", "cars-1", "people-1");
DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
- Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
- assertEquals("canCommit", true, canCommit);
- cohort.preCommit().get(5, TimeUnit.SECONDS);
- cohort.commit().get(5, TimeUnit.SECONDS);
+ doCommit(cohort);
- // 5. Verify the data in the store
+ // Verify the data in the store
DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
DistributedDataStore dataStore =
setupDistributedDataStore("testReadWriteTransaction", "test-1");
- // 1. Create a read-write Tx
+ // 1. Create a read-write Tx
DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
assertNotNull("newReadWriteTransaction returned null", readWriteTx);
// 5. Commit the Tx
- Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
- assertEquals("canCommit", true, canCommit);
- cohort.preCommit().get(5, TimeUnit.SECONDS);
- cohort.commit().get(5, TimeUnit.SECONDS);
+ doCommit(cohort);
// 6. Verify the data in the store
}};
}
+ @Test
+ public void testTransactionWritesWithShardNotInitiallyReady() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionWritesWithShardNotInitiallyReady";
+ String shardName = "test-1";
+
+ // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
+ // initialized until we create and submit the write the Tx.
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the write Tx
+
+ final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newReadWriteTransaction returned null", writeTx);
+
+ // Do some modification operations and ready the Tx on a separate thread.
+
+ final YangInstanceIdentifier listEntryPath = YangInstanceIdentifier.builder(
+ TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME,
+ TestModel.ID_QNAME, 1).build();
+
+ final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReady = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ writeTx.merge(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(
+ TestModel.OUTER_LIST_QNAME).build());
+
+ writeTx.write(listEntryPath, ImmutableNodes.mapEntry(
+ TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
+
+ writeTx.delete(listEntryPath);
+
+ txCohort.set(writeTx.ready());
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReady.countDown();
+ }
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Tx ready", true, done);
+
+ // At this point the Tx operations should be waiting for the shard to initialize so
+ // trigger the latch to let the shard recovery to continue.
+
+ blockRecoveryLatch.countDown();
+
+ // Wait for the Tx commit to complete.
+
+ doCommit(txCohort.get());
+
+ // Verify the data in the store
+
+ DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
+
+ Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).
+ get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+
+ optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+
+ optional = readTx.read(listEntryPath).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", false, optional.isPresent());
+
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test
+ public void testTransactionReadsWithShardNotInitiallyReady() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionReadsWithShardNotInitiallyReady";
+ String shardName = "test-1";
+
+ // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
+ // initialized until we create the Tx.
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the read-write Tx
+
+ final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
+ assertNotNull("newReadWriteTransaction returned null", readWriteTx);
+
+ // Do some reads on the Tx on a separate thread.
+
+ final AtomicReference<CheckedFuture<Boolean, ReadFailedException>> txExistsFuture =
+ new AtomicReference<>();
+ final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
+ txReadFuture = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReadsDone = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ readWriteTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ txExistsFuture.set(readWriteTx.exists(TestModel.TEST_PATH));
+
+ txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReadsDone.countDown();
+ }
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReadsDone, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Tx reads done", true, done);
+
+ // At this point the Tx operations should be waiting for the shard to initialize so
+ // trigger the latch to let the shard recovery to continue.
+
+ blockRecoveryLatch.countDown();
+
+ // Wait for the reads to complete and verify.
+
+ assertEquals("exists", true, txExistsFuture.get().checkedGet(5, TimeUnit.SECONDS));
+ assertEquals("read", true, txReadFuture.get().checkedGet(5, TimeUnit.SECONDS).isPresent());
+
+ readWriteTx.close();
+
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test(expected=NotInitializedException.class)
+ public void testTransactionCommitFailureWithShardNotInitialized() throws Throwable{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionCommitFailureWithShardNotInitialized";
+ String shardName = "test-1";
+
+ // Set the shard initialization timeout low for the test.
+
+ datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
+
+ // Setup the InMemoryJournal to block shard recovery indefinitely.
+
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the write Tx
+
+ final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newReadWriteTransaction returned null", writeTx);
+
+ // Do some modifications and ready the Tx on a separate thread.
+
+ final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReady = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ txCohort.set(writeTx.ready());
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReady.countDown();
+ }
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Tx ready", true, done);
+
+ // Wait for the commit to complete. Since the shard never initialized, the Tx should
+ // have timed out and throw an appropriate exception cause.
+
+ try {
+ txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
+ } catch(ExecutionException e) {
+ throw e.getCause();
+ } finally {
+ blockRecoveryLatch.countDown();
+ cleanup(dataStore);
+ }
+ }};
+ }
+
+ @Test(expected=NotInitializedException.class)
+ public void testTransactionReadFailureWithShardNotInitialized() throws Throwable{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionReadFailureWithShardNotInitialized";
+ String shardName = "test-1";
+
+ // Set the shard initialization timeout low for the test.
+
+ datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
+
+ // Setup the InMemoryJournal to block shard recovery indefinitely.
+
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the read-write Tx
+
+ final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
+ assertNotNull("newReadWriteTransaction returned null", readWriteTx);
+
+ // Do a read on the Tx on a separate thread.
+
+ final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
+ txReadFuture = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReadDone = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ readWriteTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
+
+ readWriteTx.close();
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReadDone.countDown();
+ }
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReadDone, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Tx read done", true, done);
+
+ // Wait for the read to complete. Since the shard never initialized, the Tx should
+ // have timed out and throw an appropriate exception cause.
+
+ try {
+ txReadFuture.get().checkedGet(5, TimeUnit.SECONDS);
+ } catch(ReadFailedException e) {
+ throw e.getCause();
+ } finally {
+ blockRecoveryLatch.countDown();
+ cleanup(dataStore);
+ }
+ }};
+ }
+
+ @Test(expected=NoShardLeaderException.class)
+ public void testTransactionCommitFailureWithNoShardLeader() throws Throwable{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionCommitFailureWithNoShardLeader";
+ String shardName = "test-1";
+
+ // We don't want the shard to become the leader so prevent shard election from completing
+ // by setting the election timeout, which is based on the heartbeat interval, really high.
+
+ datastoreContextBuilder.shardHeartbeatIntervalInMillis(30000);
+
+ // Set the leader election timeout low for the test.
+
+ datastoreContextBuilder.shardLeaderElectionTimeout(1, TimeUnit.MILLISECONDS);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the write Tx.
+
+ final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newReadWriteTransaction returned null", writeTx);
+
+ // Do some modifications and ready the Tx on a separate thread.
+
+ final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReady = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ txCohort.set(writeTx.ready());
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReady.countDown();
+ }
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Tx ready", true, done);
+
+ // Wait for the commit to complete. Since no shard leader was elected in time, the Tx
+ // should have timed out and throw an appropriate exception cause.
+
+ try {
+ txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
+ } catch(ExecutionException e) {
+ throw e.getCause();
+ } finally {
+ cleanup(dataStore);
+ }
+ }};
+ }
+
@Test
public void testTransactionAbort() throws Exception{
System.setProperty("shard.persistent", "true");
@Test
public void testTransactionChain() throws Exception{
- System.setProperty("shard.persistent", "true");
new IntegrationTestKit(getSystem()) {{
- DistributedDataStore dataStore =
- setupDistributedDataStore("transactionChainIntegrationTest", "test-1");
+ DistributedDataStore dataStore = setupDistributedDataStore("testTransactionChain", "test-1");
// 1. Create a Tx chain and write-only Tx
// 2. Write some data
- NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- writeTx.write(TestModel.TEST_PATH, containerNode);
+ NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ writeTx.write(TestModel.TEST_PATH, testNode);
// 3. Ready the Tx for commit
- DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
+
+ // 4. Commit the Tx on another thread that first waits for the second read Tx.
+
+ final CountDownLatch continueCommit1 = new CountDownLatch(1);
+ final CountDownLatch commit1Done = new CountDownLatch(1);
+ final AtomicReference<Exception> commit1Error = new AtomicReference<>();
+ new Thread() {
+ @Override
+ public void run() {
+ try {
+ continueCommit1.await();
+ doCommit(cohort1);
+ } catch (Exception e) {
+ commit1Error.set(e);
+ } finally {
+ commit1Done.countDown();
+ }
+ }
+ }.start();
- // 4. Commit the Tx
+ // 5. Create a new read Tx from the chain to read and verify the data from the first
+ // Tx is visible after being readied.
- Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
- assertEquals("canCommit", true, canCommit);
- cohort.preCommit().get(5, TimeUnit.SECONDS);
- cohort.commit().get(5, TimeUnit.SECONDS);
+ DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
+ Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", testNode, optional.get());
- // 5. Verify the data in the store
+ // 6. Create a new RW Tx from the chain, write more data, and ready it
- DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
+ DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
+ MapNode outerNode = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build();
+ rwTx.write(TestModel.OUTER_LIST_PATH, outerNode);
- Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+ DOMStoreThreePhaseCommitCohort cohort2 = rwTx.ready();
+
+ // 7. Create a new read Tx from the chain to read the data from the last RW Tx to
+ // verify it is visible.
+
+ readTx = txChain.newReadWriteTransaction();
+ optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", outerNode, optional.get());
+
+ // 8. Wait for the 2 commits to complete and close the chain.
+
+ continueCommit1.countDown();
+ Uninterruptibles.awaitUninterruptibly(commit1Done, 5, TimeUnit.SECONDS);
+
+ if(commit1Error.get() != null) {
+ throw commit1Error.get();
+ }
+
+ doCommit(cohort2);
+
+ txChain.close();
+
+ // 9. Create a new read Tx from the data store and verify committed data.
+
+ readTx = dataStore.newReadOnlyTransaction();
+ optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
assertEquals("isPresent", true, optional.isPresent());
- assertEquals("Data node", containerNode, optional.get());
+ assertEquals("Data node", outerNode, optional.get());
+
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test
+ public void testCreateChainedTransactionsInQuickSuccession() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore = setupDistributedDataStore(
+ "testCreateChainedTransactionsInQuickSuccession", "test-1");
+
+ DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+
+ NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ int nTxs = 20;
+ List<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(nTxs);
+ for(int i = 0; i < nTxs; i++) {
+ DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
+
+ rwTx.merge(TestModel.TEST_PATH, testNode);
+
+ cohorts.add(rwTx.ready());
+
+ }
+
+ for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
+ doCommit(cohort);
+ }
+
+ txChain.close();
+
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test
+ public void testCreateChainedTransactionAfterEmptyTxReadied() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore = setupDistributedDataStore(
+ "testCreateChainedTransactionAfterEmptyTxReadied", "test-1");
+
+ DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+
+ DOMStoreReadWriteTransaction rwTx1 = txChain.newReadWriteTransaction();
+
+ rwTx1.ready();
+
+ DOMStoreReadWriteTransaction rwTx2 = txChain.newReadWriteTransaction();
+
+ Optional<NormalizedNode<?, ?>> optional = rwTx2.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", false, optional.isPresent());
txChain.close();
}};
}
+ @Test
+ public void testCreateChainedTransactionWhenPreviousNotReady() throws Throwable {
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore = setupDistributedDataStore(
+ "testCreateChainedTransactionWhenPreviousNotReady", "test-1");
+
+ final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+
+ DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+ assertNotNull("newWriteOnlyTransaction returned null", writeTx);
+
+ writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ // Try to create another Tx of each type - each should fail b/c the previous Tx wasn't
+ // readied.
+
+ assertExceptionOnTxChainCreates(txChain, IllegalStateException.class);
+ }};
+ }
+
+ @Test
+ public void testCreateChainedTransactionAfterClose() throws Throwable {
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore = setupDistributedDataStore(
+ "testCreateChainedTransactionAfterClose", "test-1");
+
+ DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+
+ txChain.close();
+
+ // Try to create another Tx of each type - should fail b/c the previous Tx was closed.
+
+ assertExceptionOnTxChainCreates(txChain, TransactionChainClosedException.class);
+ }};
+ }
+
+ @Test
+ public void testChangeListenerRegistration() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("testChangeListenerRegistration", "test-1");
+
+ testWriteTransaction(dataStore, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ MockDataChangeListener listener = new MockDataChangeListener(1);
+
+ ListenerRegistration<MockDataChangeListener>
+ listenerReg = dataStore.registerChangeListener(TestModel.TEST_PATH, listener,
+ DataChangeScope.SUBTREE);
+
+ assertNotNull("registerChangeListener returned null", listenerReg);
+
+ // Wait for the initial notification
+
+ listener.waitForChangeEvents(TestModel.TEST_PATH);
+
+ listener.reset(2);
+
+ // Write 2 updates.
+
+ testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
+
+ YangInstanceIdentifier listPath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
+ nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build();
+ testWriteTransaction(dataStore, listPath,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
+
+ // Wait for the 2 updates.
+
+ listener.waitForChangeEvents(TestModel.OUTER_LIST_PATH, listPath);
+
+ listenerReg.close();
+
+ testWriteTransaction(dataStore, YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
+ nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2));
+
+ listener.expectNoMoreChanges("Received unexpected change after close");
+
+ cleanup(dataStore);
+ }};
+ }
+
class IntegrationTestKit extends ShardTestKit {
IntegrationTestKit(ActorSystem actorSystem) {
}
DistributedDataStore setupDistributedDataStore(String typeName, String... shardNames) {
+ return setupDistributedDataStore(typeName, true, shardNames);
+ }
+
+ DistributedDataStore setupDistributedDataStore(String typeName, boolean waitUntilLeader,
+ String... shardNames) {
MockClusterWrapper cluster = new MockClusterWrapper();
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
ShardStrategyFactory.setConfiguration(config);
- DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
+ DatastoreContext datastoreContext = datastoreContextBuilder.build();
DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
config, datastoreContext);
SchemaContext schemaContext = SchemaContextHelper.full();
dataStore.onGlobalContextUpdated(schemaContext);
- for(String shardName: shardNames) {
- ActorRef shard = null;
- for(int i = 0; i < 20 * 5 && shard == null; i++) {
- Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
- Optional<ActorRef> shardReply = dataStore.getActorContext().findLocalShard(shardName);
- if(shardReply.isPresent()) {
- shard = shardReply.get();
+ if(waitUntilLeader) {
+ for(String shardName: shardNames) {
+ ActorRef shard = null;
+ for(int i = 0; i < 20 * 5 && shard == null; i++) {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ Optional<ActorRef> shardReply = dataStore.getActorContext().findLocalShard(shardName);
+ if(shardReply.isPresent()) {
+ shard = shardReply.get();
+ }
}
- }
- assertNotNull("Shard was not created", shard);
+ assertNotNull("Shard was not created", shard);
- System.out.println("!!!!!!shard: "+shard.path().toString());
- waitUntilLeader(shard);
+ waitUntilLeader(shard);
+ }
}
return dataStore;
// 4. Commit the Tx
- Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
- assertEquals("canCommit", true, canCommit);
- cohort.preCommit().get(5, TimeUnit.SECONDS);
- cohort.commit().get(5, TimeUnit.SECONDS);
+ doCommit(cohort);
// 5. Verify the data in the store
assertEquals("Data node", nodeToWrite, optional.get());
}
+ void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
+ Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit);
+ cohort.preCommit().get(5, TimeUnit.SECONDS);
+ cohort.commit().get(5, TimeUnit.SECONDS);
+ }
+
void cleanup(DistributedDataStore dataStore) {
dataStore.getActorContext().getShardManager().tell(PoisonPill.getInstance(), null);
}
+
+ void assertExceptionOnCall(Callable<Void> callable, Class<? extends Exception> expType)
+ throws Exception {
+ try {
+ callable.call();
+ fail("Expected " + expType.getSimpleName());
+ } catch(Exception e) {
+ assertEquals("Exception type", expType, e.getClass());
+ }
+ }
+
+ void assertExceptionOnTxChainCreates(final DOMStoreTransactionChain txChain,
+ Class<? extends Exception> expType) throws Exception {
+ assertExceptionOnCall(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ txChain.newWriteOnlyTransaction();
+ return null;
+ }
+ }, expType);
+
+ assertExceptionOnCall(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ txChain.newReadWriteTransaction();
+ return null;
+ }
+ }, expType);
+
+ assertExceptionOnCall(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ txChain.newReadOnlyTransaction();
+ return null;
+ }
+ }, expType);
+ }
}
}
+++ /dev/null
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorPath;
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.ExecutionContexts;
-import akka.dispatch.Futures;
-import akka.util.Timeout;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
-import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.ExecutionContextExecutor;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-import java.util.concurrent.TimeUnit;
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class DistributedDataStoreTest extends AbstractActorTest{
-
- private DistributedDataStore distributedDataStore;
- private MockActorContext mockActorContext;
- private ActorRef doNothingActorRef;
-
- @Before
- public void setUp() throws Exception {
- ShardStrategyFactory.setConfiguration(new MockConfiguration());
- final Props props = Props.create(DoNothingActor.class);
-
- doNothingActorRef = getSystem().actorOf(props);
-
- mockActorContext = new MockActorContext(getSystem(), doNothingActorRef);
- distributedDataStore = new DistributedDataStore(mockActorContext);
- distributedDataStore.onGlobalContextUpdated(
- TestModel.createTestContext());
-
- // Make CreateTransactionReply as the default response. Will need to be
- // tuned if a specific test requires some other response
- mockActorContext.setExecuteShardOperationResponse(
- CreateTransactionReply.newBuilder()
- .setTransactionActorPath(doNothingActorRef.path().toString())
- .setTransactionId("txn-1 ")
- .build());
- }
-
- @After
- public void tearDown() throws Exception {
-
- }
-
- @SuppressWarnings("resource")
- @Test
- public void testConstructor(){
- ActorSystem actorSystem = mock(ActorSystem.class);
-
- new DistributedDataStore(actorSystem, "config",
- mock(ClusterWrapper.class), mock(Configuration.class),
- DatastoreContext.newBuilder().build());
-
- verify(actorSystem).actorOf(any(Props.class), eq("shardmanager-config"));
- }
-
- @Test
- public void testRegisterChangeListenerWhenShardIsNotLocal() throws Exception {
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH, new AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>() {
- @Override
- public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- throw new UnsupportedOperationException("onDataChanged");
- }
- }, AsyncDataBroker.DataChangeScope.BASE);
-
- // Since we do not expect the shard to be local registration will return a NoOpRegistration
- assertTrue(registration instanceof NoOpDataChangeListenerRegistration);
-
- assertNotNull(registration);
- }
-
- @Test
- public void testRegisterChangeListenerWhenShardIsLocal() throws Exception {
- ActorContext actorContext = mock(ActorContext.class);
-
- distributedDataStore = new DistributedDataStore(actorContext);
- distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
-
- Future future = mock(Future.class);
- when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
- when(actorContext.getActorSystem()).thenReturn(getSystem());
- when(actorContext.findLocalShard(anyString())).thenReturn(Optional.of(doNothingActorRef));
- when(actorContext
- .executeOperationAsync(eq(doNothingActorRef), anyObject(), any(Timeout.class))).thenReturn(future);
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
- mock(AsyncDataChangeListener.class),
- AsyncDataBroker.DataChangeScope.BASE);
-
- assertNotNull(registration);
-
- assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
- }
-
- @Test
- public void testRegisterChangeListenerWhenSuccessfulReplyReceived() throws Exception {
- ActorContext actorContext = mock(ActorContext.class);
-
- distributedDataStore = new DistributedDataStore(actorContext);
- distributedDataStore.onGlobalContextUpdated(
- TestModel.createTestContext());
-
- ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
-
- // Make Future successful
- Future f = Futures.successful(new RegisterChangeListenerReply(doNothingActorRef.path()));
-
- // Setup the mocks
- ActorSystem actorSystem = mock(ActorSystem.class);
- ActorSelection actorSelection = mock(ActorSelection.class);
-
- when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
- when(actorSystem.dispatcher()).thenReturn(executor);
- when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
- when(actorContext.getActorSystem()).thenReturn(actorSystem);
- when(actorContext.findLocalShard(anyString())).thenReturn(Optional.of(doNothingActorRef));
- when(actorContext
- .executeOperationAsync(eq(doNothingActorRef), anyObject(), any(Timeout.class))).thenReturn(f);
- when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
- mock(AsyncDataChangeListener.class),
- AsyncDataBroker.DataChangeScope.BASE);
-
- assertNotNull(registration);
-
- assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
-
- ActorSelection listenerRegistrationActor =
- ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
-
- assertNotNull(listenerRegistrationActor);
-
- assertEquals(actorSelection, listenerRegistrationActor);
- }
-
- @Test
- public void testRegisterChangeListenerWhenSuccessfulReplyFailed() throws Exception {
- ActorContext actorContext = mock(ActorContext.class);
-
- distributedDataStore = new DistributedDataStore(actorContext);
- distributedDataStore.onGlobalContextUpdated(
- TestModel.createTestContext());
-
- ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
-
- // Make Future fail
- Future f = Futures.failed(new IllegalArgumentException());
-
- // Setup the mocks
- ActorSystem actorSystem = mock(ActorSystem.class);
- ActorSelection actorSelection = mock(ActorSelection.class);
-
- when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
- when(actorSystem.dispatcher()).thenReturn(executor);
- when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
- when(actorContext.getActorSystem()).thenReturn(actorSystem);
- when(actorContext.findLocalShard(anyString())).thenReturn(Optional.of(doNothingActorRef));
- when(actorContext
- .executeOperationAsync(eq(doNothingActorRef), anyObject(), any(Timeout.class))).thenReturn(f);
- when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
- mock(AsyncDataChangeListener.class),
- AsyncDataBroker.DataChangeScope.BASE);
-
- assertNotNull(registration);
-
- assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
-
- ActorSelection listenerRegistrationActor =
- ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
-
- assertNull(listenerRegistrationActor);
-
- }
-
-
- @Test
- public void testCreateTransactionChain() throws Exception {
- final DOMStoreTransactionChain transactionChain = distributedDataStore.createTransactionChain();
- assertNotNull(transactionChain);
- }
-
- @Test
- public void testNewReadOnlyTransaction() throws Exception {
- final DOMStoreReadTransaction transaction = distributedDataStore.newReadOnlyTransaction();
- assertNotNull(transaction);
- }
-
- @Test
- public void testNewWriteOnlyTransaction() throws Exception {
- final DOMStoreWriteTransaction transaction = distributedDataStore.newWriteOnlyTransaction();
- assertNotNull(transaction);
- }
-
- @Test
- public void testNewReadWriteTransaction() throws Exception {
- final DOMStoreReadWriteTransaction transaction = distributedDataStore.newReadWriteTransaction();
- assertNotNull(transaction);
- }
-}
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.japi.Creator;
+import akka.pattern.Patterns;
import akka.persistence.RecoveryCompleted;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import akka.japi.Creator;
+import akka.util.Timeout;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+
import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- shardManager.tell(new FindPrimary("non-existent").toSerializable(), getRef());
+ shardManager.tell(new FindPrimary("non-existent", false).toSerializable(), getRef());
expectMsgEquals(duration("5 seconds"),
new PrimaryNotFound("non-existent").toSerializable());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false).toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
}};
}
@Test
- public void testOnReceiveFindPrimaryForNotInitialzedShard() throws Exception {
+ public void testOnReceiveFindPrimaryForNotInitializedShard() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false).toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), ActorNotInitialized.class);
}};
}
+ @Test
+ public void testOnReceiveFindPrimaryWaitForShardInitialized() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ // We're passing waitUntilInitialized = true to FindPrimary so the response should be
+ // delayed until we send ActorInitialized.
+ Future<Object> future = Patterns.ask(shardManager, new FindPrimary(Shard.DEFAULT_NAME, true),
+ new Timeout(5, TimeUnit.SECONDS));
+
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ Object resp = Await.result(future, duration("5 seconds"));
+ assertTrue("Expected: PrimaryFound, Actual: " + resp, resp instanceof PrimaryFound);
+ }};
+ }
+
@Test
public void testOnReceiveFindLocalShardForNonExistentShard() throws Exception {
new JavaTestKit(getSystem()) {{
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- shardManager.tell(new FindLocalShard("non-existent"), getRef());
+ shardManager.tell(new FindLocalShard("non-existent", false), getRef());
LocalShardNotFound notFound = expectMsgClass(duration("5 seconds"), LocalShardNotFound.class);
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
+ shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
LocalShardFound found = expectMsgClass(duration("5 seconds"), LocalShardFound.class);
final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- //shardManager.tell(new ActorInitialized(), mockShardActor);
- shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
+ shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
expectMsgClass(duration("5 seconds"), ActorNotInitialized.class);
}};
}
+ @Test
+ public void testOnReceiveFindLocalShardWaitForShardInitialized() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ // We're passing waitUntilInitialized = true to FindLocalShard so the response should be
+ // delayed until we send ActorInitialized.
+ Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
+ new Timeout(5, TimeUnit.SECONDS));
+
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ Object resp = Await.result(future, duration("5 seconds"));
+ assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
+ }};
+ }
+
@Test
public void testOnReceiveMemberUp() throws Exception {
new JavaTestKit(getSystem()) {{
MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
- shardManager.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
PrimaryFound found = PrimaryFound.fromSerializable(expectMsgClass(duration("5 seconds"),
PrimaryFound.SERIALIZABLE_CLASS));
MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
- shardManager.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
MockClusterWrapper.sendMemberRemoved(shardManager, "member-2", getRef().path().toString());
- shardManager.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
}};
}};
}
+ @Test
+ public void testRecoveryApplicable(){
+ new JavaTestKit(getSystem()) {
+ {
+ final Props persistentProps = ShardManager.props(shardMrgIDSuffix,
+ new MockClusterWrapper(),
+ new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(true).build());
+ final TestActorRef<ShardManager> persistentShardManager =
+ TestActorRef.create(getSystem(), persistentProps);
+
+ DataPersistenceProvider dataPersistenceProvider1 = persistentShardManager.underlyingActor().getDataPersistenceProvider();
+
+ assertTrue("Recovery Applicable", dataPersistenceProvider1.isRecoveryApplicable());
+
+ final Props nonPersistentProps = ShardManager.props(shardMrgIDSuffix,
+ new MockClusterWrapper(),
+ new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(false).build());
+ final TestActorRef<ShardManager> nonPersistentShardManager =
+ TestActorRef.create(getSystem(), nonPersistentProps);
+
+ DataPersistenceProvider dataPersistenceProvider2 = nonPersistentShardManager.underlyingActor().getDataPersistenceProvider();
+
+ assertFalse("Recovery Not Applicable", dataPersistenceProvider2.isRecoveryApplicable());
+
+
+ }};
+
+ }
+
+ @Test
+ public void testOnUpdateSchemaContextUpdateKnownModulesCallsDataPersistenceProvider()
+ throws Exception {
+ final CountDownLatch persistLatch = new CountDownLatch(1);
+ final Creator<ShardManager> creator = new Creator<ShardManager>() {
+ private static final long serialVersionUID = 1L;
+ @Override
+ public ShardManager create() throws Exception {
+ return new ShardManager(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build()) {
+ @Override
+ protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor
+ = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+ return dataPersistenceProviderMonitor;
+ }
+ };
+ }
+ };
+
+ new JavaTestKit(getSystem()) {{
+
+ final TestActorRef<ShardManager> shardManager =
+ TestActorRef.create(getSystem(), Props.create(new DelegatingShardManagerCreator(creator)));
+
+ ModuleIdentifier foo = mock(ModuleIdentifier.class);
+ when(foo.getNamespace()).thenReturn(new URI("foo"));
+
+ Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
+ moduleIdentifierSet.add(foo);
+
+ SchemaContext schemaContext = mock(SchemaContext.class);
+ when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
+
+ shardManager.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertEquals("Persisted", true,
+ Uninterruptibles.awaitUninterruptibly(persistLatch, 5, TimeUnit.SECONDS));
+
+ }};
+ }
+
+
private static class TestShardManager extends ShardManager {
private final CountDownLatch recoveryComplete = new CountDownLatch(1);
}
}
+
+ private static class DelegatingShardManagerCreator implements Creator<ShardManager> {
+ private static final long serialVersionUID = 1L;
+ private Creator<ShardManager> delegate;
+
+ public DelegatingShardManagerCreator(Creator<ShardManager> delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public ShardManager create() throws Exception {
+ return delegate.create();
+ }
+ }
}
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.opendaylight.controller.cluster.datastore.messages.CreateTransaction.CURRENT_VERSION;
import akka.actor.ActorRef;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.dispatch.Dispatchers;
import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.pattern.Patterns;
-import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
import akka.util.Timeout;
import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.inOrder;
+
public class ShardTest extends AbstractActorTest {
private static final SchemaContext SCHEMA_CONTEXT = TestModel.createTestContext();
- private static final ShardIdentifier IDENTIFIER = ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
private static final AtomicInteger NEXT_SHARD_NUM = new AtomicInteger();
- private static String shardName() {
- return "shard" + NEXT_SHARD_NUM.getAndIncrement();
- }
+ private final ShardIdentifier shardID = ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config" + NEXT_SHARD_NUM.getAndIncrement()).build();
private DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
- shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).build();
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).
+ shardHeartbeatIntervalInMillis(100).build();
@Before
public void setUp() {
- System.setProperty("shard.persistent", "false");
-
InMemorySnapshotStore.clear();
InMemoryJournal.clear();
}
}
private Props newShardProps() {
- return Shard.props(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+ return Shard.props(shardID, Collections.<ShardIdentifier,String>emptyMap(),
dataStoreContext, SCHEMA_CONTEXT);
}
@Test
- public void testOnReceiveRegisterListener() throws Exception {
- new JavaTestKit(getSystem()) {{
- ActorRef subject = getSystem().actorOf(newShardProps(), "testRegisterChangeListener");
+ public void testRegisterChangeListener() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps(), "testRegisterChangeListener");
+
+ waitUntilLeader(shard);
- subject.tell(new UpdateSchemaContext(SchemaContextHelper.full()), getRef());
+ shard.tell(new UpdateSchemaContext(SchemaContextHelper.full()), ActorRef.noSender());
- subject.tell(new RegisterChangeListener(TestModel.TEST_PATH,
- getRef().path(), AsyncDataBroker.DataChangeScope.BASE), getRef());
+ MockDataChangeListener listener = new MockDataChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataChangeListener.props(listener),
+ "testRegisterChangeListener-DataChangeListener");
- EnableNotification enable = expectMsgClass(duration("3 seconds"), EnableNotification.class);
- assertEquals("isEnabled", false, enable.isEnabled());
+ shard.tell(new RegisterChangeListener(TestModel.TEST_PATH,
+ dclActor.path(), AsyncDataBroker.DataChangeScope.BASE), getRef());
RegisterChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
RegisterChangeListenerReply.class);
- assertTrue(reply.getListenerRegistrationPath().toString().matches(
+ String replyPath = reply.getListenerRegistrationPath().toString();
+ assertTrue("Incorrect reply path: " + replyPath, replyPath.matches(
"akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ listener.waitForChangeEvents(path);
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @SuppressWarnings("serial")
+ @Test
+ public void testChangeListenerNotifiedWhenNotTheLeaderOnRegistration() throws Exception {
+ // This test tests the timing window in which a change listener is registered before the
+ // shard becomes the leader. We verify that the listener is registered and notified of the
+ // existing data when the shard becomes the leader.
+ new ShardTestKit(getSystem()) {{
+ // For this test, we want to send the RegisterChangeListener message after the shard
+ // has recovered from persistence and before it becomes the leader. So we subclass
+ // Shard to override onReceiveCommand and, when the first ElectionTimeout is received,
+ // we know that the shard has been initialized to a follower and has started the
+ // election process. The following 2 CountDownLatches are used to coordinate the
+ // ElectionTimeout with the sending of the RegisterChangeListener message.
+ final CountDownLatch onFirstElectionTimeout = new CountDownLatch(1);
+ final CountDownLatch onChangeListenerRegistered = new CountDownLatch(1);
+ Creator<Shard> creator = new Creator<Shard>() {
+ boolean firstElectionTimeout = true;
+
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ dataStoreContext, SCHEMA_CONTEXT) {
+ @Override
+ public void onReceiveCommand(final Object message) throws Exception {
+ if(message instanceof ElectionTimeout && firstElectionTimeout) {
+ // Got the first ElectionTimeout. We don't forward it to the
+ // base Shard yet until we've sent the RegisterChangeListener
+ // message. So we signal the onFirstElectionTimeout latch to tell
+ // the main thread to send the RegisterChangeListener message and
+ // start a thread to wait on the onChangeListenerRegistered latch,
+ // which the main thread signals after it has sent the message.
+ // After the onChangeListenerRegistered is triggered, we send the
+ // original ElectionTimeout message to proceed with the election.
+ firstElectionTimeout = false;
+ final ActorRef self = getSelf();
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.awaitUninterruptibly(
+ onChangeListenerRegistered, 5, TimeUnit.SECONDS);
+ self.tell(message, self);
+ }
+ }.start();
+
+ onFirstElectionTimeout.countDown();
+ } else {
+ super.onReceiveCommand(message);
+ }
+ }
+ };
+ }
+ };
+
+ MockDataChangeListener listener = new MockDataChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataChangeListener.props(listener),
+ "testRegisterChangeListenerWhenNotLeaderInitially-DataChangeListener");
+
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)),
+ "testRegisterChangeListenerWhenNotLeaderInitially");
+
+ // Write initial data into the in-memory store.
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ // Wait until the shard receives the first ElectionTimeout message.
+ assertEquals("Got first ElectionTimeout", true,
+ onFirstElectionTimeout.await(5, TimeUnit.SECONDS));
+
+ // Now send the RegisterChangeListener and wait for the reply.
+ shard.tell(new RegisterChangeListener(path, dclActor.path(),
+ AsyncDataBroker.DataChangeScope.SUBTREE), getRef());
+
+ RegisterChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+ RegisterChangeListenerReply.class);
+ assertNotNull("getListenerRegistrationPath", reply.getListenerRegistrationPath());
+
+ // Sanity check - verify the shard is not the leader yet.
+ shard.tell(new FindLeader(), getRef());
+ FindLeaderReply findLeadeReply =
+ expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+ assertNull("Expected the shard not to be the leader", findLeadeReply.getLeaderActor());
+
+ // Signal the onChangeListenerRegistered latch to tell the thread above to proceed
+ // with the election process.
+ onChangeListenerRegistered.countDown();
+
+ // Wait for the shard to become the leader and notify our listener with the existing
+ // data in the store.
+ listener.waitForChangeEvents(path);
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
public void testCreateTransaction(){
new ShardTestKit(getSystem()) {{
- ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateTransaction");
+ ActorRef shard = getSystem().actorOf(newShardProps(), "testCreateTransaction");
- waitUntilLeader(subject);
+ waitUntilLeader(shard);
- subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shard.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- subject.tell(new CreateTransaction("txn-1",
+ shard.tell(new CreateTransaction("txn-1",
TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
String path = reply.getTransactionActorPath().toString();
assertTrue("Unexpected transaction path " + path,
path.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
- expectNoMsg();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
public void testCreateTransactionOnChain(){
new ShardTestKit(getSystem()) {{
- final ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateTransactionOnChain");
+ final ActorRef shard = getSystem().actorOf(newShardProps(), "testCreateTransactionOnChain");
- waitUntilLeader(subject);
+ waitUntilLeader(shard);
- subject.tell(new CreateTransaction("txn-1",
+ shard.tell(new CreateTransaction("txn-1",
TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
getRef());
String path = reply.getTransactionActorPath().toString();
assertTrue("Unexpected transaction path " + path,
path.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
- expectNoMsg();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
+ @SuppressWarnings("serial")
@Test
- public void testPeerAddressResolved(){
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- Props props = Shard.props(identifier,
- Collections.<ShardIdentifier, String>singletonMap(identifier, null),
- dataStoreContext, SCHEMA_CONTEXT);
- final ActorRef subject = getSystem().actorOf(props, "testPeerAddressResolved");
-
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ public void testPeerAddressResolved() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ final CountDownLatch recoveryComplete = new CountDownLatch(1);
+ class TestShard extends Shard {
+ TestShard() {
+ super(shardID, Collections.<ShardIdentifier, String>singletonMap(shardID, null),
+ dataStoreContext, SCHEMA_CONTEXT);
+ }
- subject.tell(
- new PeerAddressResolved(identifier, "akka://foobar"),
- getRef());
+ Map<String, String> getPeerAddresses() {
+ return getRaftActorContext().getPeerAddresses();
+ }
- expectNoMsg();
+ @Override
+ protected void onRecoveryComplete() {
+ try {
+ super.onRecoveryComplete();
+ } finally {
+ recoveryComplete.countDown();
+ }
}
- };
+ }
+
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(new Creator<Shard>() {
+ @Override
+ public TestShard create() throws Exception {
+ return new TestShard();
+ }
+ })), "testPeerAddressResolved");
+
+ //waitUntilLeader(shard);
+ assertEquals("Recovery complete", true,
+ Uninterruptibles.awaitUninterruptibly(recoveryComplete, 5, TimeUnit.SECONDS));
+
+ String address = "akka://foobar";
+ shard.underlyingActor().onReceiveCommand(new PeerAddressResolved(shardID, address));
+
+ assertEquals("getPeerAddresses", address,
+ ((TestShard)shard.underlyingActor()).getPeerAddresses().get(shardID.toString()));
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
- public void testApplySnapshot() throws ExecutionException, InterruptedException {
- TestActorRef<Shard> ref = TestActorRef.create(getSystem(), newShardProps());
+ public void testApplySnapshot() throws Exception {
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(),
+ "testApplySnapshot");
NormalizedNodeToNodeCodec codec =
new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT);
- writeToStore(ref, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
YangInstanceIdentifier root = YangInstanceIdentifier.builder().build();
- NormalizedNode<?,?> expected = readStore(ref, root);
+ NormalizedNode<?,?> expected = readStore(shard, root);
NormalizedNodeMessages.Container encode = codec.encode(expected);
encode.getNormalizedNode().toByteString().toByteArray(),
Collections.<ReplicatedLogEntry>emptyList(), 1, 2, 3, 4));
- ref.underlyingActor().onReceiveCommand(applySnapshot);
+ shard.underlyingActor().onReceiveCommand(applySnapshot);
- NormalizedNode<?,?> actual = readStore(ref, root);
+ NormalizedNode<?,?> actual = readStore(shard, root);
assertEquals(expected, actual);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
@Test
public void testApplyState() throws Exception {
- TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps());
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testApplyState");
NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
NormalizedNode<?,?> actual = readStore(shard, TestModel.TEST_PATH);
assertEquals("Applied state", node, actual);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
@SuppressWarnings("serial")
DOMStoreReadTransaction readTx = testStore.newReadOnlyTransaction();
NormalizedNode<?, ?> root = readTx.read(YangInstanceIdentifier.builder().build()).get().get();
- InMemorySnapshotStore.addSnapshot(IDENTIFIER.toString(), Snapshot.create(
+ InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT).encode(
root).
getNormalizedNode().toByteString().toByteArray(),
// Set up the InMemoryJournal.
- InMemoryJournal.addEntry(IDENTIFIER.toString(), 0, new ReplicatedLogImplEntry(0, 1, newPayload(
+ InMemoryJournal.addEntry(shardID.toString(), 0, new ReplicatedLogImplEntry(0, 1, newPayload(
new WriteModification(TestModel.OUTER_LIST_PATH,
ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
SCHEMA_CONTEXT))));
Modification mod = new MergeModification(path,
ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i),
SCHEMA_CONTEXT);
- InMemoryJournal.addEntry(IDENTIFIER.toString(), i, new ReplicatedLogImplEntry(i, 1,
+ InMemoryJournal.addEntry(shardID.toString(), i, new ReplicatedLogImplEntry(i, 1,
newPayload(mod)));
}
- InMemoryJournal.addEntry(IDENTIFIER.toString(), nListEntries + 1,
+ InMemoryJournal.addEntry(shardID.toString(), nListEntries + 1,
new ApplyLogEntries(nListEntries));
// Create the actor and wait for recovery complete.
Creator<Shard> creator = new Creator<Shard>() {
@Override
public Shard create() throws Exception {
- return new Shard(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+ return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
dataStoreContext, SCHEMA_CONTEXT) {
@Override
protected void onRecoveryComplete() {
shard.underlyingActor().getShardMBean().getCommitIndex());
assertEquals("Last applied", nListEntries,
shard.underlyingActor().getShardMBean().getLastApplied());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}
- private CompositeModificationPayload newPayload(Modification... mods) {
+ private CompositeModificationPayload newPayload(final Modification... mods) {
MutableCompositeModification compMod = new MutableCompositeModification();
for(Modification mod: mods) {
compMod.addModification(mod);
return new CompositeModificationPayload(compMod.toSerializable());
}
- private DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(String cohortName,
- InMemoryDOMDataStore dataStore, YangInstanceIdentifier path, NormalizedNode data,
- MutableCompositeModification modification) {
+ private DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(final String cohortName,
+ final InMemoryDOMDataStore dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ final MutableCompositeModification modification) {
return setupMockWriteTransaction(cohortName, dataStore, path, data, modification, null);
}
- private DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(String cohortName,
- InMemoryDOMDataStore dataStore, YangInstanceIdentifier path, NormalizedNode data,
- MutableCompositeModification modification,
+ private DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(final String cohortName,
+ final InMemoryDOMDataStore dataStore, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+ final MutableCompositeModification modification,
final Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit) {
DOMStoreWriteTransaction tx = dataStore.newWriteOnlyTransaction();
doAnswer(new Answer<ListenableFuture<Boolean>>() {
@Override
- public ListenableFuture<Boolean> answer(InvocationOnMock invocation) {
+ public ListenableFuture<Boolean> answer(final InvocationOnMock invocation) {
return realCohort.canCommit();
}
}).when(cohort).canCommit();
doAnswer(new Answer<ListenableFuture<Void>>() {
@Override
- public ListenableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
+ public ListenableFuture<Void> answer(final InvocationOnMock invocation) throws Throwable {
if(preCommit != null) {
return preCommit.apply(realCohort);
} else {
doAnswer(new Answer<ListenableFuture<Void>>() {
@Override
- public ListenableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
+ public ListenableFuture<Void> answer(final InvocationOnMock invocation) throws Throwable {
return realCohort.commit();
}
}).when(cohort).commit();
doAnswer(new Answer<ListenableFuture<Void>>() {
@Override
- public ListenableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
+ public ListenableFuture<Void> answer(final InvocationOnMock invocation) throws Throwable {
return realCohort.abort();
}
}).when(cohort).abort();
@SuppressWarnings({ "unchecked" })
@Test
public void testConcurrentThreePhaseCommits() throws Throwable {
- System.setProperty("shard.persistent", "true");
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testConcurrentThreePhaseCommits");
waitUntilLeader(shard);
// Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
// Send the ForwardedReadyTransaction for the next 2 Tx's.
- shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID3, cohort3, modification3), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ cohort3, modification3, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
class OnFutureComplete extends OnComplete<Object> {
private final Class<?> expRespType;
- OnFutureComplete(Class<?> expRespType) {
+ OnFutureComplete(final Class<?> expRespType) {
this.expRespType = expRespType;
}
@Override
- public void onComplete(Throwable error, Object resp) {
+ public void onComplete(final Throwable error, final Object resp) {
if(error != null) {
- System.out.println(new java.util.Date()+": "+getClass().getSimpleName() + " failure: "+error);
caughtEx.set(new AssertionError(getClass().getSimpleName() + " failure", error));
} else {
try {
}
}
- void onSuccess(Object resp) throws Exception {
+ void onSuccess(final Object resp) throws Exception {
}
}
}
@Override
- public void onComplete(Throwable error, Object resp) {
+ public void onComplete(final Throwable error, final Object resp) {
super.onComplete(error, resp);
commitLatch.countDown();
}
class OnCanCommitFutureComplete extends OnFutureComplete {
private final String transactionID;
- OnCanCommitFutureComplete(String transactionID) {
+ OnCanCommitFutureComplete(final String transactionID) {
super(CanCommitTransactionReply.SERIALIZABLE_CLASS);
this.transactionID = transactionID;
}
@Override
- void onSuccess(Object resp) throws Exception {
+ void onSuccess(final Object resp) throws Exception {
CanCommitTransactionReply canCommitReply =
CanCommitTransactionReply.fromSerializable(resp);
assertEquals("Can commit", true, canCommitReply.getCanCommit());
assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
assertEquals(TestModel.ID_QNAME.getLocalName() + " value", 1, idLeaf.get().getValue());
+ for(int i = 0; i < 20 * 5; i++) {
+ long lastLogIndex = shard.underlyingActor().getShardMBean().getLastLogIndex();
+ if(lastLogIndex == 2) {
+ break;
+ }
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
assertEquals("Last log index", 2, shard.underlyingActor().getShardMBean().getLastLogIndex());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
public void testCommitPhaseFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitPhaseFailure");
waitUntilLeader(shard);
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message for the first Tx.
final CountDownLatch latch = new CountDownLatch(1);
canCommitFuture.onComplete(new OnComplete<Object>() {
@Override
- public void onComplete(Throwable t, Object resp) {
+ public void onComplete(final Throwable t, final Object resp) {
latch.countDown();
}
}, getSystem().dispatcher());
inOrder.verify(cohort1).preCommit();
inOrder.verify(cohort1).commit();
inOrder.verify(cohort2).canCommit();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
public void testPreCommitPhaseFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testPreCommitPhaseFailure");
waitUntilLeader(shard);
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID, cohort, modification), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message.
InOrder inOrder = inOrder(cohort);
inOrder.verify(cohort).canCommit();
inOrder.verify(cohort).preCommit();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
public void testCanCommitPhaseFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitPhaseFailure");
waitUntilLeader(shard);
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID, cohort, modification), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message.
shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
public void testAbortBeforeFinishCommit() throws Throwable {
- System.setProperty("shard.persistent", "true");
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testAbortBeforeFinishCommit");
waitUntilLeader(shard);
new AbortTransaction(transactionID).toSerializable(), timeout);
abortFuture.onComplete(new OnComplete<Object>() {
@Override
- public void onComplete(Throwable e, Object resp) {
+ public void onComplete(final Throwable e, final Object resp) {
abortComplete.countDown();
}
}, getSystem().dispatcher());
TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME),
modification, preCommit);
- shard.tell(new ForwardedReadyTransaction(transactionID, cohort, modification), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION,
+ cohort, modification, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST_PATH);
assertNotNull(TestModel.TEST_QNAME.getLocalName() + " not found", node);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitTimeout");
waitUntilLeader(shard);
// Ready the Tx's
- shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// canCommit 1st Tx. We don't send the commit so it should timeout.
NormalizedNode<?, ?> node = readStore(shard, listNodePath);
assertNotNull(listNodePath + " not found", node);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitQueueCapacityExceeded");
waitUntilLeader(shard);
// Ready the Tx's
- shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID3, cohort3, modification3), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID3, CURRENT_VERSION,
+ cohort3, modification3, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// canCommit 1st Tx.
shard.tell(new CanCommitTransaction(transactionID3).toSerializable(), getRef());
expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
public void testCanCommitBeforeReadyFailure() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitBeforeReadyFailure");
shard.tell(new CanCommitTransaction("tx").toSerializable(), getRef());
expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
public void testAbortTransaction() throws Throwable {
new ShardTestKit(getSystem()) {{
final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), shardName());
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testAbortTransaction");
waitUntilLeader(shard);
// Simulate the ForwardedReadyTransaction messages that would be sent
// by the ShardTransaction.
- shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID1, CURRENT_VERSION,
+ cohort1, modification1, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2), getRef());
+ shard.tell(new ForwardedReadyTransaction(transactionID2, CURRENT_VERSION,
+ cohort2, modification2, true), getRef());
expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
// Send the CanCommitTransaction message for the first Tx.
final CountDownLatch latch = new CountDownLatch(1);
canCommitFuture.onComplete(new OnComplete<Object>() {
@Override
- public void onComplete(Throwable t, Object resp) {
+ public void onComplete(final Throwable t, final Object resp) {
latch.countDown();
}
}, getSystem().dispatcher());
InOrder inOrder = inOrder(cohort1, cohort2);
inOrder.verify(cohort1).canCommit();
inOrder.verify(cohort2).canCommit();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
public void testCreateSnapshot() throws IOException, InterruptedException {
+ // Delegates to the parameterized helper with persistence enabled.
+ testCreateSnapshot(true, "testCreateSnapshot");
+ }
+
+ @Test
+ public void testCreateSnapshotWithNonPersistentData() throws IOException, InterruptedException {
+ // Same snapshot scenario as above but with persistence disabled.
+ testCreateSnapshot(false, "testCreateSnapshotWithNonPersistentData");
+ }
+
+ @SuppressWarnings("serial")
+ public void testCreateSnapshot(final boolean persistent, final String shardActorName) throws IOException, InterruptedException {
+ final DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(persistent).build();
+
new ShardTestKit(getSystem()) {{
final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
Creator<Shard> creator = new Creator<Shard>() {
@Override
public Shard create() throws Exception {
- return new Shard(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+ return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
dataStoreContext, SCHEMA_CONTEXT) {
@Override
- public void saveSnapshot(Object snapshot) {
- super.saveSnapshot(snapshot);
+ protected void commitSnapshot(final long sequenceNumber) {
+ super.commitSnapshot(sequenceNumber);
latch.get().countDown();
}
};
};
TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
- Props.create(new DelegatingShardCreator(creator)), "testCreateSnapshot");
+ Props.create(new DelegatingShardCreator(creator)), shardActorName);
waitUntilLeader(shard);
shard.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
commitTransaction(putTransaction);
- NormalizedNode expected = readStore(store);
+ NormalizedNode<?, ?> expected = readStore(store);
DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
commitTransaction(writeTransaction);
- NormalizedNode actual = readStore(store);
+ NormalizedNode<?, ?> actual = readStore(store);
assertEquals(expected, actual);
}
- private NormalizedNode readStore(InMemoryDOMDataStore store) throws ReadFailedException {
+ @Test
+ public void testRecoveryApplicable(){
+ // Verifies that the DatastoreContext persistent(...) flag is propagated to the
+ // Shard's DataPersistenceProvider, which governs whether journal recovery runs.
+
+ final DatastoreContext persistentContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(true).build();
+
+ final Props persistentProps = Shard.props(shardID, Collections.<ShardIdentifier, String>emptyMap(),
+ persistentContext, SCHEMA_CONTEXT);
+
+ final DatastoreContext nonPersistentContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(false).build();
+
+ final Props nonPersistentProps = Shard.props(shardID, Collections.<ShardIdentifier, String>emptyMap(),
+ nonPersistentContext, SCHEMA_CONTEXT);
+
+ new ShardTestKit(getSystem()) {{
+ TestActorRef<Shard> shard1 = TestActorRef.create(getSystem(),
+ persistentProps, "testPersistence1");
+
+ assertTrue("Recovery Applicable", shard1.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+
+ shard1.tell(PoisonPill.getInstance(), ActorRef.noSender());
+
+ TestActorRef<Shard> shard2 = TestActorRef.create(getSystem(),
+ nonPersistentProps, "testPersistence2");
+
+ assertFalse("Recovery Not Applicable", shard2.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+
+ // Stop the actors so their names can be reused by other tests.
+ shard2.tell(PoisonPill.getInstance(), ActorRef.noSender());
+
+ }};
+
+ }
+
+
+ private NormalizedNode<?, ?> readStore(final InMemoryDOMDataStore store) throws ReadFailedException {
DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
transaction.read(YangInstanceIdentifier.builder().build());
return normalizedNode;
}
- private void commitTransaction(DOMStoreWriteTransaction transaction) {
+ private void commitTransaction(final DOMStoreWriteTransaction transaction) {
DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
ListenableFuture<Void> future =
commitCohort.preCommit();
return new AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>() {
@Override
public void onDataChanged(
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
+ final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
}
};
}
- private NormalizedNode<?,?> readStore(TestActorRef<Shard> shard, YangInstanceIdentifier id)
+ static NormalizedNode<?,?> readStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id)
throws ExecutionException, InterruptedException {
DOMStoreReadTransaction transaction = shard.underlyingActor().getDataStore().newReadOnlyTransaction();
return node;
}
- private void writeToStore(TestActorRef<Shard> shard, YangInstanceIdentifier id, NormalizedNode<?,?> node)
+ private void writeToStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id, final NormalizedNode<?,?> node)
throws ExecutionException, InterruptedException {
DOMStoreWriteTransaction transaction = shard.underlyingActor().getDataStore().newWriteOnlyTransaction();
commitCohort.commit().get();
}
+ @SuppressWarnings("serial")
private static final class DelegatingShardCreator implements Creator<Shard> {
private final Creator<Shard> delegate;
- DelegatingShardCreator(Creator<Shard> delegate) {
+ DelegatingShardCreator(final Creator<Shard> delegate) {
this.delegate = delegate;
}
*/
package org.opendaylight.controller.cluster.datastore;
-import java.util.concurrent.TimeUnit;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.pattern.Patterns;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.common.util.concurrent.Uninterruptibles;
import org.junit.Assert;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import com.google.common.util.concurrent.Uninterruptibles;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.pattern.Patterns;
-import akka.testkit.JavaTestKit;
-import akka.util.Timeout;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
class ShardTestKit extends JavaTestKit {
super(actorSystem);
}
- protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
+ protected void waitForLogMessage(final Class<?> logLevel, ActorRef subject, String logMessage){
// Wait for a specific log message to show up
final boolean result =
new JavaTestKit.EventFilter<Boolean>(logLevel
}
protected void waitUntilLeader(ActorRef shard) {
+ // Short per-ask timeout; the overall wait is bounded by the retry loop below
+ // (100 attempts x 50 ms sleep, roughly 5 seconds total).
+ FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
for(int i = 0; i < 20 * 5; i++) {
- Future<Object> future = Patterns.ask(shard, new FindLeader(), new Timeout(5, TimeUnit.SECONDS));
+ Future<Object> future = Patterns.ask(shard, new FindLeader(), new Timeout(duration));
try {
- FindLeaderReply resp = (FindLeaderReply)Await.result(future, Duration.create(5, TimeUnit.SECONDS));
+ FindLeaderReply resp = (FindLeaderReply)Await.result(future, duration);
if(resp.getLeaderActor() != null) {
return;
}
- } catch (Exception e) {
+ // A timeout here just means no leader has been elected yet - deliberately
+ // swallow it and retry; any other exception is logged for diagnosis.
+ } catch(TimeoutException e) {
+ } catch(Exception e) {
+ System.err.println("FindLeader threw ex");
e.printStackTrace();
}
+
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
}
Assert.fail("Leader not found for shard " + shard.path());
}
+
}
\ No newline at end of file
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.pattern.AskTimeoutException;
import akka.testkit.TestActorRef;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
-import java.util.Collections;
-import java.util.concurrent.TimeUnit;
/**
* Covers negative test cases
}
private ActorRef createShard(){
- return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext,
+ return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.<ShardIdentifier, String>emptyMap(), datastoreContext,
TestModel.createTestContext()));
}
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeWriteWithTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeReadWriteWithTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
.serialize(Builders.containerBuilder().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).build());
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeMergeTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props, "testNegativeMergeTransactionReady");
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeDeleteDataWhenTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Collections;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.WriteData;
+import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.duration.FiniteDuration;
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.dispatch.Dispatchers;
+import akka.testkit.TestActorRef;
+
+/**
+ * Tests backwards compatibility support from Helium-1 to Helium.
+ *
+ * In Helium-1, the 3-phase commit support was moved from the ThreePhaseCommitCohort actor to the
+ * Shard. As a consequence, a new transactionId field was added to the CanCommitTransaction,
+ * CommitTransaction and AbortTransaction messages. With a base Helium version node, these messages
+ * would be sans transactionId so this test verifies the Shard handles that properly.
+ *
+ * @author Thomas Pantelis
+ */
+public class ShardTransactionHeliumBackwardsCompatibilityTest extends AbstractActorTest {
+
+ @Test
+ public void testTransactionCommit() throws Exception {
+ // Drives a full write/ready/3-phase-commit cycle using raw protobuf messages
+ // built WITHOUT the version/transactionId fields, simulating a base Helium node.
+ new ShardTestKit(getSystem()) {{
+ SchemaContext schemaContext = TestModel.createTestContext();
+ Props shardProps = Shard.props(ShardIdentifier.builder().memberName("member-1").
+ shardName("inventory").type("config").build(),
+ Collections.<ShardIdentifier,String>emptyMap(),
+ DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).build(),
+ schemaContext).withDispatcher(Dispatchers.DefaultDispatcherId());
+
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(), shardProps,
+ "testTransactionCommit");
+
+ waitUntilLeader(shard);
+
+ // Send CreateTransaction message with no messages version
+
+ String transactionID = "txn-1";
+ shard.tell(ShardTransactionMessages.CreateTransaction.newBuilder()
+ .setTransactionId(transactionID)
+ .setTransactionType(TransactionProxy.TransactionType.WRITE_ONLY.ordinal())
+ .setTransactionChainId("").build(), getRef());
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ CreateTransactionReply reply = expectMsgClass(duration, CreateTransactionReply.class);
+
+ ActorSelection txActor = getSystem().actorSelection(reply.getTransactionActorPath());
+
+ // Write data to the Tx
+
+ txActor.tell(new WriteData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), schemaContext), getRef());
+
+ expectMsgClass(duration, WriteDataReply.class);
+
+ // Ready the Tx
+
+ txActor.tell(new ReadyTransaction().toSerializable(), getRef());
+
+ ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(expectMsgClass(
+ duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
+
+ // Pre-Helium-1 clients talk to the cohort actor path returned in the ready
+ // reply rather than to the Shard directly.
+ ActorSelection cohortActor = getSystem().actorSelection(readyReply.getCohortPath());
+
+ // Send the CanCommitTransaction message with no transactionId.
+
+ cohortActor.tell(ThreePhaseCommitCohortMessages.CanCommitTransaction.newBuilder().build(),
+ getRef());
+
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the PreCommitTransaction message with no transactionId.
+
+ cohortActor.tell(ThreePhaseCommitCohortMessages.PreCommitTransaction.newBuilder().build(),
+ getRef());
+
+ expectMsgClass(duration, PreCommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the CommitTransaction message with no transactionId.
+
+ cohortActor.tell(ThreePhaseCommitCohortMessages.CommitTransaction.newBuilder().build(),
+ getRef());
+
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Verify the committed data actually landed in the shard's store.
+ NormalizedNode<?, ?> node = ShardTest.readStore(shard, TestModel.TEST_PATH);
+ Assert.assertNotNull("Data not found in store", node);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testTransactionAbort() throws Exception {
+ // Same legacy-message setup as testTransactionCommit, but aborts after canCommit
+ // instead of completing the commit.
+ new ShardTestKit(getSystem()) {{
+ SchemaContext schemaContext = TestModel.createTestContext();
+ Props shardProps = Shard.props(ShardIdentifier.builder().memberName("member-1").
+ shardName("inventory").type("config").build(),
+ Collections.<ShardIdentifier,String>emptyMap(),
+ DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).build(),
+ schemaContext).withDispatcher(Dispatchers.DefaultDispatcherId());
+
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(), shardProps,
+ "testTransactionAbort");
+
+ waitUntilLeader(shard);
+
+ // Send CreateTransaction message with no messages version
+
+ String transactionID = "txn-1";
+ shard.tell(ShardTransactionMessages.CreateTransaction.newBuilder()
+ .setTransactionId(transactionID)
+ .setTransactionType(TransactionProxy.TransactionType.WRITE_ONLY.ordinal())
+ .setTransactionChainId("").build(), getRef());
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ CreateTransactionReply reply = expectMsgClass(duration, CreateTransactionReply.class);
+
+ ActorSelection txActor = getSystem().actorSelection(reply.getTransactionActorPath());
+
+ // Write data to the Tx
+
+ txActor.tell(new WriteData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), schemaContext), getRef());
+
+ expectMsgClass(duration, WriteDataReply.class);
+
+ // Ready the Tx
+
+ txActor.tell(new ReadyTransaction().toSerializable(), getRef());
+
+ ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(expectMsgClass(
+ duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
+
+ ActorSelection cohortActor = getSystem().actorSelection(readyReply.getCohortPath());
+
+ // Send the CanCommitTransaction message with no transactionId.
+
+ cohortActor.tell(ThreePhaseCommitCohortMessages.CanCommitTransaction.newBuilder().build(),
+ getRef());
+
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the AbortTransaction message with no transactionId.
+
+ cohortActor.tell(ThreePhaseCommitCohortMessages.AbortTransaction.newBuilder().build(),
+ getRef());
+
+ expectMsgClass(duration, AbortTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+}
package org.opendaylight.controller.cluster.datastore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.Terminated;
import akka.testkit.TestActorRef;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.ShardWriteTransaction.GetCompositeModificationReply;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
-import java.util.Collections;
-import java.util.concurrent.TimeUnit;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
public class ShardTransactionTest extends AbstractActorTest {
private static ListeningExecutorService storeExecutor =
private ActorRef createShard(){
return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext()));
+ Collections.<ShardIdentifier, String>emptyMap(), datastoreContext, TestModel.createTestContext()));
}
@Test
public void testOnReceiveReadData() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject = getSystem().actorOf(props, "testReadData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- if (ReadDataReply.fromSerializable(testSchemaContext,YangInstanceIdentifier.builder().build(), in)
- .getNormalizedNode()!= null) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveReadData(getSystem().actorOf(props, "testReadDataRO"));
+
+ props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveReadData(getSystem().actorOf(props, "testReadDataRW"));
+ }
+
+ private void testOnReceiveReadData(final ActorRef transaction) {
+ //serialized read
+ transaction.tell(new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(),
+ getRef());
+
+ ShardTransactionMessages.ReadDataReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ReadDataReply.SERIALIZABLE_CLASS);
+
+ assertNotNull(ReadDataReply.fromSerializable(
+ testSchemaContext,YangInstanceIdentifier.builder().build(), replySerialized)
+ .getNormalizedNode());
+
+ // unserialized read
+ transaction.tell(new ReadData(YangInstanceIdentifier.builder().build()),getRef());
+
+ ReadDataReply reply = expectMsgClass(duration("5 seconds"), ReadDataReply.class);
+
+ assertNotNull(reply.getNormalizedNode());
}};
}
public void testOnReceiveReadDataWhenDataNotFound() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props( store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject = getSystem().actorOf(props, "testReadDataWhenDataNotFound");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new ReadData(TestModel.TEST_PATH).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- if (ReadDataReply.fromSerializable(testSchemaContext,TestModel.TEST_PATH, in)
- .getNormalizedNode()
- == null) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props( store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveReadDataWhenDataNotFound(getSystem().actorOf(
+ props, "testReadDataWhenDataNotFoundRO"));
+
+ props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveReadDataWhenDataNotFound(getSystem().actorOf(
+ props, "testReadDataWhenDataNotFoundRW"));
+ }
+
+ private void testOnReceiveReadDataWhenDataNotFound(final ActorRef transaction) {
+ // serialized read
+ transaction.tell(new ReadData(TestModel.TEST_PATH).toSerializable(), getRef());
+
+ ShardTransactionMessages.ReadDataReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ReadDataReply.SERIALIZABLE_CLASS);
+
+ assertTrue(ReadDataReply.fromSerializable(
+ testSchemaContext, TestModel.TEST_PATH, replySerialized).getNormalizedNode() == null);
+
+ // unserialized read
+ transaction.tell(new ReadData(TestModel.TEST_PATH),getRef());
+
+ ReadDataReply reply = expectMsgClass(duration("5 seconds"), ReadDataReply.class);
+
+ assertTrue(reply.getNormalizedNode() == null);
}};
}
public void testOnReceiveDataExistsPositive() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject = getSystem().actorOf(props, "testDataExistsPositive");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new DataExists(YangInstanceIdentifier.builder().build()).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
- if (DataExistsReply.fromSerializable(in)
- .exists()) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveDataExistsPositive(getSystem().actorOf(props, "testDataExistsPositiveRO"));
+
+ props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveDataExistsPositive(getSystem().actorOf(props, "testDataExistsPositiveRW"));
+ }
+
+ private void testOnReceiveDataExistsPositive(final ActorRef transaction) {
+ transaction.tell(new DataExists(YangInstanceIdentifier.builder().build()).toSerializable(),
+ getRef());
+
+ ShardTransactionMessages.DataExistsReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DataExistsReply.class);
+
+ assertTrue(DataExistsReply.fromSerializable(replySerialized).exists());
+
+ // unserialized data-exists check
+ transaction.tell(new DataExists(YangInstanceIdentifier.builder().build()),getRef());
+
+ DataExistsReply reply = expectMsgClass(duration("5 seconds"), DataExistsReply.class);
+
+ assertTrue(reply.exists());
}};
}
public void testOnReceiveDataExistsNegative() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject = getSystem().actorOf(props, "testDataExistsNegative");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new DataExists(TestModel.TEST_PATH).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
- if (!DataExistsReply.fromSerializable(in)
- .exists()) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveDataExistsNegative(getSystem().actorOf(props, "testDataExistsNegativeRO"));
+
+ props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+
+ testOnReceiveDataExistsNegative(getSystem().actorOf(props, "testDataExistsNegativeRW"));
+ }
+
+ private void testOnReceiveDataExistsNegative(final ActorRef transaction) {
+ transaction.tell(new DataExists(TestModel.TEST_PATH).toSerializable(), getRef());
+
+ ShardTransactionMessages.DataExistsReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DataExistsReply.class);
+
+ assertFalse(DataExistsReply.fromSerializable(replySerialized).exists());
+
+ // unserialized data-exists check
+ transaction.tell(new DataExists(TestModel.TEST_PATH),getRef());
+
+ DataExistsReply reply = expectMsgClass(duration("5 seconds"), DataExistsReply.class);
+
+ assertFalse(reply.exists());
}};
}
private void assertModification(final ActorRef subject,
final Class<? extends Modification> modificationType) {
new JavaTestKit(getSystem()) {{
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
- subject
- .tell(new ShardWriteTransaction.GetCompositedModification(),
- getRef());
-
- final CompositeModification compositeModification =
- new ExpectMsg<CompositeModification>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected CompositeModification match(Object in) {
- if (in instanceof ShardWriteTransaction.GetCompositeModificationReply) {
- return ((ShardWriteTransaction.GetCompositeModificationReply) in)
- .getModification();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertTrue(
- compositeModification.getModifications().size() == 1);
- assertEquals(modificationType,
- compositeModification.getModifications().get(0)
- .getClass());
-
- }
- };
+ subject.tell(new ShardWriteTransaction.GetCompositedModification(), getRef());
+
+ CompositeModification compositeModification = expectMsgClass(duration("3 seconds"),
+ GetCompositeModificationReply.class).getModification();
+
+ assertTrue(compositeModification.getModifications().size() == 1);
+ assertEquals(modificationType, compositeModification.getModifications().get(0).getClass());
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject =
- getSystem().actorOf(props, "testWriteData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(WriteDataReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- assertModification(subject, WriteModification.class);
- expectNoMsg();
- }
-
-
- };
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction = getSystem().actorOf(props, "testWriteData");
+
+ transaction.tell(new WriteData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()).toSerializable(),
+ getRef());
+
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.WriteDataReply.class);
+
+ assertModification(transaction, WriteModification.class);
+
+ //unserialized write
+ transaction.tell(new WriteData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ TestModel.createTestContext()),
+ getRef());
+
+ expectMsgClass(duration("5 seconds"), WriteDataReply.class);
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject =
- getSystem().actorOf(props, "testMergeData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new MergeData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), testSchemaContext).toSerializable(),
- getRef());
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction = getSystem().actorOf(props, "testMergeData");
- final String out = new ExpectMsg<String>(duration("500 milliseconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(MergeDataReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ transaction.tell(new MergeData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), testSchemaContext).toSerializable(),
+ getRef());
- assertEquals("match", out);
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.MergeDataReply.class);
- assertModification(subject, MergeModification.class);
+ assertModification(transaction, MergeModification.class);
- expectNoMsg();
- }
+ //unserialized merge
+ transaction.tell(new MergeData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), testSchemaContext),
+ getRef());
-
- };
+ expectMsgClass(duration("5 seconds"), MergeDataReply.class);
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject =
- getSystem().actorOf(props, "testDeleteData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction = getSystem().actorOf(props, "testDeleteData");
- subject.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(), getRef());
+ transaction.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(), getRef());
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(DeleteDataReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DeleteDataReply.class);
- assertEquals("match", out);
+ assertModification(transaction, DeleteModification.class);
- assertModification(subject, DeleteModification.class);
- expectNoMsg();
- }
+ //unserialized delete
+ transaction.tell(new DeleteData(TestModel.TEST_PATH), getRef());
-
- };
+ expectMsgClass(duration("5 seconds"), DeleteDataReply.class);
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject =
- getSystem().actorOf(props, "testReadyTransaction");
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction = getSystem().actorOf(props, "testReadyTransaction");
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ watch(transaction);
- subject.tell(new ReadyTransaction().toSerializable(), getRef());
+ transaction.tell(new ReadyTransaction().toSerializable(), getRef());
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
+ Terminated.class);
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
+ Terminated.class);
+ }};
- assertEquals("match", out);
+ // test ready transaction again using the unserialized ReadyTransaction message
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shard = createShard();
+ final Props props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction = getSystem().actorOf(props, "testReadyTransaction2");
- expectNoMsg();
- }
+ watch(transaction);
+ transaction.tell(new ReadyTransaction(), getRef());
- };
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
+ Terminated.class);
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
+ Terminated.class);
}};
}
+ @SuppressWarnings("unchecked")
@Test
public void testOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject =
- getSystem().actorOf(props, "testCloseTransaction");
-
- watch(subject);
-
- new Within(duration("6 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new CloseTransaction().toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- System.out.println("!!!IN match 1: "+(in!=null?in.getClass():"NULL"));
- if (in.getClass().equals(CloseTransactionReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- final String termination = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- System.out.println("!!!IN match 2: "+(in!=null?in.getClass():"NULL"));
- if (in instanceof Terminated) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", termination);
- }
- };
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction = getSystem().actorOf(props, "testCloseTransaction");
+
+ watch(transaction);
+
+ transaction.tell(new CloseTransaction().toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), CloseTransactionReply.SERIALIZABLE_CLASS);
+ expectTerminated(duration("3 seconds"), transaction);
}};
}
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final TestActorRef subject = TestActorRef.apply(props,getSystem());
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
- subject.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(), ActorRef.noSender());
+ transaction.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(), ActorRef.noSender());
}
@Test
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats, "txn");
- final ActorRef subject =
+ testSchemaContext, datastoreContext, shardStats, "txn",
+ CreateTransaction.CURRENT_VERSION);
+ final ActorRef transaction =
getSystem().actorOf(props, "testShardTransactionInactivity");
- watch(subject);
-
- // The shard Tx actor should receive a ReceiveTimeout message and self-destruct.
-
- final String termination = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof Terminated) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ watch(transaction);
- assertEquals("match", termination);
+ expectMsgClass(duration("3 seconds"), Terminated.class);
}};
}
}
package org.opendaylight.controller.cluster.datastore;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
public class TransactionChainProxyTest {
ActorContext actorContext = mock(ActorContext.class);
SchemaContext schemaContext = mock(SchemaContext.class);
verify(context, times(1)).broadcast(anyObject());
}
+
+ @Test
+ public void testTransactionChainsHaveUniqueId(){
+ TransactionChainProxy one = new TransactionChainProxy(mock(ActorContext.class));
+ TransactionChainProxy two = new TransactionChainProxy(mock(ActorContext.class));
+
+ Assert.assertNotEquals(one.getTransactionChainId(), two.getTransactionChainId());
+ }
}
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.util.concurrent.CheckedFuture;
-
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.dispatch.Futures;
+import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import org.junit.AfterClass;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mock;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
-
+import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
-
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.isA;
import static org.mockito.Mockito.times;
import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.WRITE_ONLY;
@SuppressWarnings("resource")
-public class TransactionProxyTest extends AbstractActorTest {
+public class TransactionProxyTest {
@SuppressWarnings("serial")
static class TestException extends RuntimeException {
CheckedFuture<?, ReadFailedException> invoke(TransactionProxy proxy) throws Exception;
}
+ private static ActorSystem system;
+
private final Configuration configuration = new MockConfiguration();
@Mock
private SchemaContext schemaContext;
+ @Mock
+ private ClusterWrapper mockClusterWrapper;
+
String memberName = "mock-member";
+ @BeforeClass
+ public static void setUpClass() throws IOException {
+
+ Config config = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder().
+ put("akka.actor.default-dispatcher.type",
+ "akka.testkit.CallingThreadDispatcherConfigurator").build()).
+ withFallback(ConfigFactory.load());
+ system = ActorSystem.create("test", config);
+ }
+
+ @AfterClass
+ public static void tearDownClass() throws IOException {
+ JavaTestKit.shutdownActorSystem(system);
+ system = null;
+ }
+
@Before
public void setUp(){
MockitoAnnotations.initMocks(this);
schemaContext = TestModel.createTestContext();
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().build();
+
doReturn(getSystem()).when(mockActorContext).getActorSystem();
doReturn(memberName).when(mockActorContext).getCurrentMemberName();
doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
+ doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
+ doReturn(dataStoreContext).when(mockActorContext).getDatastoreContext();
ShardStrategyFactory.setConfiguration(configuration);
}
+ private ActorSystem getSystem() {
+ return system;
+ }
+
private CreateTransaction eqCreateTransaction(final String memberName,
final TransactionType type) {
ArgumentMatcher<CreateTransaction> matcher = new ArgumentMatcher<CreateTransaction>() {
return argThat(matcher);
}
- private DataExists eqDataExists() {
+ private DataExists eqSerializedDataExists() {
ArgumentMatcher<DataExists> matcher = new ArgumentMatcher<DataExists>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private ReadData eqReadData() {
+ private DataExists eqDataExists() {
+ ArgumentMatcher<DataExists> matcher = new ArgumentMatcher<DataExists>() {
+ @Override
+ public boolean matches(Object argument) {
+ return (argument instanceof DataExists) &&
+ ((DataExists)argument).getPath().equals(TestModel.TEST_PATH);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private ReadData eqSerializedReadData() {
ArgumentMatcher<ReadData> matcher = new ArgumentMatcher<ReadData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private WriteData eqWriteData(final NormalizedNode<?, ?> nodeToWrite) {
+ private ReadData eqReadData() {
+ ArgumentMatcher<ReadData> matcher = new ArgumentMatcher<ReadData>() {
+ @Override
+ public boolean matches(Object argument) {
+ return (argument instanceof ReadData) &&
+ ((ReadData)argument).getPath().equals(TestModel.TEST_PATH);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private WriteData eqSerializedWriteData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private MergeData eqMergeData(final NormalizedNode<?, ?> nodeToWrite) {
+ private WriteData eqWriteData(final NormalizedNode<?, ?> nodeToWrite) {
+ ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
+ @Override
+ public boolean matches(Object argument) {
+ if(argument instanceof WriteData) {
+ WriteData obj = (WriteData) argument;
+ return obj.getPath().equals(TestModel.TEST_PATH) &&
+ obj.getData().equals(nodeToWrite);
+ }
+ return false;
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private MergeData eqSerializedMergeData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private DeleteData eqDeleteData() {
+ private MergeData eqMergeData(final NormalizedNode<?, ?> nodeToWrite) {
+ ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
+ @Override
+ public boolean matches(Object argument) {
+ if(argument instanceof MergeData) {
+ MergeData obj = ((MergeData) argument);
+ return obj.getPath().equals(TestModel.TEST_PATH) &&
+ obj.getData().equals(nodeToWrite);
+ }
+
+ return false;
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private DeleteData eqSerializedDeleteData() {
ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private Future<Object> readyTxReply(String path) {
+ private DeleteData eqDeleteData() {
+ ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
+ @Override
+ public boolean matches(Object argument) {
+ return argument instanceof DeleteData &&
+ ((DeleteData)argument).getPath().equals(TestModel.TEST_PATH);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private Future<Object> readySerializedTxReply(String path) {
return Futures.successful((Object)new ReadyTransactionReply(path).toSerializable());
}
- private Future<Object> readDataReply(NormalizedNode<?, ?> data) {
+ private Future<Object> readyTxReply(String path) {
+ return Futures.successful((Object)new ReadyTransactionReply(path));
+ }
+
+
+ private Future<Object> readSerializedDataReply(NormalizedNode<?, ?> data) {
return Futures.successful(new ReadDataReply(schemaContext, data).toSerializable());
}
- private Future<Object> dataExistsReply(boolean exists) {
+ private Future<ReadDataReply> readDataReply(NormalizedNode<?, ?> data) {
+ return Futures.successful(new ReadDataReply(schemaContext, data));
+ }
+
+ private Future<Object> dataExistsSerializedReply(boolean exists) {
return Futures.successful(new DataExistsReply(exists).toSerializable());
}
- private Future<Object> writeDataReply() {
+ private Future<DataExistsReply> dataExistsReply(boolean exists) {
+ return Futures.successful(new DataExistsReply(exists));
+ }
+
+ private Future<Object> writeSerializedDataReply() {
return Futures.successful(new WriteDataReply().toSerializable());
}
- private Future<Object> mergeDataReply() {
+ private Future<WriteDataReply> writeDataReply() {
+ return Futures.successful(new WriteDataReply());
+ }
+
+ private Future<Object> mergeSerializedDataReply() {
return Futures.successful(new MergeDataReply().toSerializable());
}
- private Future<Object> deleteDataReply() {
+ private Future<MergeDataReply> mergeDataReply() {
+ return Futures.successful(new MergeDataReply());
+ }
+
+ private Future<Object> deleteSerializedDataReply() {
return Futures.successful(new DeleteDataReply().toSerializable());
}
+ private Future<DeleteDataReply> deleteDataReply() {
+ return Futures.successful(new DeleteDataReply());
+ }
+
private ActorSelection actorSelection(ActorRef actorRef) {
return getSystem().actorSelection(actorRef.path());
}
- private CreateTransactionReply createTransactionReply(ActorRef actorRef){
+ private CreateTransactionReply createTransactionReply(ActorRef actorRef, int transactionVersion){
return CreateTransactionReply.newBuilder()
.setTransactionActorPath(actorRef.path().toString())
- .setTransactionId("txn-1").build();
+ .setTransactionId("txn-1")
+ .setMessageVersion(transactionVersion)
+ .build();
}
- private ActorRef setupActorContextWithInitialCreateTransaction(TransactionType type) {
- ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
- doReturn(getSystem().actorSelection(actorRef.path())).
+ private ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type, int transactionVersion) {
+ ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+ doReturn(actorSystem.actorSelection(actorRef.path())).
when(mockActorContext).actorSelection(actorRef.path().toString());
- doReturn(Optional.of(getSystem().actorSelection(actorRef.path()))).
- when(mockActorContext).findPrimaryShard(eq(DefaultShardStrategy.DEFAULT_SHARD));
+ doReturn(Futures.successful(actorSystem.actorSelection(actorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
- doReturn(createTransactionReply(actorRef)).when(mockActorContext).
- executeOperation(eq(getSystem().actorSelection(actorRef.path())),
+ doReturn(Futures.successful(createTransactionReply(actorRef, transactionVersion))).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(actorRef.path())),
eqCreateTransaction(memberName, type));
+
+ doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
+
return actorRef;
}
+ private ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type) {
+ return setupActorContextWithInitialCreateTransaction(actorSystem, type, CreateTransaction.CURRENT_VERSION);
+ }
+
+
private void propagateReadFailedExceptionCause(CheckedFuture<?, ReadFailedException> future)
throws Throwable {
@Test
public void testRead() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
@Test(expected = ReadFailedException.class)
public void testReadWithInvalidReplyMessageType() throws Exception {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
@Test(expected = TestException.class)
public void testReadWithAsyncRemoteOperatonFailure() throws Throwable {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
if (exToThrow instanceof PrimaryNotFoundException) {
- doReturn(Optional.absent()).when(mockActorContext).findPrimaryShard(anyString());
+ doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
} else {
- doReturn(Optional.of(getSystem().actorSelection(actorRef.path()))).
- when(mockActorContext).findPrimaryShard(anyString());
+ doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(anyString());
}
- doThrow(exToThrow).when(mockActorContext).executeOperation(any(ActorSelection.class), any());
+
+ doReturn(Futures.failed(exToThrow)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), any());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
@Test(expected = TestException.class)
public void testReadWithPriorRecordingOperationFailure() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqDeleteData());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
} finally {
verify(mockActorContext, times(0)).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ eq(actorSelection(actorRef)), eqSerializedReadData());
}
}
@Test
public void testReadWithPriorRecordingOperationSuccessful() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(expectedNode));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(expectedNode));
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
transactionProxy.read(TestModel.TEST_PATH);
}
+ @Test(expected=IllegalArgumentException.class)
+ public void testInvalidCreateTransactionReply() throws Throwable {
+ ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext).
+ actorSelection(actorRef.path().toString());
+
+ doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
+ eq(getSystem().actorSelection(actorRef.path())), eqCreateTransaction(memberName, READ_ONLY));
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+
+ propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
+ }
+
@Test
public void testExists() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
- doReturn(dataExistsReply(false)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
assertEquals("Exists response", false, exists);
- doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
@Test(expected = ReadFailedException.class)
public void testExistsWithInvalidReplyMessageType() throws Exception {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
@Test(expected = TestException.class)
public void testExistsWithAsyncRemoteOperatonFailure() throws Throwable {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
@Test(expected = TestException.class)
public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqDeleteData());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
- doReturn(dataExistsReply(false)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
} finally {
verify(mockActorContext, times(0)).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
}
}
@Test
public void testExistsWithPriorRecordingOperationSuccessful() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
- doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
@Test
public void testWrite() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
WriteDataReply.SERIALIZABLE_CLASS);
@Test
public void testMerge() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
MergeDataReply.SERIALIZABLE_CLASS);
@Test
public void testDelete() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
- doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDeleteData());
+ doReturn(deleteSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDeleteData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
transactionProxy.delete(TestModel.TEST_PATH);
verify(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqDeleteData());
+ eq(actorSelection(actorRef)), eqSerializedDeleteData());
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
DeleteDataReply.SERIALIZABLE_CLASS);
Object expReply = expReplies[i++];
if(expReply instanceof ActorSelection) {
ActorSelection actual = Await.result(future, Duration.create(5, TimeUnit.SECONDS));
- assertEquals("Cohort actor path", (ActorSelection) expReply, actual);
+ assertEquals("Cohort actor path", expReply, actual);
} else {
// Expecting exception.
try {
@SuppressWarnings("unchecked")
@Test
public void testReady() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
- doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
}
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testReadyForwardCompatibility() throws Exception {
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE, 0);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
+
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
+
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+
+ doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(eq(actorRef.path().toString()),
+ eq(actorRef.path().toString()));
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
+ READ_WRITE);
+
+ transactionProxy.read(TestModel.TEST_PATH);
+
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+
+ ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ WriteDataReply.SERIALIZABLE_CLASS);
+
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
+
+ verify(mockActorContext).resolvePath(eq(actorRef.path().toString()),
+ eq(actorRef.path().toString()));
+ }
+
@SuppressWarnings("unchecked")
@Test
public void testReadyWithRecordingOperationFailure() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeOperationAsync(eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
- doReturn(readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ doReturn(false).when(mockActorContext).isPathLocal(actorRef.path().toString());
+
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ verifyCohortFutures(proxy, TestException.class);
+
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
MergeDataReply.SERIALIZABLE_CLASS, TestException.class);
-
- verifyCohortFutures(proxy, TestException.class);
}
@SuppressWarnings("unchecked")
@Test
public void testReadyWithReplyFailure() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)),
@Test
public void testReadyWithInitialCreateTransactionFailure() throws Exception {
- doReturn(Optional.absent()).when(mockActorContext).findPrimaryShard(anyString());
-// doThrow(new PrimaryNotFoundException("mock")).when(mockActorContext).executeShardOperation(
-// anyString(), any());
+ doReturn(Futures.failed(new PrimaryNotFoundException("mock"))).when(
+ mockActorContext).findPrimaryShardAsync(anyString());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
@SuppressWarnings("unchecked")
@Test
public void testReadyWithInvalidReplyMessageType() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
doReturn(Futures.successful(new Object())).when(mockActorContext).
executeOperationAsync(eq(actorSelection(actorRef)),
@Test
public void testGetIdentifier() {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
TransactionProxy.TransactionType.READ_ONLY);
@SuppressWarnings("unchecked")
@Test
public void testClose() throws Exception{
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
- doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
verify(mockActorContext).sendOperationAsync(
eq(actorSelection(actorRef)), isA(CloseTransaction.SERIALIZABLE_CLASS));
}
+
+
+ /**
+ * Method to test a local Tx actor. The Tx paths are matched to decide if the
+ * Tx actor is local or not. This is done by mocking the Tx actor path
+ * and the caller paths and ensuring that the paths have the remote-address format
+ *
+ * Note: Since the default akka provider for test is not a RemoteActorRefProvider,
+ * the paths returned for the actors for all the tests are not qualified remote paths.
+ * Hence they are treated as non-local/remote actors. In short, all tests except the
+ * few below exercise the remote-actor code paths.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testLocalTxActorRead() throws Exception {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
+ .setTransactionId("txn-1")
+ .setTransactionActorPath(actorPath)
+ .build();
+
+ doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, READ_ONLY));
+
+ doReturn(true).when(mockActorContext).isPathLocal(actorPath);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,READ_ONLY);
+
+ // negative test case with null as the reply
+ doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqReadData());
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
+ TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", false, readOptional.isPresent());
+
+ // test case with node as read data reply
+ NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqReadData());
+
+ readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+
+ assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
+
+ // test for local data exists
+ doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqDataExists());
+
+ boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
+
+ assertEquals("Exists response", true, exists);
+ }
+
+ @Test
+ public void testLocalTxActorWrite() throws Exception {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
+ .setTransactionId("txn-1")
+ .setTransactionActorPath(actorPath)
+ .build();
+
+ doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, WRITE_ONLY));
+
+ doReturn(true).when(mockActorContext).isPathLocal(actorPath);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqWriteData(nodeToWrite));
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+
+ verify(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqWriteData(nodeToWrite));
+
+ //testing local merge
+ doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqMergeData(nodeToWrite));
+
+ transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+
+ verify(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqMergeData(nodeToWrite));
+
+
+ //testing local delete
+ doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqDeleteData());
+
+ transactionProxy.delete(TestModel.TEST_PATH);
+
+ verify(mockActorContext).executeOperationAsync(any(ActorSelection.class), eqDeleteData());
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ WriteDataReply.class, MergeDataReply.class, DeleteDataReply.class);
+
+ // testing ready
+ doReturn(readyTxReply(shardActorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(ReadyTransaction.class));
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+
+ ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+
+ verifyCohortFutures(proxy, getSystem().actorSelection(shardActorRef.path()));
+ }
}
package org.opendaylight.controller.cluster.datastore.shardstrategy;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
package org.opendaylight.controller.cluster.datastore.shardstrategy;
+import static org.junit.Assert.assertEquals;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import static junit.framework.Assert.assertEquals;
-
public class ModuleShardStrategyTest {
@Rule
public ExpectedException expectedEx = ExpectedException.none();
package org.opendaylight.controller.cluster.datastore.shardstrategy;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
-
public class ShardStrategyFactoryTest {
@Rule
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
+import akka.actor.Address;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
-
+import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
-import java.util.concurrent.TimeUnit;
-
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
@Test
public void testFindLocalShardWithShardNotFound(){
new JavaTestKit(getSystem()) {{
+ ActorRef shardManagerActorRef = getSystem()
+ .actorOf(MockShardManager.props(false, null));
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(false, null));
-
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
mock(Configuration.class));
- Optional<ActorRef> out = actorContext.findLocalShard("default");
- assertTrue(!out.isPresent());
- expectNoMsg();
- }
- };
+ Optional<ActorRef> out = actorContext.findLocalShard("default");
+ assertTrue(!out.isPresent());
}};
}
@Test
public void testExecuteRemoteOperation() {
new JavaTestKit(getSystem()) {{
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
-
- ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
+ ActorRef shardManagerActorRef = getSystem()
+ .actorOf(MockShardManager.props(true, shardActorRef));
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(true, shardActorRef));
-
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
mock(Configuration.class));
- ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
-
- Object out = actorContext.executeOperation(actor, "hello");
+ ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
- assertEquals("hello", out);
+ Object out = actorContext.executeOperation(actor, "hello");
- expectNoMsg();
- }
- };
+ assertEquals("hello", out);
}};
}
@Test
public void testExecuteRemoteOperationAsync() {
new JavaTestKit(getSystem()) {{
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ ActorRef shardManagerActorRef = getSystem()
+ .actorOf(MockShardManager.props(true, shardActorRef));
- ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
+ mock(Configuration.class));
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(true, shardActorRef));
+ ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
- mock(Configuration.class));
+ Future<Object> future = actorContext.executeOperationAsync(actor, "hello");
- ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
+ try {
+ Object result = Await.result(future, Duration.create(3, TimeUnit.SECONDS));
+ assertEquals("Result", "hello", result);
+ } catch(Exception e) {
+ throw new AssertionError(e);
+ }
+ }};
+ }
- Future<Object> future = actorContext.executeOperationAsync(actor, "hello");
+ @Test
+ public void testIsPathLocal() {
+ MockClusterWrapper clusterWrapper = new MockClusterWrapper();
+ ActorContext actorContext = null;
+
+ // null or empty path is never considered local
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(false, actorContext.isPathLocal(null));
+ assertEquals(false, actorContext.isPathLocal(""));
+
+ // with no self address available, nothing can be resolved as local
+ clusterWrapper.setSelfAddress(null);
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(false, actorContext.isPathLocal(""));
+
+ // even if the path is in local format, match the primary path (first 3 elements) and return true
+ clusterWrapper.setSelfAddress(new Address("akka", "test"));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(true, actorContext.isPathLocal("akka://test/user/$a"));
+
+ clusterWrapper.setSelfAddress(new Address("akka", "test"));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(true, actorContext.isPathLocal("akka://test/user/$a"));
+
+ // a deeper local-format path is still treated as local
+ clusterWrapper.setSelfAddress(new Address("akka", "test"));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(true, actorContext.isPathLocal("akka://test/user/token2/token3/$a"));
+
+ // self address of remote format, but Tx path of local format
+ clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(true, actorContext.isPathLocal(
+ "akka://system/user/shardmanager/shard/transaction"));
+
+ // self address of local format, but Tx path of remote format
+ clusterWrapper.setSelfAddress(new Address("akka.tcp", "system"));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(false, actorContext.isPathLocal(
+ "akka://system@127.0.0.1:2550/user/shardmanager/shard/transaction"));
+
+ // local-format path but different system name ("test1" vs "test").
+ // NOTE(review): still expected to be local - presumably only remote-format
+ // addresses are compared in full; confirm against isPathLocal's implementation.
+ clusterWrapper.setSelfAddress(new Address("akka", "test"));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(true, actorContext.isPathLocal("akka://test1/user/$a"));
+
+ // remote format: ip and port same as self -> local
+ clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(true, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2550/"));
+
+ // forward-slash missing in address -> not recognized as local
+ clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2550"));
+
+ // ips differ -> remote
+ clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.1.0.1:2550/"));
+
+ // ports differ -> remote
+ clusterWrapper.setSelfAddress(new Address("akka.tcp", "system", "127.0.0.1", 2550));
+ actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
+ assertEquals(false, actorContext.isPathLocal("akka.tcp://system@127.0.0.1:2551/"));
+ }
- try {
- Object result = Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- assertEquals("Result", "hello", result);
- } catch(Exception e) {
- throw new AssertionError(e);
- }
+ @Test
+ public void testResolvePathForRemoteActor() {
+ ActorContext actorContext =
+ new ActorContext(mock(ActorSystem.class), mock(ActorRef.class), mock(
+ ClusterWrapper.class),
+ mock(Configuration.class));
- expectNoMsg();
- }
- };
- }};
+ String actual = actorContext.resolvePath(
+ "akka.tcp://system@127.0.0.1:2550/user/shardmanager/shard",
+ "akka://system/user/shardmanager/shard/transaction");
+
+ String expected = "akka.tcp://system@127.0.0.1:2550/user/shardmanager/shard/transaction";
+
+ assertEquals(expected, actual);
+ }
+
+ @Test
+ public void testResolvePathForLocalActor() {
+ ActorContext actorContext =
+ new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class));
+
+ String actual = actorContext.resolvePath(
+ "akka://system/user/shardmanager/shard",
+ "akka://system/user/shardmanager/shard/transaction");
+
+ String expected = "akka://system/user/shardmanager/shard/transaction";
+
+ assertEquals(expected, actual);
}
+
+ @Test
+ public void testResolvePathForRemoteActorWithProperRemoteAddress() {
+ ActorContext actorContext =
+ new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+ mock(Configuration.class));
+
+ String actual = actorContext.resolvePath(
+ "akka.tcp://system@7.0.0.1:2550/user/shardmanager/shard",
+ "akka.tcp://system@7.0.0.1:2550/user/shardmanager/shard/transaction");
+
+ String expected = "akka.tcp://system@7.0.0.1:2550/user/shardmanager/shard/transaction";
+
+ assertEquals(expected, actual);
+ }
+
}
private static final Map<String, CountDownLatch> deleteMessagesCompleteLatches = new ConcurrentHashMap<>();
+ private static final Map<String, CountDownLatch> blockReadMessagesLatches = new ConcurrentHashMap<>();
+
public static void addEntry(String persistenceId, long sequenceNr, Object data) {
Map<Long, Object> journal = journals.get(persistenceId);
if(journal == null) {
deleteMessagesCompleteLatches.put(persistenceId, new CountDownLatch(1));
}
+ public static void addBlockReadMessagesLatch(String persistenceId, CountDownLatch latch) {
+ blockReadMessagesLatches.put(persistenceId, latch);
+ }
+
@Override
public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
return Futures.future(new Callable<Void>() {
@Override
public Void call() throws Exception {
+ CountDownLatch blockLatch = blockReadMessagesLatches.remove(persistenceId);
+ if(blockLatch != null) {
+ Uninterruptibles.awaitUninterruptibly(blockLatch);
+ }
+
Map<Long, Object> journal = journals.get(persistenceId);
if(journal == null) {
return null;
package org.opendaylight.controller.cluster.datastore.utils;
import akka.actor.ActorRef;
+import akka.actor.Address;
import akka.actor.AddressFromURIString;
import akka.cluster.ClusterEvent;
import akka.cluster.MemberStatus;
import akka.cluster.UniqueAddress;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import scala.collection.JavaConversions;
-
import java.util.HashSet;
import java.util.Set;
public class MockClusterWrapper implements ClusterWrapper{
- @Override public void subscribeToMemberEvents(ActorRef actorRef) {
+ private Address selfAddress = new Address("akka.tcp", "test", "127.0.0.1", 2550);
+
+ @Override
+ public void subscribeToMemberEvents(ActorRef actorRef) {
}
- @Override public String getCurrentMemberName() {
+ @Override
+ public String getCurrentMemberName() {
return "member-1";
}
+ @Override
+ public Address getSelfAddress() {
+ return selfAddress;
+ }
+
+ public void setSelfAddress(Address selfAddress) {
+ this.selfAddress = selfAddress;
+ }
+
public static void sendMemberUp(ActorRef to, String memberName, String address){
to.tell(createMemberUp(memberName, address), null);
}
package org.opendaylight.controller.cluster.datastore.utils;
import com.google.common.base.Optional;
-import org.opendaylight.controller.cluster.datastore.Configuration;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.opendaylight.controller.cluster.datastore.Configuration;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
public class MockConfiguration implements Configuration{
- @Override public List<String> getMemberShardNames(String memberName) {
+ @Override public List<String> getMemberShardNames(final String memberName) {
return Arrays.asList("default");
}
@Override public Optional<String> getModuleNameFromNameSpace(
- String nameSpace) {
+ final String nameSpace) {
return Optional.absent();
}
@Override
public Map<String, ShardStrategy> getModuleNameToShardStrategyMap() {
- return Collections.EMPTY_MAP;
+ return Collections.emptyMap();
}
@Override public List<String> getShardNamesFromModuleName(
- String moduleName) {
- return Collections.EMPTY_LIST;
+ final String moduleName) {
+ return Collections.emptyList();
}
- @Override public List<String> getMembersFromShardName(String shardName) {
+ @Override public List<String> getMembersFromShardName(final String shardName) {
if("default".equals(shardName)) {
return Arrays.asList("member-1", "member-2");
} else if("astronauts".equals(shardName)){
return Arrays.asList("member-2", "member-3");
}
- return Collections.EMPTY_LIST;
+ return Collections.emptyList();
}
@Override public Set<String> getAllShardNames() {
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * A mock DataChangeListener implementation that records received change events
+ * and lets tests await and assert on an expected number of notifications.
+ *
+ * @author Thomas Pantelis
+ */
+public class MockDataChangeListener implements
+ AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> {
+
+ // Change events received so far, in arrival order; synchronized because
+ // notifications may be delivered on a different thread than the asserting test.
+ private final List<AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>> changeList =
+ Collections.synchronizedList(Lists.<AsyncDataChangeEvent<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>>newArrayList());
+
+ // Counts down once per received change; volatile since reset() replaces the instance.
+ private volatile CountDownLatch changeLatch;
+ // Number of change events expected in the current round (set by reset()).
+ private int expChangeEventCount;
+
+ public MockDataChangeListener(int expChangeEventCount) {
+ reset(expChangeEventCount);
+ }
+
+ // Re-arms the listener for a new round: new latch, new expected count, cleared history.
+ public void reset(int expChangeEventCount) {
+ changeLatch = new CountDownLatch(expChangeEventCount);
+ this.expChangeEventCount = expChangeEventCount;
+ changeList.clear();
+ }
+
+ @Override
+ public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
+ // Record the event before releasing any waiter so waitForChangeEvents can index it.
+ changeList.add(change);
+ changeLatch.countDown();
+ }
+
+ // Blocks up to 5 seconds for the expected number of events, failing with the
+ // received-vs-expected counts on timeout, then asserts the i-th event's
+ // created-data map contains the i-th expected path.
+ public void waitForChangeEvents(YangInstanceIdentifier... expPaths) {
+ boolean done = Uninterruptibles.awaitUninterruptibly(changeLatch, 5, TimeUnit.SECONDS);
+ if(!done) {
+ // (expChangeEventCount - getCount()) == number of events actually received.
+ fail(String.format("Missing change notifications. Expected: %d. Actual: %d",
+ expChangeEventCount, (expChangeEventCount - changeLatch.getCount())));
+ }
+
+ for(int i = 0; i < expPaths.length; i++) {
+ assertTrue(String.format("Change %d does not contain %s", (i+1), expPaths[i]),
+ changeList.get(i).getCreatedData().containsKey(expPaths[i]));
+ }
+ }
+
+ // Sleeps 1 second to give any stray notifications time to arrive, then asserts
+ // no events beyond the expected count were received.
+ public void expectNoMoreChanges(String assertMsg) {
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+ assertEquals(assertMsg, expChangeEventCount, changeList.size());
+ }
+}
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
-import junit.framework.Assert;
-
import java.util.List;
+import org.junit.Assert;
public class TestUtils {
- public static void assertFirstSentMessage(ActorSystem actorSystem, ActorRef actorRef, Class clazz){
+ public static void assertFirstSentMessage(final ActorSystem actorSystem, final ActorRef actorRef, final Class<?> clazz){
ActorContext testContext = new ActorContext(actorSystem, actorSystem.actorOf(
Props.create(DoNothingActor.class)), new MockClusterWrapper(), new MockConfiguration());
Object messages = testContext
Assert.assertTrue(messages instanceof List);
- List<Object> listMessages = (List<Object>) messages;
+ List<?> listMessages = (List<?>) messages;
Assert.assertEquals(1, listMessages.size());
public static final QName CAR_PRICE_QNAME = QName.create(CAR_QNAME, "price");
- public static NormalizedNode create(){
+ public static NormalizedNode<?, ?> create(){
// Create a list builder
CollectionNodeBuilder<MapEntryNode, MapNode> cars =
}
- public static NormalizedNode emptyContainer(){
+ public static NormalizedNode<?, ?> emptyContainer(){
return ImmutableContainerNodeBuilder.create()
.withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
.build();
- public static NormalizedNode create(){
+ public static NormalizedNode<?, ?> create(){
// Create a list builder
CollectionNodeBuilder<MapEntryNode, MapNode> cars =
}
- public static NormalizedNode emptyContainer(){
+ public static NormalizedNode<?, ?> emptyContainer(){
return ImmutableContainerNodeBuilder.create()
.withNodeIdentifier(
new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
package org.opendaylight.controller.md.cluster.datastore.model;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
- * Data Broker which provides data transaction and data change listener fuctionality
+ * Data Broker which provides data transaction and data change listener functionality
* using {@link NormalizedNode} data format.
*
* This interface is type capture of generic interfaces and returns type captures
<packaging>bundle</packaging>
<dependencies>
- <dependency>
- <groupId>com.github.romix</groupId>
- <artifactId>java-concurrent-hash-trie-map</artifactId>
- </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-inmemory-datastore</artifactId>
- <version>1.2.0-SNAPSHOT</version>
-
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
org.opendaylight.yangtools.yang.util,
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.dom.impl.rev131028.*</Private-Package>
<Import-Package>*</Import-Package>
- <Embed-Dependency>java-concurrent-hash-trie-map;inline=true</Embed-Dependency>
</instructions>
</configuration>
</plugin>
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.AbstractFuture;
+import com.google.common.util.concurrent.AbstractListeningExecutorService;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.AbstractListeningExecutorService;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
/**
* Implementation of DOMDataCommitExecutor that coordinates transaction commits concurrently. The 3
public void onSuccess(Boolean result) {
if (result == null || !result) {
handleException(clientSubmitFuture, transaction, cohorts, cohortSize,
- CAN_COMMIT, new TransactionCommitFailedException(
+ CAN_COMMIT, TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER,
+ new TransactionCommitFailedException(
"Can Commit failed, no detailed cause available."));
} else {
if(remaining.decrementAndGet() == 0) {
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT, t);
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT,
+ TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER, t);
}
};
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT, t);
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize, PRE_COMMIT,
+ TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER, t);
}
};
@Override
public void onFailure(Throwable t) {
- handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT, t);
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize, COMMIT,
+ TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER, t);
}
};
private void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, int cohortSize,
- final String phase, final Throwable t) {
+ final String phase, final TransactionCommitFailedExceptionMapper exMapper,
+ final Throwable t) {
if(clientSubmitFuture.isDone()) {
// We must have had failures from multiple cohorts.
e = new RuntimeException("Unexpected error occurred", t);
}
- final TransactionCommitFailedException clientException =
- TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
+ final TransactionCommitFailedException clientException = exMapper.apply(e);
// Transaction failed - tell all cohorts to abort.
final class TransactionCommitFailedExceptionMapper
extends ExceptionMapper<TransactionCommitFailedException> {
- static final TransactionCommitFailedExceptionMapper PRE_COMMIT_MAPPER = create("canCommit");
+ static final TransactionCommitFailedExceptionMapper PRE_COMMIT_MAPPER = create("preCommit");
- static final TransactionCommitFailedExceptionMapper CAN_COMMIT_ERROR_MAPPER = create("preCommit");
+ static final TransactionCommitFailedExceptionMapper CAN_COMMIT_ERROR_MAPPER = create("canCommit");
static final TransactionCommitFailedExceptionMapper COMMIT_ERROR_MAPPER = create("commit");
package org.opendaylight.controller.sal.dom.broker;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-
+import com.google.common.base.Optional;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-
public class BackwardsCompatibleMountPointManagerTest {
private static final Logger log = LoggerFactory.getLogger(BackwardsCompatibleMountPointManagerTest.class);
private DOMMountPoint mockMountPoint() {
final DOMMountPoint mock = mock(DOMMountPoint.class);
- doAnswer(new Answer() {
+ doAnswer(new Answer<Object>() {
@Override
public Object answer(final InvocationOnMock invocation) throws Throwable {
return Optional.of(mock(((Class<?>) invocation.getArguments()[0])));
package org.opendaylight.controller.sal.dom.broker;
-import static junit.framework.Assert.fail;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.AbstractMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@Deprecated
public class BackwardsCompatibleMountPointTest {
private static final Logger log = LoggerFactory.getLogger(BackwardsCompatibleMountPointManagerTest.class);
private DataNormalizer mockNormalizer() throws DataNormalizationException {
final DataNormalizer mock = mock(DataNormalizer.class);
- doReturn(new AbstractMap.SimpleEntry<YangInstanceIdentifier, NormalizedNode<?, ?>>(id, normalizedNode) {})
+ doReturn(new AbstractMap.SimpleEntry<YangInstanceIdentifier, NormalizedNode<?, ?>>(id, normalizedNode))
.when(mock).toNormalized(any(YangInstanceIdentifier.class), any(CompositeNode.class));
doReturn(compositeNode).when(mock).toLegacy(any(YangInstanceIdentifier.class), any(NormalizedNode.class));
doReturn(id).when(mock).toLegacy(any(YangInstanceIdentifier.class));
}
private NormalizedNode<?, ?> mockNormalizedNode() {
- final NormalizedNode mock = mock(NormalizedNode.class);
+ final NormalizedNode<?, ?> mock = mock(NormalizedNode.class);
doReturn("mockNormalizedNode").when(mock).toString();
return mock;
}
*/
package org.opendaylight.controller.md.sal.dom.broker.spi.rpc;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
-import com.google.common.base.Optional;
-
public abstract class RpcRoutingStrategy implements Identifiable<QName> {
+ private static final QName CONTEXT_REFERENCE = QName.cachedReference(QName.create("urn:opendaylight:yang:extension:yang-ext",
+ "2013-07-09", "context-reference"));
private final QName identifier;
- private static final QName CONTEXT_REFERENCE = QName.create("urn:opendaylight:yang:extension:yang-ext",
- "2013-07-09", "context-reference");
private RpcRoutingStrategy(final QName identifier) {
- super();
- this.identifier = identifier;
+ this.identifier = Preconditions.checkNotNull(identifier);
}
/**
public abstract QName getContext();
@Override
- public QName getIdentifier() {
+ public final QName getIdentifier() {
return identifier;
}
for (DataSchemaNode schemaNode : input.getChildNodes()) {
Optional<QName> context = getRoutingContext(schemaNode);
if (context.isPresent()) {
- return createRoutedStrategy(rpc, context.get(), schemaNode.getQName());
+ return new RoutedRpcStrategy(rpc.getQName(), context.get(), schemaNode.getQName());
}
}
}
- return createGlobalStrategy(rpc);
+ return new GlobalRpcStrategy(rpc.getQName());
}
- public static Optional<QName> getRoutingContext(final DataSchemaNode schemaNode) {
+ public static Optional<QName> getRoutingContext(final DataSchemaNode schemaNode) {
for (UnknownSchemaNode extension : schemaNode.getUnknownSchemaNodes()) {
if (CONTEXT_REFERENCE.equals(extension.getNodeType())) {
return Optional.fromNullable(extension.getQName());
return Optional.absent();
}
- private static RpcRoutingStrategy createRoutedStrategy(final RpcDefinition rpc, final QName context, final QName leafNode) {
- return new RoutedRpcStrategy(rpc.getQName(), context, leafNode);
- }
-
-
-
- private static RpcRoutingStrategy createGlobalStrategy(final RpcDefinition rpc) {
- GlobalRpcStrategy ret = new GlobalRpcStrategy(rpc.getQName());
- return ret;
- }
-
- private static class RoutedRpcStrategy extends RpcRoutingStrategy {
-
- final QName context;
+ private static final class RoutedRpcStrategy extends RpcRoutingStrategy {
+ private final QName context;
private final QName leaf;
private RoutedRpcStrategy(final QName identifier, final QName ctx, final QName leaf) {
super(identifier);
- this.context = ctx;
- this.leaf = leaf;
+ this.context = Preconditions.checkNotNull(ctx);
+ this.leaf = Preconditions.checkNotNull(leaf);
}
@Override
}
}
- private static class GlobalRpcStrategy extends RpcRoutingStrategy {
+ private static final class GlobalRpcStrategy extends RpcRoutingStrategy {
public GlobalRpcStrategy(final QName identifier) {
super(identifier);
@Override
public QName getContext() {
- throw new UnsupportedOperationException("Not routed strategy does not have context.");
+ throw new UnsupportedOperationException("Non-routed strategy does not have a context");
}
@Override
public QName getLeaf() {
- throw new UnsupportedOperationException("Not routed strategy does not have context.");
+ throw new UnsupportedOperationException("Non-routed strategy does not have a context");
}
}
}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Utility {@link DOMDataBroker} implementation which forwards all interface
+ * method invocations to a delegate instance. Subclasses supply the delegate
+ * via {@link #delegate()} and may override individual methods to decorate it.
+ */
+public abstract class ForwardingDOMDataBroker extends ForwardingObject implements DOMDataBroker {
+ // The backing broker every call below is forwarded to; must not be null.
+ @Override
+ protected abstract @Nonnull DOMDataBroker delegate();
+
+ @Override
+ public ListenerRegistration<DOMDataChangeListener> registerDataChangeListener(final LogicalDatastoreType store,
+ final YangInstanceIdentifier path, final DOMDataChangeListener listener,
+ final DataChangeScope triggeringScope) {
+ return delegate().registerDataChangeListener(store, path, listener, triggeringScope);
+ }
+
+ @Override
+ public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ return delegate().newReadOnlyTransaction();
+ }
+
+ @Override
+ public DOMDataReadWriteTransaction newReadWriteTransaction() {
+ return delegate().newReadWriteTransaction();
+ }
+
+ @Override
+ public DOMDataWriteTransaction newWriteOnlyTransaction() {
+ return delegate().newWriteOnlyTransaction();
+ }
+
+ @Override
+ public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
+ return delegate().createTransactionChain(listener);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.CheckedFuture;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Utility {@link DOMDataReadOnlyTransaction} implementation which forwards all interface
+ * method invocations to a delegate instance. Subclasses supply the delegate via
+ * {@link #delegate()} and may override individual methods to decorate it.
+ */
+public abstract class ForwardingDOMDataReadOnlyTransaction extends ForwardingObject implements DOMDataReadOnlyTransaction {
+ // The backing transaction every call below is forwarded to; must not be null.
+ @Override
+ protected abstract @Nonnull DOMDataReadOnlyTransaction delegate();
+
+ @Override
+ public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ return delegate().read(store, path);
+ }
+
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ return delegate().exists(store, path);
+ }
+
+ @Override
+ public Object getIdentifier() {
+ return delegate().getIdentifier();
+ }
+
+ @Override
+ public void close() {
+ delegate().close();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListenableFuture;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Utility {@link DOMDataReadWriteTransaction} implementation which forwards all interface
+ * method invocations to a delegate instance. Subclasses supply the delegate via
+ * {@link #delegate()} and may override individual methods to decorate it.
+ */
+public abstract class ForwardingDOMDataReadWriteTransaction extends ForwardingObject implements DOMDataReadWriteTransaction {
+ // The backing transaction every call below is forwarded to; must not be null.
+ @Override
+ protected abstract @Nonnull DOMDataReadWriteTransaction delegate();
+
+ @Override
+ public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ return delegate().read(store, path);
+ }
+
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ return delegate().exists(store, path);
+ }
+
+ @Override
+ public Object getIdentifier() {
+ return delegate().getIdentifier();
+ }
+
+ @Override
+ public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ delegate().put(store, path, data);
+ }
+
+ @Override
+ public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ delegate().merge(store, path, data);
+ }
+
+ @Override
+ public boolean cancel() {
+ return delegate().cancel();
+ }
+
+ @Override
+ public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ delegate().delete(store, path);
+ }
+
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ return delegate().submit();
+ }
+
+ // Forwards the deprecated commit() call unchanged for callers not yet on submit().
+ @Override
+ @Deprecated
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ return delegate().commit();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListenableFuture;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Utility {@link DOMDataWriteTransaction} implementation which forwards all interface
+ * method invocations to a delegate instance. Subclasses supply the delegate via
+ * {@link #delegate()} and may override individual methods to decorate it.
+ */
+public abstract class ForwardingDOMDataWriteTransaction extends ForwardingObject implements DOMDataWriteTransaction {
+ // The backing transaction every call below is forwarded to; must not be null.
+ @Override
+ protected abstract @Nonnull DOMDataWriteTransaction delegate();
+
+ @Override
+ public Object getIdentifier() {
+ return delegate().getIdentifier();
+ }
+
+ @Override
+ public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ delegate().put(store, path, data);
+ }
+
+ @Override
+ public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+ delegate().merge(store, path, data);
+ }
+
+ @Override
+ public boolean cancel() {
+ return delegate().cancel();
+ }
+
+ @Override
+ public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+ delegate().delete(store, path);
+ }
+
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+ return delegate().submit();
+ }
+
+ // Forwards the deprecated commit() call unchanged for callers not yet on submit().
+ @Override
+ @Deprecated
+ public ListenableFuture<RpcResult<TransactionStatus>> commit() {
+ return delegate().commit();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.spi;
+
+import com.google.common.collect.ForwardingObject;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+
+/**
+ * Utility {@link DOMTransactionChain} implementation which forwards all interface
+ * method invocations to a delegate instance. Subclasses supply the delegate via
+ * {@link #delegate()} and may override individual methods to decorate it.
+ */
+public abstract class ForwardingDOMTransactionChain extends ForwardingObject implements DOMTransactionChain {
+ // The backing chain every call below is forwarded to; must not be null.
+ @Override
+ protected abstract @Nonnull DOMTransactionChain delegate();
+
+ @Override
+ public void close() {
+ delegate().close();
+ }
+
+ @Override
+ public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+ return delegate().newReadOnlyTransaction();
+ }
+
+ @Override
+ public DOMDataReadWriteTransaction newReadWriteTransaction() {
+ return delegate().newReadWriteTransaction();
+ }
+
+ @Override
+ public DOMDataWriteTransaction newWriteOnlyTransaction() {
+ return delegate().newWriteOnlyTransaction();
+ }
+}
<version>1.2.0-SNAPSHOT</version>\r
</parent>\r
<artifactId>sal-dom-xsql-config</artifactId>\r
- <groupId>org.opendaylight.controller</groupId>\r
<description>Configuration files for md-sal</description>\r
<packaging>jar</packaging>\r
<properties>\r
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>yang-jmx-generator-plugin</artifactId>
- <version>${config.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
JDBCResultSet rs = new JDBCResultSet(sql);
try {
int count = 0;
- jdbcServer.execute(rs, this);
+ JDBCServer.execute(rs, this);
boolean isFirst = true;
int loc = rs.getFields().size() - 1;
int totalWidth = 0;
return cacheLoadedSuccessfuly;
}
- private static Map<Class, Set<Class>> superClassMap = new HashMap<Class, Set<Class>>();
+ private static Map<Class<?>, Set<Class<?>>> superClassMap = new HashMap<>();
- public static Set<Class> getInheritance(Class myObjectClass,
- Class returnType) {
+ public static Set<Class<?>> getInheritance(Class<?> myObjectClass,
+ Class<?> returnType) {
if (returnType != null && myObjectClass.equals(returnType)) {
- return new HashSet<Class>();
+ return new HashSet<>();
}
- Set<Class> result = superClassMap.get(myObjectClass);
+ Set<Class<?>> result = superClassMap.get(myObjectClass);
if (result != null) {
return result;
}
- result = new HashSet<Class>();
+ result = new HashSet<>();
superClassMap.put(myObjectClass, result);
if (returnType != null) {
if (!returnType.equals(myObjectClass)) {
- Class mySuperClass = myObjectClass.getSuperclass();
+ Class<?> mySuperClass = myObjectClass.getSuperclass();
while (mySuperClass != null) {
result.add(mySuperClass);
mySuperClass = mySuperClass.getSuperclass();
return result;
}
- public static Set<Class> collectInterfaces(Class cls) {
- Set<Class> result = new HashSet();
- Class myInterfaces[] = cls.getInterfaces();
+ public static Set<Class<?>> collectInterfaces(Class<?> cls) {
+ Set<Class<?>> result = new HashSet<>();
+ Class<?> myInterfaces[] = cls.getInterfaces();
if (myInterfaces != null) {
- for (Class in : myInterfaces) {
+ for (Class<?> in : myInterfaces) {
result.add(in);
result.addAll(collectInterfaces(in));
}
map.put(blNode.getBluePrintNodeName(), blNode);
}
- public Class getGenericType(ParameterizedType type) {
+ public Class<?> getGenericType(ParameterizedType type) {
Type[] typeArguments = type.getActualTypeArguments();
for (Type typeArgument : typeArguments) {
if (typeArgument instanceof ParameterizedType) {
ParameterizedType pType = (ParameterizedType) typeArgument;
- return (Class) pType.getRawType();
+ return (Class<?>) pType.getRawType();
} else if (typeArgument instanceof Class) {
- return (Class) typeArgument;
+ return (Class<?>) typeArgument;
}
}
return null;
}
- public Class getMethodReturnTypeFromGeneric(Method m) {
+ public Class<?> getMethodReturnTypeFromGeneric(Method m) {
Type rType = m.getGenericReturnType();
if (rType instanceof ParameterizedType) {
return getGenericType((ParameterizedType) rType);
this.children.add(ch);
}
- public boolean isModelChild(Class p) {
+ public boolean isModelChild(Class<?> p) {
if (this.relations.size() == 0) {
return false;
}
return "Unknown";
}
- public Class getInterface() {
+ public Class<?> getInterface() {
return this.myInterface;
}
}
}
- public List execute(Object o) {
- List result = new LinkedList();
+ public List<?> execute(Object o) {
+ List<Object> result = new LinkedList<>();
if (o == null) {
return null;
}
if (Set.class.isAssignableFrom(o.getClass())) {
- Set lst = (Set) o;
+ Set<?> lst = (Set<?>) o;
for (Object oo : lst) {
addToResult(result, execute(oo));
}
return result;
} else if (List.class.isAssignableFrom(o.getClass())) {
- List lst = (List) o;
+ List<?> lst = (List<?>) o;
for (Object oo : lst) {
addToResult(result, execute(oo));
}
return result;
}
- public static void addToResult(List result, Object o) {
+ private static void addToResult(List<Object> result, Object o) {
if (o == null) {
return;
}
if (Set.class.isAssignableFrom(o.getClass())) {
- Set lst = (Set) o;
+ Set<?> lst = (Set<?>) o;
for (Object oo : lst) {
result.add(oo);
}
} else if (List.class.isAssignableFrom(o.getClass())) {
- List lst = (List) o;
+ List<?> lst = (List<?>) o;
for (Object oo : lst) {
result.add(oo);
}
import java.io.Serializable;
-public class XSQLColumn implements Serializable, Comparable {
+public class XSQLColumn implements Serializable, Comparable<Object> {
+ private static final long serialVersionUID = 4854919735031714751L;
+
private String name = null;
private String tableName = null;
private int charWidth = -1;
- private Class type = null;
+ private Class<?> type = null;
private transient Object bluePrintNode = null;
private String origName = null;
private String origTableName = null;
types.put(Status.class, Status.class);
}
- public static boolean isColumnType(Class cls) {
+ public static boolean isColumnType(Class<?> cls) {
return types.containsKey(cls);
}
return "NULL";
}
- public static Class getTypeForODLColumn(Object odlNode){
+ public static Class<?> getTypeForODLColumn(Object odlNode){
Object type = get(odlNode,"type");
if(type instanceof Uint32 || type instanceof Uint64){
return long.class;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
public class JDBCCommand implements Serializable {
+ private static final long serialVersionUID = 1L;
+
public int type = 0;
public static final int TYPE_EXECUTE_QUERY = 1;
public static final int TYPE_QUERY_REPLY = 2;
return 1;
}
- public int isObjectFitCriteria(Object element, Class cls) {
+ public int isObjectFitCriteria(Object element, Class<?> cls) {
Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria.get(cls
.getName());
if (tblCriteria == null) {
}
}
- public void addRecord(ArrayList hierarchy) {
+ public void addRecord(ArrayList<?> hierarchy) {
Map rec = new HashMap();
for (int i = hierarchy.size() - 1; i >= 0; i--) {
Object element = hierarchy.get(i);
while (entry.getValue().next()) {
Map rec = entry.getValue().getCurrent();
Map newRec = new HashMap();
- for (Iterator iter = rec.entrySet().iterator(); iter.hasNext();) {
+ for (Iterator<?> iter = rec.entrySet().iterator(); iter.hasNext();) {
Map.Entry e = (Map.Entry) iter.next();
String key = (String) e.getKey();
Object value = e.getValue();
return true;
}
- public void addRecord(ArrayList hierarchy) {
+ public void addRecord(ArrayList<?> hierarchy) {
rs.addRecord(hierarchy);
}
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkCondition;
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkNotNull;
-
import com.google.common.base.Optional;
+import io.netty.util.concurrent.EventExecutor;
+import java.math.BigDecimal;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.concurrent.ExecutorService;
final NetconfClientDispatcher dispatcher = getClientDispatcherDependency();
listener.initializeRemoteConnection(dispatcher, clientConfig);
- return new AutoCloseable() {
- @Override
- public void close() throws Exception {
- listener.close();
- salFacade.close();
- }
- };
+ return new MyAutoCloseable(listener, salFacade);
}
private Optional<NetconfSessionCapabilities> getUserCapabilities() {
public NetconfReconnectingClientConfiguration getClientConfig(final NetconfDeviceCommunicator listener) {
final InetSocketAddress socketAddress = getSocketAddress();
- final ReconnectStrategy strategy = getReconnectStrategy();
final long clientConnectionTimeoutMillis = getConnectionTimeoutMillis();
+ final ReconnectStrategyFactory sf = new MyReconnectStrategyFactory(
+ getEventExecutorDependency(), getMaxConnectionAttempts(), getBetweenAttemptsTimeoutMillis(), getSleepFactor());
+ final ReconnectStrategy strategy = sf.createReconnectStrategy();
+
return NetconfReconnectingClientConfigurationBuilder.create()
.withAddress(socketAddress)
.withConnectionTimeoutMillis(clientConnectionTimeoutMillis)
.withProtocol(getTcpOnly() ?
NetconfClientConfiguration.NetconfClientProtocol.TCP :
NetconfClientConfiguration.NetconfClientProtocol.SSH)
- .withConnectStrategyFactory(new ReconnectStrategyFactory() {
- @Override
- public ReconnectStrategy createReconnectStrategy() {
- return getReconnectStrategy();
- }
- })
+ .withConnectStrategyFactory(sf)
.build();
}
- private ReconnectStrategy getReconnectStrategy() {
- final Long connectionAttempts;
- if (getMaxConnectionAttempts() != null && getMaxConnectionAttempts() > 0) {
- connectionAttempts = getMaxConnectionAttempts();
- } else {
- logger.trace("Setting {} on {} to infinity", maxConnectionAttemptsJmxAttribute, this);
- connectionAttempts = null;
+ private static final class MyAutoCloseable implements AutoCloseable {
+ private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
+ private final NetconfDeviceCommunicator listener;
+
+ public MyAutoCloseable(final NetconfDeviceCommunicator listener,
+ final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade) {
+ this.listener = listener;
+ this.salFacade = salFacade;
}
- final double sleepFactor = getSleepFactor().doubleValue();
- final int minSleep = getBetweenAttemptsTimeoutMillis();
- final Long maxSleep = null;
- final Long deadline = null;
- return new TimedReconnectStrategy(getEventExecutorDependency(), getBetweenAttemptsTimeoutMillis(),
- minSleep, sleepFactor, maxSleep, connectionAttempts, deadline);
+ @Override
+ public void close() {
+ listener.close();
+ salFacade.close();
+ }
+ }
+
+ private static final class MyReconnectStrategyFactory implements ReconnectStrategyFactory {
+ private final Long connectionAttempts;
+ private final EventExecutor executor;
+ private final double sleepFactor;
+ private final int minSleep;
+
+ MyReconnectStrategyFactory(final EventExecutor executor, final Long maxConnectionAttempts, final int minSleep, final BigDecimal sleepFactor) {
+ if (maxConnectionAttempts != null && maxConnectionAttempts > 0) {
+ connectionAttempts = maxConnectionAttempts;
+ } else {
+ logger.trace("Setting {} on {} to infinity", maxConnectionAttemptsJmxAttribute, this);
+ connectionAttempts = null;
+ }
+
+ this.sleepFactor = sleepFactor.doubleValue();
+ this.executor = executor;
+ this.minSleep = minSleep;
+ }
+
+ @Override
+ public ReconnectStrategy createReconnectStrategy() {
+ final Long maxSleep = null;
+ final Long deadline = null;
+
+ return new TimedReconnectStrategy(executor, minSleep,
+ minSleep, sleepFactor, maxSleep, connectionAttempts, deadline);
+ }
}
private InetSocketAddress getSocketAddress() {
logger.error("{}: Initialization in sal failed, disconnecting from device", id, t);
listener.close();
onRemoteSessionDown();
+ resetMessageTransformer();
+ }
+
+ /**
+ * Resets the transformer's schema context to null, restoring its initial (pre-connect) state.
+ */
+ private void resetMessageTransformer() {
+ updateMessageTransformer(null);
}
/**
for (final SchemaSourceRegistration<? extends SchemaSourceRepresentation> sourceRegistration : sourceRegistrations) {
sourceRegistration.close();
}
+ resetMessageTransformer();
}
@Override
package org.opendaylight.controller.sal.connect.netconf.schema.mapping;
import com.google.common.base.Optional;
-
import java.util.List;
import java.util.Set;
-
import javax.activation.UnsupportedDataTypeException;
-
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.sal.connect.api.MessageTransformer;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
if(schemaContext.isPresent()) {
if (NetconfMessageTransformUtil.isDataEditOperation(rpc)) {
final DataNodeContainer schemaForEdit = NetconfMessageTransformUtil.createSchemaForEdit(schemaContext.get());
- w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaForEdit, codecProvider);
+ w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaContext.get(), schemaForEdit, codecProvider);
} else if (NetconfMessageTransformUtil.isGetOperation(rpc)) {
final DataNodeContainer schemaForGet = NetconfMessageTransformUtil.createSchemaForGet(schemaContext.get());
- w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaForGet, codecProvider);
+ w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaContext.get(), schemaForGet, codecProvider);
} else if (NetconfMessageTransformUtil.isGetConfigOperation(rpc)) {
final DataNodeContainer schemaForGetConfig = NetconfMessageTransformUtil.createSchemaForGetConfig(schemaContext.get());
- w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaForGetConfig, codecProvider);
+ w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaContext.get(), schemaForGetConfig, codecProvider);
} else {
- final DataNodeContainer schemaForGetConfig = NetconfMessageTransformUtil.createSchemaForRpc(rpc, schemaContext.get());
- w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaForGetConfig, codecProvider);
+ final Optional<RpcDefinition> schemaForRpc = NetconfMessageTransformUtil.findSchemaForRpc(rpc, schemaContext.get());
+ if(schemaForRpc.isPresent()) {
+ final DataNodeContainer schemaForGetConfig = NetconfMessageTransformUtil.createSchemaForRpc(schemaForRpc.get());
+ w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, schemaContext.get(), schemaForGetConfig, codecProvider);
+ } else {
+ w3cPayload = toRpcRequestWithoutSchema(rpcPayload, codecProvider);
+ }
}
} else {
- w3cPayload = XmlDocumentUtils.toDocument(rpcPayload, codecProvider);
+ w3cPayload = toRpcRequestWithoutSchema(rpcPayload, codecProvider);
}
} catch (final UnsupportedDataTypeException e) {
throw new IllegalArgumentException("Unable to create message", e);
return new NetconfMessage(w3cPayload);
}
+ private Document toRpcRequestWithoutSchema(final CompositeNodeTOImpl rpcPayload, final XmlCodecProvider codecProvider) {
+ return XmlDocumentUtils.toDocument(rpcPayload, codecProvider);
+ }
+
@Override
public synchronized RpcResult<CompositeNode> toRpcResult(final NetconfMessage message, final QName rpc) {
if(schemaContext.isPresent()) {
@Override
public synchronized void onGlobalContextUpdated(final SchemaContext schemaContext) {
- this.schemaContext = Optional.of(schemaContext);
+ this.schemaContext = Optional.fromNullable(schemaContext);
}
}
*/
package org.opendaylight.controller.sal.connect.netconf.util;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+
public class NetconfMessageTransformUtil {
public static final String MESSAGE_ID_ATTR = "message-id";
static Node<?> toNode(final YangInstanceIdentifier.NodeIdentifierWithPredicates argument, final Node<?> node) {
final List<Node<?>> list = new ArrayList<>();
for (final Map.Entry<QName, Object> arg : argument.getKeyValues().entrySet()) {
- list.add(new SimpleNodeTOImpl(arg.getKey(), null, arg.getValue()));
+ list.add(new SimpleNodeTOImpl<>(arg.getKey(), null, arg.getValue()));
}
if (node != null) {
list.add(node);
return new NodeContainerProxy(NETCONF_RPC_QNAME, Sets.<DataSchemaNode>newHashSet(editConfigProxy));
}
+
+ public static Optional<RpcDefinition> findSchemaForRpc(final QName rpcName, final SchemaContext schemaContext) {
+ Preconditions.checkNotNull(rpcName);
+ Preconditions.checkNotNull(schemaContext);
+
+ for (final RpcDefinition rpcDefinition : schemaContext.getOperations()) {
+ if(rpcDefinition.getQName().equals(rpcName)) {
+ return Optional.of(rpcDefinition);
+ }
+ }
+
+ return Optional.absent();
+ }
+
/**
* Creates artificial schema node for schema defined rpc. This artificial schema looks like:
* <pre>
* This makes the translation of schema defined rpc request
* to xml use schema which is crucial for some types of nodes e.g. identity-ref.
*/
- public static DataNodeContainer createSchemaForRpc(final QName rpcName, final SchemaContext schemaContext) {
- Preconditions.checkNotNull(rpcName);
- Preconditions.checkNotNull(schemaContext);
-
- final NodeContainerProxy rpcBodyProxy = new NodeContainerProxy(rpcName, schemaContext.getChildNodes());
+ public static DataNodeContainer createSchemaForRpc(final RpcDefinition rpcDefinition) {
+ final NodeContainerProxy rpcBodyProxy = new NodeContainerProxy(rpcDefinition.getQName(), rpcDefinition.getInput().getChildNodes());
return new NodeContainerProxy(NETCONF_RPC_QNAME, Sets.<DataSchemaNode>newHashSet(rpcBodyProxy));
-
}
public static CompositeNodeTOImpl wrap(final QName name, final Node<?> node) {
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-
import com.google.common.base.Optional;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
// Make fallback attempt to fail due to empty resolved sources
final MissingSchemaSourceException schemaResolutionException = new MissingSchemaSourceException("fail first", TEST_SID);
- doAnswer(new Answer() {
+ doAnswer(new Answer<Object>() {
@Override
public Object answer(final InvocationOnMock invocation) throws Throwable {
if(((Collection<?>) invocation.getArguments()[0]).size() == 2) {
private SchemaSourceRegistry getSchemaRegistry() {
final SchemaSourceRegistry mock = mock(SchemaSourceRegistry.class);
- final SchemaSourceRegistration mockReg = mock(SchemaSourceRegistration.class);
+ final SchemaSourceRegistration<?> mockReg = mock(SchemaSourceRegistration.class);
doNothing().when(mockReg).close();
doReturn(mockReg).when(mock).registerSchemaSource(any(org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider.class), any(PotentialSchemaSource.class));
return mock;
device.onRemoteSessionUp(sessionCaps, listener);
verify(schemaContextProviderFactory, timeout(5000).times(2)).createSchemaContext(any(Collection.class));
- verify(messageTransformer, timeout(5000).times(2)).onGlobalContextUpdated(any(SchemaContext.class));
+ verify(messageTransformer, timeout(5000).times(3)).onGlobalContextUpdated(any(SchemaContext.class));
verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
-import static org.junit.matchers.JUnitMatchers.hasItem;
+import static org.hamcrest.CoreMatchers.hasItem;
import java.util.Set;
import org.junit.Test;
package org.opendaylight.controller.sal.connect.netconf;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
-
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import java.io.InputStream;
import java.util.Collections;
import java.util.List;
import java.util.Set;
-
import javax.xml.parsers.DocumentBuilderFactory;
import org.junit.Before;
import org.junit.Test;
package org.opendaylight.controller.sal.connect.netconf;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import java.io.InputStream;
import java.util.Collections;
import java.util.List;
import java.util.Set;
-
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.Module;
final org.w3c.dom.Node streamName = subscribeName.getFirstChild();
assertEquals(streamName.getLocalName(), "stream-name");
+
}
@Test
- public void testNoSchemaContextToRpcRequest() throws Exception {
- final String exampleNamespace = "http://example.net/me/my-own/1.0";
- final String exampleRevision = "2014-07-22";
- final QName myOwnMethodRpcQName = QName.create(exampleNamespace, exampleRevision, "my-own-method");
-
- final CompositeNodeBuilder<ImmutableCompositeNode> rootBuilder = ImmutableCompositeNode.builder();
- rootBuilder.setQName(myOwnMethodRpcQName);
-
- final CompositeNodeBuilder<ImmutableCompositeNode> inputBuilder = ImmutableCompositeNode.builder();
- inputBuilder.setQName(QName.create(exampleNamespace, exampleRevision, "input"));
- inputBuilder.addLeaf(QName.create(exampleNamespace, exampleRevision, "my-first-parameter"), "14");
- inputBuilder.addLeaf(QName.create(exampleNamespace, exampleRevision, "another-parameter"), "fred");
-
- rootBuilder.add(inputBuilder.toInstance());
- final ImmutableCompositeNode root = rootBuilder.toInstance();
-
- final NetconfMessage message = messageTransformer.toRpcRequest(myOwnMethodRpcQName, root);
- assertNotNull(message);
-
- final Document xmlDoc = message.getDocument();
- final org.w3c.dom.Node rpcChild = xmlDoc.getFirstChild();
- assertEquals(rpcChild.getLocalName(), "rpc");
-
- final org.w3c.dom.Node myOwnMethodNode = rpcChild.getFirstChild();
- assertEquals(myOwnMethodNode.getLocalName(), "my-own-method");
-
- final org.w3c.dom.Node firstParamNode = myOwnMethodNode.getFirstChild();
- assertEquals(firstParamNode.getLocalName(), "my-first-parameter");
-
- final org.w3c.dom.Node secParamNode = firstParamNode.getNextSibling();
- assertEquals(secParamNode.getLocalName(), "another-parameter");
+ public void testRpcResponse() throws Exception {
+ final NetconfMessage response = new NetconfMessage(XmlUtil.readXmlToDocument(
+ "<rpc-reply xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"m-5\">\n" +
+ "<data xmlns=\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\">" +
+ "module schema" +
+ "</data>\n" +
+ "</rpc-reply>\n"
+ ));
+ final RpcResult<CompositeNode> compositeNodeRpcResult = messageTransformer.toRpcResult(response, SUBSCRIBE_RPC_NAME);
+ final Node<?> dataNode = compositeNodeRpcResult.getResult().getValue().get(0);
+ assertEquals("module schema", dataNode.getValue());
}
+
}
package org.opendaylight.controller.sal.connect.netconf.listener;
+import static org.hamcrest.CoreMatchers.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import com.google.common.collect.Lists;
import java.util.List;
import org.junit.Test;
-import org.junit.matchers.JUnitMatchers;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.yangtools.yang.common.QName;
final NetconfSessionCapabilities merged = sessionCaps1.replaceModuleCaps(sessionCaps2);
assertCaps(merged, 2, 2 + 1 /*Preserved monitoring*/);
for (final QName qName : sessionCaps2.getModuleBasedCaps()) {
- assertThat(merged.getModuleBasedCaps(), JUnitMatchers.hasItem(qName));
+ assertThat(merged.getModuleBasedCaps(), hasItem(qName));
}
- assertThat(merged.getModuleBasedCaps(), JUnitMatchers.hasItem(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING));
+ assertThat(merged.getModuleBasedCaps(), hasItem(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING));
- assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:base:1.0"));
- assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
+ assertThat(merged.getNonModuleCaps(), hasItem("urn:ietf:params:netconf:base:1.0"));
+ assertThat(merged.getNonModuleCaps(), hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
}
@Test
package org.opendaylight.controller.sal.connect.netconf.sal.tx;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doReturn;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html.
+ */
+
+package org.opendaylight.controller.sal.connect.netconf.schema.mapping;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import org.hamcrest.CoreMatchers;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.netconf.NetconfToRpcRequestTest;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+public class NetconfMessageTransformerTest {
+
+ private static final QName COMMIT_Q_NAME = QName.create("namespace", "2012-12-12", "commit");
+
+ @Test
+ public void testToRpcRequestNoSchemaForRequest() throws Exception {
+ final NetconfMessageTransformer netconfMessageTransformer = getTransformer();
+ final NetconfMessage netconfMessage = netconfMessageTransformer.toRpcRequest(COMMIT_Q_NAME,
+ NodeFactory.createImmutableCompositeNode(COMMIT_Q_NAME, null, Collections.<Node<?>>emptyList()));
+ assertThat(XmlUtil.toString(netconfMessage.getDocument()), CoreMatchers.containsString("<commit"));
+ }
+
+ private NetconfMessageTransformer getTransformer() {
+ final NetconfMessageTransformer netconfMessageTransformer = new NetconfMessageTransformer();
+ netconfMessageTransformer.onGlobalContextUpdated(getSchema());
+ return netconfMessageTransformer;
+ }
+
+ @Test
+ public void testToRpcResultNoSchemaForResult() throws Exception {
+ final NetconfMessageTransformer netconfMessageTransformer = getTransformer();
+ final NetconfMessage response = new NetconfMessage(XmlUtil.readXmlToDocument(
+ "<rpc-reply><ok/></rpc-reply>"
+ ));
+ final RpcResult<CompositeNode> compositeNodeRpcResult = netconfMessageTransformer.toRpcResult(response, COMMIT_Q_NAME);
+ assertTrue(compositeNodeRpcResult.isSuccessful());
+ assertEquals("ok", compositeNodeRpcResult.getResult().getValue().get(0).getKey().getLocalName());
+ }
+
+ public SchemaContext getSchema() {
+ final List<InputStream> modelsToParse = Collections
+ .singletonList(NetconfToRpcRequestTest.class.getResourceAsStream("/schemas/rpc-notification-subscription.yang"));
+ final YangParserImpl parser = new YangParserImpl();
+ final Set<Module> configModules = parser.parseYangModelsFromStreams(modelsToParse);
+ final SchemaContext cfgCtx = parser.resolveSchemaContext(configModules);
+ assertNotNull(cfgCtx);
+ return cfgCtx;
+ }
+}
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
<dependency>
final Broker.ProviderSession brokerSession,
final RpcProvisionRegistry rpcProvisionRegistry) {
return Props.create(new Creator<RpcManager>() {
+ private static final long serialVersionUID = 1L;
@Override
public RpcManager create() throws Exception {
return new RpcManager(schemaContext, brokerSession, rpcProvisionRegistry);
import java.io.Serializable;
public class ExecuteRpc implements Serializable {
+ private static final long serialVersionUID = 1L;
private final String inputCompositeNode;
private final QName rpc;
import java.io.Serializable;
public class InvokeRpc implements Serializable {
+ private static final long serialVersionUID = 1L;
private final QName rpc;
private final YangInstanceIdentifier identifier;
*/
package org.opendaylight.controller.remote.rpc.messages;
-
-
import java.io.Serializable;
public class RpcResponse implements Serializable {
+ private static final long serialVersionUID = 1L;
private final String resultCompositeNode;
public RpcResponse(final String resultCompositeNode) {
import java.util.Map;
public class RoutingTable implements Copier<RoutingTable>, Serializable {
+ private static final long serialVersionUID = 1L;
private Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
private ActorRef router;
import java.io.Serializable;
public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
+ private static final long serialVersionUID = 1L;
private Long version = System.currentTimeMillis();
public static class BucketStoreMessages{
- public static class GetLocalBucket implements Serializable{}
+ public static class GetLocalBucket implements Serializable {
+ private static final long serialVersionUID = 1L;
+ }
public static class ContainsBucket implements Serializable {
+ private static final long serialVersionUID = 1L;
final private Bucket bucket;
public ContainsBucket(Bucket bucket){
}
public static class UpdateBucket extends ContainsBucket implements Serializable {
+ private static final long serialVersionUID = 1L;
public UpdateBucket(Bucket bucket){
super(bucket);
}
}
public static class GetLocalBucketReply extends ContainsBucket implements Serializable {
+ private static final long serialVersionUID = 1L;
public GetLocalBucketReply(Bucket bucket){
super(bucket);
}
}
- public static class GetAllBuckets implements Serializable{}
+ public static class GetAllBuckets implements Serializable {
+ private static final long serialVersionUID = 1L;
+ }
public static class GetBucketsByMembers implements Serializable{
+ private static final long serialVersionUID = 1L;
private Set<Address> members;
public GetBucketsByMembers(Set<Address> members){
}
public static class ContainsBuckets implements Serializable{
+ private static final long serialVersionUID = 1L;
private Map<Address, Bucket> buckets;
public ContainsBuckets(Map<Address, Bucket> buckets){
}
public static class GetAllBucketsReply extends ContainsBuckets implements Serializable{
+ private static final long serialVersionUID = 1L;
public GetAllBucketsReply(Map<Address, Bucket> buckets) {
super(buckets);
}
}
public static class GetBucketsByMembersReply extends ContainsBuckets implements Serializable{
+ private static final long serialVersionUID = 1L;
public GetBucketsByMembersReply(Map<Address, Bucket> buckets) {
super(buckets);
}
}
- public static class GetBucketVersions implements Serializable{}
+ public static class GetBucketVersions implements Serializable {
+ private static final long serialVersionUID = 1L;
+ }
public static class ContainsBucketVersions implements Serializable{
+ private static final long serialVersionUID = 1L;
Map<Address, Long> versions;
public ContainsBucketVersions(Map<Address, Long> versions) {
}
public static class GetBucketVersionsReply extends ContainsBucketVersions implements Serializable{
+ private static final long serialVersionUID = 1L;
public GetBucketVersionsReply(Map<Address, Long> versions) {
super(versions);
}
}
public static class UpdateRemoteBuckets extends ContainsBuckets implements Serializable{
+ private static final long serialVersionUID = 1L;
public UpdateRemoteBuckets(Map<Address, Bucket> buckets) {
super(buckets);
}
}
public static class GossiperMessages{
- public static class Tick implements Serializable {}
+ public static class Tick implements Serializable {
+ private static final long serialVersionUID = 1L;
+ }
- public static final class GossipTick extends Tick {}
+ public static final class GossipTick extends Tick {
+ private static final long serialVersionUID = 1L;
+ }
public static final class GossipStatus extends ContainsBucketVersions implements Serializable{
+ private static final long serialVersionUID = 1L;
private Address from;
public GossipStatus(Address from, Map<Address, Long> versions) {
}
public static final class GossipEnvelope extends ContainsBuckets implements Serializable {
+ private static final long serialVersionUID = 1L;
private final Address from;
private final Address to;
public class ConditionalProbe {
private final ActorRef actorRef;
- private final Predicate predicate;
+ private final Predicate<Object> predicate;
Logger log = LoggerFactory.getLogger(ConditionalProbe.class);
- public ConditionalProbe(ActorRef actorRef, Predicate predicate) {
+ public ConditionalProbe(ActorRef actorRef, Predicate<Object> predicate) {
this.actorRef = actorRef;
this.predicate = predicate;
}
Messages.BucketStoreMessages.UpdateRemoteBuckets.class);
}
- private JavaTestKit createProbeForMessage(ActorSystem node, ActorPath subjectPath, final Class clazz) {
+ private JavaTestKit createProbeForMessage(ActorSystem node, ActorPath subjectPath, final Class<?> clazz) {
final JavaTestKit probe = new JavaTestKit(node);
ConditionalProbe conditionalProbe =
- new ConditionalProbe(probe.getRef(), new Predicate() {
+ new ConditionalProbe(probe.getRef(), new Predicate<Object>() {
@Override
public boolean apply(@Nullable Object input) {
if (input != null)
@Test
public void testReceiveGossipTick_WhenNoRemoteMemberShouldIgnore(){
- mockGossiper.setClusterMembers(Collections.EMPTY_LIST);
+ mockGossiper.setClusterMembers(Collections.<Address>emptyList());
doNothing().when(mockGossiper).getLocalStatusAndSendTo(any(Address.class));
mockGossiper.receiveGossipTick();
verify(mockGossiper, times(0)).getLocalStatusAndSendTo(any(Address.class));
package org.opendaylight.controller.remote.rpc.utils;
-
+import static org.junit.Assert.assertTrue;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.japi.Pair;
import akka.testkit.JavaTestKit;
import akka.testkit.TestProbe;
import com.typesafe.config.ConfigFactory;
-import junit.framework.Assert;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
-
public class LatestEntryRoutingLogicTest {
static ActorSystem system;
pairList.add(new Pair<ActorRef, Long>(actor2, 3000L));
pairList.add(new Pair<ActorRef, Long>(actor3, 2000L));
RoutingLogic logic = new LatestEntryRoutingLogic(pairList);
- Assert.assertTrue(logic.select().equals(actor2));
+ assertTrue(logic.select().equals(actor2));
}
}
*/
package org.opendaylight.controller.sal.restconf.impl;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import javax.ws.rs.core.Response.Status;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.ws.rs.core.Response.Status;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
public class BrokerFacade {
private final static Logger LOG = LoggerFactory.getLogger(BrokerFacade.class);
try {
- CheckedFuture<Boolean, ReadFailedException> future =
- rwTx.exists(store, currentPath);
+ CheckedFuture<Boolean, ReadFailedException> future = rwTx.exists(store, currentPath);
exists = future.checkedGet();
} catch (ReadFailedException e) {
LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
throw new IllegalStateException("Failed to read pre-existing data", e);
}
-
if (!exists && iterator.hasNext()) {
rwTx.merge(store, currentPath, currentOp.createDefault(currentArg));
}
"Value is not instance of IdentityrefTypeDefinition but is {}. Therefore NULL is used as translation of - {}",
input == null ? "null" : input.getClass(), String.valueOf(input));
return null;
- } else if (type instanceof LeafrefTypeDefinition) {
- if (input instanceof IdentityValuesDTO) {
- return LEAFREF_DEFAULT_CODEC.deserialize(((IdentityValuesDTO) input).getOriginValue());
- }
- return LEAFREF_DEFAULT_CODEC.deserialize(input);
} else if (type instanceof InstanceIdentifierTypeDefinition) {
if (input instanceof IdentityValuesDTO) {
return instanceIdentifier.deserialize(input);
IdentityValue valueWithNamespace = data.getValuesWithNamespaces().get(0);
Module module = getModuleByNamespace(valueWithNamespace.getNamespace(), mountPoint);
if (module == null) {
- logger.info("Module by namespace '{}' of first node in instance-identiefier was not found.",
+ logger.info("Module by namespace '{}' of first node in instance-identifier was not found.",
valueWithNamespace.getNamespace());
logger.info("Instance-identifier will be translated as NULL for data - {}",
String.valueOf(valueWithNamespace.getValue()));
package org.opendaylight.controller.sal.restconf.impl;
import com.google.common.base.Objects;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
+import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModifiedNodeDoesNotExistException;
import org.opendaylight.yangtools.yang.data.composite.node.schema.cnsn.parser.CnSnToNormalizedNodeParserFactory;
import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;
import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
import org.opendaylight.yangtools.yang.model.api.type.IdentityrefTypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.LeafrefTypeDefinition;
import org.opendaylight.yangtools.yang.model.util.EmptyType;
+import org.opendaylight.yangtools.yang.model.util.SchemaContextUtil;
import org.opendaylight.yangtools.yang.parser.builder.impl.ContainerSchemaNodeBuilder;
import org.opendaylight.yangtools.yang.parser.builder.impl.LeafSchemaNodeBuilder;
import org.slf4j.Logger;
broker.commitConfigurationDataDelete(normalizedII).get();
}
} catch (Exception e) {
- throw new RestconfDocumentedException("Error creating data", e);
+ final Optional<Throwable> searchedException = Iterables.tryFind(Throwables.getCausalChain(e),
+ Predicates.instanceOf(ModifiedNodeDoesNotExistException.class));
+ if (searchedException.isPresent()) {
+ throw new RestconfDocumentedException("Data specified for deleting doesn't exist.", ErrorType.APPLICATION, ErrorTag.DATA_MISSING);
+ }
+ throw new RestconfDocumentedException("Error while deleting data", e);
}
-
return Response.status(Status.OK).build();
}
try {
this.normalizeNode(nodeWrap, schema, null, mountPoint);
} catch (IllegalArgumentException e) {
- throw new RestconfDocumentedException(e.getMessage(), ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
+ RestconfDocumentedException restconfDocumentedException = new RestconfDocumentedException(e.getMessage(), ErrorType.PROTOCOL, ErrorTag.INVALID_VALUE);
+ restconfDocumentedException.addSuppressed(e);
+ throw restconfDocumentedException;
}
if (nodeWrap instanceof CompositeNodeWrapper) {
return ((CompositeNodeWrapper) nodeWrap).unwrap();
final Object value = simpleNode.getValue();
Object inputValue = value;
TypeDefinition<? extends Object> typeDefinition = this.typeDefinition(schema);
- if ((typeDefinition instanceof IdentityrefTypeDefinition)) {
- if ((value instanceof String)) {
- inputValue = new IdentityValuesDTO(simpleNode.getNamespace().toString(), (String) value, null,
- (String) value);
- } // else value is already instance of IdentityValuesDTO
+
+ // For leafrefs, extract the type it is pointing to
+ if(typeDefinition instanceof LeafrefTypeDefinition) {
+ typeDefinition = SchemaContextUtil.getBaseTypeForLeafRef(((LeafrefTypeDefinition) typeDefinition), mountPoint == null ? this.controllerContext.getGlobalSchema() : mountPoint.getSchemaContext(), schema);
+ }
+
+ if (typeDefinition instanceof IdentityrefTypeDefinition) {
+ inputValue = parseToIdentityValuesDTO(simpleNode, value, inputValue);
}
Object outputValue = inputValue;
simpleNode.setValue(outputValue);
}
+ private Object parseToIdentityValuesDTO(final SimpleNodeWrapper simpleNode, final Object value, Object inputValue) {
+ if ((value instanceof String)) {
+ inputValue = new IdentityValuesDTO(simpleNode.getNamespace().toString(), (String) value, null,
+ (String) value);
+ } // else value is already instance of IdentityValuesDTO
+ return inputValue;
+ }
+
private void normalizeCompositeNode(final CompositeNodeWrapper compositeNodeBuilder,
final DataNodeContainer schema, final DOMMountPoint mountPoint, final QName currentAugment) {
final List<NodeWrapper<?>> children = compositeNodeBuilder.getValues();
@Test
public void leafrefToNotLeafTest() {
String json = toJson("/cnsn-to-json/leafref/xml/data_ref_to_not_leaf.xml");
- validateJson(".*\"cont-augment-module\\p{Blank}*:\\p{Blank}*lf6\":\\p{Blank}*\"44.33\".*", json);
+ validateJson(".*\"cont-augment-module\\p{Blank}*:\\p{Blank}*lf6\":\\p{Blank}*\"44\".*", json);
}
/**
}
assertNotNull(lf2);
- assertTrue(lf2.getValue() instanceof String);
- assertEquals("121", lf2.getValue());
-
+ assertEquals(121, lf2.getValue());
}
}
package org.opendaylight.controller.sal.restconf.impl.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
+import java.util.concurrent.Future;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import java.util.concurrent.Future;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
/**
* Unit tests for BrokerFacade.
*
when(wTransaction.submit()).thenReturn(expFuture);
+ NormalizedNode<?, ?> dummyNode2 = createDummyNode("dummy:namespace2", "2014-07-01", "dummy local name2");
+
+
CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade
.commitConfigurationDataDelete(instanceID);
import java.io.IOException;
import java.net.URISyntaxException;
+
import javax.ws.rs.WebApplicationException;
+
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.sal.rest.impl.JsonToCompositeNodeProvider;
import org.opendaylight.controller.sal.rest.impl.XmlToCompositeNodeProvider;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class XmlAndJsonToCnSnLeafRefTest extends YangAndXmlAndDataSchemaLoader {
+ final QName refContQName = QName.create("referenced:module", "2014-04-17", "cont");
+ final QName refLf1QName = QName.create(refContQName, "lf1");
+ final QName contQName = QName.create("leafref:module", "2014-04-17", "cont");
+ final QName lf1QName = QName.create(contQName, "lf1");
+ final QName lf2QName = QName.create(contQName, "lf2");
+ final QName lf3QName = QName.create(contQName, "lf3");
+
@BeforeClass
public static void initialize() {
dataLoad("/leafref/yang", 2, "leafref-module", "cont");
CompositeNode cnSn = (CompositeNode)node;
TestUtils.normalizeCompositeNode(cnSn, modules, schemaNodePath);
- verifyContPredicate(cnSn, "/ns:cont/ns:lf1", "/cont/lf1", "/ns:cont/ns:lf1", "../lf1");
+
+ verifyContPredicate(cnSn, "lf4", YangInstanceIdentifier.builder().node(refContQName).node(refLf1QName).build());
+ verifyContPredicate(cnSn, "lf2", YangInstanceIdentifier.builder().node(contQName).node(lf1QName).build());
+ verifyContPredicate(cnSn, "lf3", YangInstanceIdentifier.builder().node(contQName).node(lf2QName).build());
+ verifyContPredicate(cnSn, "lf5", YangInstanceIdentifier.builder().node(contQName).node(lf3QName).build());
}
@Test
CompositeNode cnSn = (CompositeNode)node;
TestUtils.normalizeCompositeNode(cnSn, modules, schemaNodePath);
- verifyContPredicate(cnSn, "/leafref-module:cont/leafref-module:lf1", "/leafref-module:cont/leafref-module:lf1",
- "/referenced-module:cont/referenced-module:lf1", "/leafref-module:cont/leafref-module:lf1");
+
+ verifyContPredicate(cnSn, "lf4", YangInstanceIdentifier.builder().node(refContQName).node(refLf1QName).build());
+ verifyContPredicate(cnSn, "lf2", YangInstanceIdentifier.builder().node(contQName).node(lf1QName).build());
+ verifyContPredicate(cnSn, "lf3", YangInstanceIdentifier.builder().node(contQName).node(lf2QName).build());
+ verifyContPredicate(cnSn, "lf5", YangInstanceIdentifier.builder().node(contQName).node(lf3QName).build());
}
- private void verifyContPredicate(CompositeNode cnSn, String... values) throws URISyntaxException {
- Object lf2Value = null;
- Object lf3Value = null;
- Object lf4Value = null;
- Object lf5Value = null;
-
- for (Node<?> node : cnSn.getValue()) {
- if (node.getNodeType().getLocalName().equals("lf2")) {
- lf2Value = ((SimpleNode<?>) node).getValue();
- } else if (node.getNodeType().getLocalName().equals("lf3")) {
- lf3Value = ((SimpleNode<?>) node).getValue();
- } else if (node.getNodeType().getLocalName().equals("lf4")) {
- lf4Value = ((SimpleNode<?>) node).getValue();
- } else if (node.getNodeType().getLocalName().equals("lf5")) {
- lf5Value = ((SimpleNode<?>) node).getValue();
+ private void verifyContPredicate(CompositeNode cnSn, String leafName, Object value) throws URISyntaxException {
+ Object parsed = null;
+
+ for (final Node<?> node : cnSn.getValue()) {
+ if (node.getNodeType().getLocalName().equals(leafName)) {
+ parsed = node.getValue();
}
}
- assertEquals(values[0], lf2Value);
- assertEquals(values[1], lf3Value);
- assertEquals(values[2], lf4Value);
- assertEquals(values[3], lf5Value);
+
+ assertEquals(value, parsed);
}
}
}
assertNotNull(lf2);
- assertTrue(lf2.getValue() instanceof String);
- assertEquals("121", lf2.getValue());
+ assertEquals(121, lf2.getValue());
}
@Test
module cont-augment-module {
- namespace "cont:augment:module";
+ namespace "cont:augment:module";
prefix "cntaugmod";
-
+
import main-module {prefix mamo; revision-date 2013-12-2;}
-
+
revision 2013-12-2 {
-
+
}
-
+
augment "/mamo:cont" {
leaf-list lflst1 {
type leafref {
- path "../lf1";
+ path "../mamo:lf1";
}
- }
-
+ }
+
leaf lf4 {
type leafref {
- path "../lf1";
+ path "../mamo:lf1";
}
}
-
+
/* reference to not leaf element */
leaf lf6 {
type leafref {
path "../lflst1";
}
}
-
+
leaf lf7 {
type leafref {
path "../lf4";
}
}
}
-
-
-
+
+
+
}
\ No newline at end of file
<cont>
- <lf6>44.33</lf6>
+ <lf6>44</lf6>
</cont>
\ No newline at end of file
"leafref-module:cont" : {
"lf4" : "/referenced-module:cont/referenced-module:lf1",
"lf2" : "/leafref-module:cont/leafref-module:lf1",
- "lf3" : "/leafref-module:cont/leafref-module:lf1",
- "lf5" : "/leafref-module:cont/leafref-module:lf1"
+ "lf3" : "/leafref-module:cont/leafref-module:lf2",
+ "lf5" : "/leafref-module:cont/leafref-module:lf3"
}
}
\ No newline at end of file
-<cont xmlns="leafref:module">
- <lf4 xmlns:ns="referenced:module">/ns:cont/ns:lf1</lf4>
- <lf2 xmlns:ns="leafref:module">/ns:cont/ns:lf1</lf2>
- <lf3 xmlns:ns="leafref:module">/cont/lf1</lf3>
- <lf5 xmlns:ns="leafref:module">../lf1</lf5>
+<cont xmlns="leafref:module">
+ <lf4 xmlns:nsa="referenced:module">/nsa:cont/nsa:lf1</lf4>
+ <lf2 xmlns:nsa="leafref:module">/nsa:cont/nsa:lf1</lf2>
+ <lf3 xmlns:ns="leafref:module">/ns:cont/ns:lf2</lf3>
+ <lf5 xmlns:nsa="leafref:module">/nsa:cont/nsa:lf3</lf5>
</cont>
module leafref-module {
- namespace "leafref:module";
+ namespace "leafref:module";
prefix "lfrfmo";
- revision 2013-11-18 {
+ revision 2013-11-18 {
}
+ identity base {}
+
container cont {
leaf lf1 {
type int32;
}
leaf lf2 {
type leafref {
- path "/cont/lf1";
+ path "/cont/lf1";
+ }
+ }
+
+ leaf lf-ident {
+ type identityref {
+ base "lfrfmo:base";
}
}
+
+ leaf lf-ident-ref {
+ type leafref {
+ path "/cont/lf-ident";
+ }
+ }
+
+ leaf lf-ident-ref-relative {
+ type leafref {
+ path "../lf-ident";
+ }
+ }
+
+ leaf lf-ident-ref-relative-cnd {
+ type leafref {
+ path "/lfrfmo:cont/lfrfmo:lis[lfrfmo:id='abc']/lf-ident-ref";
+ }
+ }
+
+
+ list lis {
+ key "id";
+
+ leaf id {
+ type string;
+ }
+
+ leaf lf-ident-ref {
+ type leafref {
+ path "/cont/lf-ident";
+ }
+ }
+ }
+
}
-
+
}
\ No newline at end of file
<cont>
<lf1>121</lf1>
<lf2>121</lf2>
+ <lf-ident xmlns:a="leafref:module">a:base</lf-ident>
+ <lf-ident-ref xmlns:a="leafref:module">a:base</lf-ident-ref>
+ <lf-ident-ref-relative xmlns:a="leafref:module">a:base</lf-ident-ref-relative>
+ <lf-ident-ref-relative-cnd xmlns:a="leafref:module">a:base</lf-ident-ref-relative-cnd>
</cont>
\ No newline at end of file
<artifactId>sal-rest-docgen</artifactId>
<packaging>bundle</packaging>
- <properties>
- <jaxrs-api.version>3.0.4.Final</jaxrs-api.version>
- </properties>
-
<dependencies>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<dependency>
<groupId>org.jboss.resteasy</groupId>
<artifactId>jaxrs-api</artifactId>
- <version>${jaxrs-api.version}</version>
</dependency>
<dependency>
@Override
public void start(BundleContext context) throws Exception {
bundleContext = context;
- brokerServiceTracker = new ServiceTracker(context, Broker.class, this);
+ brokerServiceTracker = new ServiceTracker<>(context, Broker.class, this);
brokerServiceTracker.open();
}
import java.util.Set;
import java.util.TreeSet;
import javax.ws.rs.core.UriInfo;
-import junit.framework.Assert;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.After;
if (m.getKey().getAbsolutePath().endsWith("toaster.yang")) {
ApiDeclaration doc = generator.getSwaggerDocSpec(m.getValue(), "http://localhost:8080/restconf", "",
schemaContext);
- Assert.assertNotNull(doc);
+ assertNotNull(doc);
// testing bugs.opendaylight.org bug 1290. UnionType model type.
String jsonString = doc.getModels().toString();
notifications.add(new QName(notificationType.toString()));
String notificationStreamName = RemoteStreamTools.createNotificationStream(salRemoteService, notifications);
final Map<String,EventStreamInfo> desiredEventStream = RemoteStreamTools.createEventStream(restconfClientContext, notificationStreamName);
- RemoteNotificationListener remoteNotificationListener = new RemoteNotificationListener(listener);
+ RemoteNotificationListener<T> remoteNotificationListener = new RemoteNotificationListener<T>(listener);
final ListenerRegistration<?> listenerRegistration = restconfClientContext.getEventStreamContext(desiredEventStream.get(desiredEventStream.get(notificationStreamName)))
.registerNotificationListener(remoteNotificationListener);
package org.opendaylight.controller.sal.restconf.broker.listeners;
import org.opendaylight.controller.sal.binding.api.NotificationListener;
+import org.opendaylight.yangtools.yang.binding.Notification;
-public class RemoteNotificationListener implements org.opendaylight.yangtools.yang.binding.NotificationListener {
+public class RemoteNotificationListener<T extends Notification> implements org.opendaylight.yangtools.yang.binding.NotificationListener {
- org.opendaylight.controller.sal.binding.api.NotificationListener listener;
+ NotificationListener<T> listener;
- public RemoteNotificationListener(NotificationListener listener){
+ public RemoteNotificationListener(NotificationListener<T> listener){
this.listener = listener;
}
- public NotificationListener getListener(){
+ public NotificationListener<T> getListener() {
return this.listener;
}
import org.opendaylight.yangtools.yang.binding.Notification;
-public class SalNotificationListener implements NotificationListener {
- private NotificationListener notificationListener;
+public class SalNotificationListener<T extends Notification> implements NotificationListener<T> {
+ private NotificationListener<T> notificationListener;
- public SalNotificationListener( NotificationListener notificationListener){
+ public SalNotificationListener( NotificationListener<T> notificationListener){
this.notificationListener = notificationListener;
}
@Override
public void onNotification(Notification notification) {
- this.notificationListener.onNotification(notification);
+ this.notificationListener.onNotification((T)notification);
}
}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../..</relativePath>
- </parent>
- <groupId>org.opendaylight.controller.samples.l2switch.md</groupId>
- <artifactId>l2switch-impl</artifactId>
- <packaging>bundle</packaging>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-service</artifactId>
- </dependency>
-
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-topology</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.samples.l2switch.md</groupId>
- <artifactId>l2switch-model</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>net.sf.jung2</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
-
- <instructions>
- <Bundle-Activator>org.opendaylight.controller.sample.l2switch.md.L2SwitchProvider</Bundle-Activator>
- </instructions>
- <manifestLocation>${project.build.directory}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md;
-
-import org.opendaylight.controller.sample.l2switch.md.addresstracker.AddressTracker;
-import org.opendaylight.controller.sample.l2switch.md.flow.FlowWriterService;
-import org.opendaylight.controller.sample.l2switch.md.flow.FlowWriterServiceImpl;
-import org.opendaylight.controller.sample.l2switch.md.inventory.InventoryService;
-import org.opendaylight.controller.sample.l2switch.md.packet.PacketHandler;
-import org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphDijkstra;
-import org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService;
-import org.opendaylight.controller.sample.l2switch.md.topology.TopologyLinkDataChangeHandler;
-import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareConsumer;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.binding.api.NotificationService;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketProcessingService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * L2SwitchProvider serves as the Activator for our L2Switch OSGI bundle.
- */
-public class L2SwitchProvider extends AbstractBindingAwareConsumer
- implements AutoCloseable {
-
- private final static Logger _logger = LoggerFactory.getLogger(L2SwitchProvider.class);
-
- private ListenerRegistration<NotificationListener> listenerRegistration;
- private AddressTracker addressTracker;
- private TopologyLinkDataChangeHandler topologyLinkDataChangeHandler;
-
-
- /**
- * Setup the L2Switch.
- * @param consumerContext The context of the L2Switch.
- */
- @Override
- public void onSessionInitialized(BindingAwareBroker.ConsumerContext consumerContext) {
- DataBrokerService dataService = consumerContext.<DataBrokerService>getSALService(DataBrokerService.class);
- addressTracker = new AddressTracker(dataService);
-
- NetworkGraphService networkGraphService = new NetworkGraphDijkstra();
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataService, networkGraphService);
-
- NotificationService notificationService =
- consumerContext.<NotificationService>getSALService(NotificationService.class);
- PacketProcessingService packetProcessingService =
- consumerContext.<PacketProcessingService>getRpcService(PacketProcessingService.class);
- PacketHandler packetHandler = new PacketHandler();
- packetHandler.setAddressTracker(addressTracker);
- packetHandler.setFlowWriterService(flowWriterService);
- packetHandler.setPacketProcessingService(packetProcessingService);
- packetHandler.setInventoryService(new InventoryService(dataService));
-
- this.listenerRegistration = notificationService.registerNotificationListener(packetHandler);
- this.topologyLinkDataChangeHandler = new TopologyLinkDataChangeHandler(dataService, networkGraphService);
- topologyLinkDataChangeHandler.registerAsDataChangeListener();
- }
-
- /**
- * Cleanup the L2Switch.
- * @throws Exception occurs when the NotificationListener is closed
- */
- @Override
- public void close() throws Exception {
- if (listenerRegistration != null)
- listenerRegistration.close();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.addresstracker;
-
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.address.tracker.rev140402.L2Addresses;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.address.tracker.rev140402.l2.addresses.L2Address;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.address.tracker.rev140402.l2.addresses.L2AddressBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.address.tracker.rev140402.l2.addresses.L2AddressKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.Future;
-
-/**
- * AddressTracker manages the MD-SAL data tree for L2Address (mac, node connector pairings) information.
- */
-public class AddressTracker {
-
- private final static Logger _logger = LoggerFactory.getLogger(AddressTracker.class);
- private DataBrokerService dataService;
-
- /**
- * Construct an AddressTracker with the specified inputs
- * @param dataService The DataBrokerService for the AddressTracker
- */
- public AddressTracker(DataBrokerService dataService) {
- this.dataService = dataService;
- }
-
- /**
- * Get all the L2 Addresses in the MD-SAL data tree
- * @return All the L2 Addresses in the MD-SAL data tree
- */
- public L2Addresses getAddresses() {
- return (L2Addresses)dataService.readOperationalData(InstanceIdentifier.<L2Addresses>builder(L2Addresses.class).toInstance());
- }
-
- /**
- * Get a specific L2 Address in the MD-SAL data tree
- * @param macAddress A MacAddress associated with an L2 Address object
- * @return The L2 Address corresponding to the specified macAddress
- */
- public L2Address getAddress(MacAddress macAddress) {
- return (L2Address) dataService.readOperationalData(createPath(macAddress));
- }
-
- /**
- * Add L2 Address into the MD-SAL data tree
- * @param macAddress The MacAddress of the new L2Address object
- * @param nodeConnectorRef The NodeConnectorRef of the new L2Address object
- * @return Future containing the result of the add operation
- */
- public Future<RpcResult<TransactionStatus>> addAddress(MacAddress macAddress, NodeConnectorRef nodeConnectorRef) {
- if(macAddress == null || nodeConnectorRef == null) {
- return null;
- }
-
- // Create L2Address
- final L2AddressBuilder builder = new L2AddressBuilder();
- builder.setKey(new L2AddressKey(macAddress))
- .setMac(macAddress)
- .setNodeConnectorRef(nodeConnectorRef);
-
- // Add L2Address to MD-SAL data tree
- final DataModificationTransaction it = dataService.beginTransaction();
- it.putOperationalData(createPath(macAddress), builder.build());
- return it.commit();
- }
-
- /**
- * Remove L2Address from the MD-SAL data tree
- * @param macAddress The MacAddress of an L2Address object
- * @return Future containing the result of the remove operation
- */
- public Future<RpcResult<TransactionStatus>> removeHost(MacAddress macAddress) {
- final DataModificationTransaction it = dataService.beginTransaction();
- it.removeOperationalData(createPath(macAddress));
- return it.commit();
- }
-
- /**
- * Create InstanceIdentifier path for an L2Address in the MD-SAL data tree
- * @param macAddress The MacAddress of an L2Address object
- * @return InstanceIdentifier of the L2Address corresponding to the specified macAddress
- */
- private InstanceIdentifier<L2Address> createPath(MacAddress macAddress) {
- return InstanceIdentifier.<L2Addresses>builder(L2Addresses.class)
- .<L2Address, L2AddressKey>child(L2Address.class, new L2AddressKey(macAddress)).toInstance();
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.flow;
-
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-
-/**
- * Service that adds packet forwarding flows to configuration data store.
- */
-public interface FlowWriterService {
-
- /**
- * Writes a flow that forwards packets to destPort if destination mac in packet is destMac and
- * source Mac in packet is sourceMac. If sourceMac is null then flow would not set any source mac,
- * resulting in all packets with destMac being forwarded to destPort.
- *
- * @param sourceMac
- * @param destMac
- * @param destNodeConnectorRef
- */
- public void addMacToMacFlow(MacAddress sourceMac, MacAddress destMac, NodeConnectorRef destNodeConnectorRef);
-
- /**
- * Writes mac-to-mac flow on all ports that are in the path between given source and destination ports.
- * It uses path provided by NetworkGraphService{@link org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService} to find a links{@link org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link}
- * between given ports. And then writes appropriate flow on each port that is covered in that path.
- *
- * @param sourceMac
- * @param sourceNodeConnectorRef
- * @param destMac
- * @param destNodeConnectorRef
- */
- public void addMacToMacFlowsUsingShortestPath(MacAddress sourceMac, NodeConnectorRef sourceNodeConnectorRef, MacAddress destMac, NodeConnectorRef destNodeConnectorRef);
-
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.flow;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-
-import org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService;
-import org.opendaylight.controller.sample.l2switch.md.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.OutputActionCaseBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.output.action._case.OutputActionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowModFlags;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.InstructionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActionsCaseBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.apply.actions._case.ApplyActions;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.apply.actions._case.ApplyActionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.ethernet.match.fields.EthernetDestinationBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.ethernet.match.fields.EthernetSourceBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.math.BigInteger;
-import java.util.List;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Implementation of FlowWriterService{@link org.opendaylight.controller.sample.l2switch.md.flow.FlowWriterService},
- * that builds required flow and writes to configuration data store using provided DataBrokerService
- * {@link org.opendaylight.controller.sal.binding.api.data.DataBrokerService}
- */
-public class FlowWriterServiceImpl implements FlowWriterService {
- private static final Logger _logger = LoggerFactory.getLogger(FlowWriterServiceImpl.class);
- private final DataBrokerService dataBrokerService;
- private final NetworkGraphService networkGraphService;
- private AtomicLong flowIdInc = new AtomicLong();
- private AtomicLong flowCookieInc = new AtomicLong(0x2a00000000000000L);
-
-
- public FlowWriterServiceImpl(DataBrokerService dataBrokerService, NetworkGraphService networkGraphService) {
- Preconditions.checkNotNull(dataBrokerService, "dataBrokerService should not be null.");
- Preconditions.checkNotNull(networkGraphService, "networkGraphService should not be null.");
- this.dataBrokerService = dataBrokerService;
- this.networkGraphService = networkGraphService;
- }
-
- /**
- * Writes a flow that forwards packets to destPort if destination mac in packet is destMac and
- * source Mac in packet is sourceMac. If sourceMac is null then flow would not set any source mac,
- * resulting in all packets with destMac being forwarded to destPort.
- *
- * @param sourceMac
- * @param destMac
- * @param destNodeConnectorRef
- */
- @Override
- public void addMacToMacFlow(MacAddress sourceMac, MacAddress destMac, NodeConnectorRef destNodeConnectorRef) {
-
- Preconditions.checkNotNull(destMac, "Destination mac address should not be null.");
- Preconditions.checkNotNull(destNodeConnectorRef, "Destination port should not be null.");
-
-
- // do not add flow if both macs are same.
- if(sourceMac != null && destMac.equals(sourceMac)) {
- _logger.info("In addMacToMacFlow: No flows added. Source and Destination mac are same.");
- return;
- }
-
- // get flow table key
- TableKey flowTableKey = new TableKey((short) 0); //TODO: Hard coded Table Id 0, need to get it from Configuration data.
-
- //build a flow path based on node connector to program flow
- InstanceIdentifier<Flow> flowPath = buildFlowPath(destNodeConnectorRef, flowTableKey);
-
- // build a flow that target given mac id
- Flow flowBody = createMacToMacFlow(flowTableKey.getId(), 0, sourceMac, destMac, destNodeConnectorRef);
-
- // commit the flow in config data
- writeFlowToConfigData(flowPath, flowBody);
- }
-
- /**
- * Writes mac-to-mac flow on all ports that are in the path between given source and destination ports.
- * It uses path provided by NetworkGraphService
- * {@link org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService} to find a links
- * {@link org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link}
- * between given ports. And then writes appropriate flow on each port that is covered in that path.
- *
- * @param sourceMac
- * @param sourceNodeConnectorRef
- * @param destMac
- * @param destNodeConnectorRef
- */
- @Override
- public void addMacToMacFlowsUsingShortestPath(MacAddress sourceMac,
- NodeConnectorRef sourceNodeConnectorRef,
- MacAddress destMac,
- NodeConnectorRef destNodeConnectorRef) {
- Preconditions.checkNotNull(sourceMac, "Source mac address should not be null.");
- Preconditions.checkNotNull(sourceNodeConnectorRef, "Source port should not be null.");
- Preconditions.checkNotNull(destMac, "Destination mac address should not be null.");
- Preconditions.checkNotNull(destNodeConnectorRef, "Destination port should not be null.");
-
- if(sourceNodeConnectorRef.equals(destNodeConnectorRef)) {
- _logger.info("In addMacToMacFlowsUsingShortestPath: No flows added. Source and Destination ports are same.");
- return;
-
- }
- NodeId sourceNodeId = new NodeId(sourceNodeConnectorRef.getValue().firstKeyOf(Node.class, NodeKey.class).getId().getValue());
- NodeId destNodeId = new NodeId(destNodeConnectorRef.getValue().firstKeyOf(Node.class, NodeKey.class).getId().getValue());
-
- // add destMac-To-sourceMac flow on source port
- addMacToMacFlow(destMac, sourceMac, sourceNodeConnectorRef);
-
- // add sourceMac-To-destMac flow on destination port
- addMacToMacFlow(sourceMac, destMac, destNodeConnectorRef);
-
- if(!sourceNodeId.equals(destNodeId)) {
- List<Link> linksInBeween = networkGraphService.getPath(sourceNodeId, destNodeId);
-
- if(linksInBeween != null) {
- // assumes the list order is maintained and starts with link that has source as source node
- for(Link link : linksInBeween) {
- // add sourceMac-To-destMac flow on source port
- addMacToMacFlow(sourceMac, destMac, getSourceNodeConnectorRef(link));
-
- // add destMac-To-sourceMac flow on destination port
- addMacToMacFlow(destMac, sourceMac, getDestNodeConnectorRef(link));
- }
- }
- }
- }
-
- private NodeConnectorRef getSourceNodeConnectorRef(Link link) {
- InstanceIdentifier<NodeConnector> nodeConnectorInstanceIdentifier
- = InstanceIdentifierUtils.createNodeConnectorIdentifier(
- link.getSource().getSourceNode().getValue(),
- link.getSource().getSourceTp().getValue());
- return new NodeConnectorRef(nodeConnectorInstanceIdentifier);
- }
-
- private NodeConnectorRef getDestNodeConnectorRef(Link link) {
- InstanceIdentifier<NodeConnector> nodeConnectorInstanceIdentifier
- = InstanceIdentifierUtils.createNodeConnectorIdentifier(
- link.getDestination().getDestNode().getValue(),
- link.getDestination().getDestTp().getValue());
-
- return new NodeConnectorRef(nodeConnectorInstanceIdentifier);
- }
-
- /**
- * @param nodeConnectorRef
- * @return
- */
- private InstanceIdentifier<Flow> buildFlowPath(NodeConnectorRef nodeConnectorRef, TableKey flowTableKey) {
-
- // generate unique flow key
- FlowId flowId = new FlowId(String.valueOf(flowIdInc.getAndIncrement()));
- FlowKey flowKey = new FlowKey(flowId);
-
- return InstanceIdentifierUtils.generateFlowInstanceIdentifier(nodeConnectorRef, flowTableKey, flowKey);
- }
-
- /**
- * @param tableId
- * @param priority
- * @param sourceMac
- * @param destMac
- * @param destPort
- * @return {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder}
- * builds flow that forwards all packets with destMac to given port
- */
- private Flow createMacToMacFlow(Short tableId, int priority,
- MacAddress sourceMac, MacAddress destMac, NodeConnectorRef destPort) {
-
- // start building flow
- FlowBuilder macToMacFlow = new FlowBuilder() //
- .setTableId(tableId) //
- .setFlowName("mac2mac");
-
- // use its own hash code for id.
- macToMacFlow.setId(new FlowId(Long.toString(macToMacFlow.hashCode())));
-
- // create a match that has mac to mac ethernet match
- EthernetMatchBuilder ethernetMatchBuilder = new EthernetMatchBuilder() //
- .setEthernetDestination(new EthernetDestinationBuilder() //
- .setAddress(destMac) //
- .build());
- // set source in the match only if present
- if(sourceMac != null) {
- ethernetMatchBuilder.setEthernetSource(new EthernetSourceBuilder()
- .setAddress(sourceMac)
- .build());
- }
- EthernetMatch ethernetMatch = ethernetMatchBuilder.build();
- Match match = new MatchBuilder()
- .setEthernetMatch(ethernetMatch)
- .build();
-
-
- Uri destPortUri = destPort.getValue().firstKeyOf(NodeConnector.class, NodeConnectorKey.class).getId();
-
- Action outputToControllerAction = new ActionBuilder() //
- .setOrder(0)
- .setAction(new OutputActionCaseBuilder() //
- .setOutputAction(new OutputActionBuilder() //
- .setMaxLength(new Integer(0xffff)) //
- .setOutputNodeConnector(destPortUri) //
- .build()) //
- .build()) //
- .build();
-
- // Create an Apply Action
- ApplyActions applyActions = new ApplyActionsBuilder().setAction(ImmutableList.of(outputToControllerAction))
- .build();
-
- // Wrap our Apply Action in an Instruction
- Instruction applyActionsInstruction = new InstructionBuilder() //
- .setOrder(0)
- .setInstruction(new ApplyActionsCaseBuilder()//
- .setApplyActions(applyActions) //
- .build()) //
- .build();
-
- // Put our Instruction in a list of Instructions
- macToMacFlow
- .setMatch(match) //
- .setInstructions(new InstructionsBuilder() //
- .setInstruction(ImmutableList.of(applyActionsInstruction)) //
- .build()) //
- .setPriority(priority) //
- .setBufferId(0L) //
- .setHardTimeout(0) //
- .setIdleTimeout(0) //
- .setCookie(new FlowCookie(BigInteger.valueOf(flowCookieInc.getAndIncrement())))
- .setFlags(new FlowModFlags(false, false, false, false, false));
-
- return macToMacFlow.build();
- }
-
- /**
- * Starts and commits data change transaction which
- * modifies provided flow path with supplied body.
- *
- * @param flowPath
- * @param flowBody
- * @return transaction commit
- */
- private Future<RpcResult<TransactionStatus>> writeFlowToConfigData(InstanceIdentifier<Flow> flowPath,
- Flow flowBody) {
- DataModificationTransaction addFlowTransaction = dataBrokerService.beginTransaction();
- addFlowTransaction.putConfigurationData(flowPath, flowBody);
- return addFlowTransaction.commit();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.inventory;
-
-import org.opendaylight.controller.sample.l2switch.md.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * InventoryService provides functions related to Nodes & NodeConnectors.
- */
-public class InventoryService {
- private DataBrokerService dataService;
- // Key: SwitchId, Value: NodeConnectorRef that corresponds to NC between controller & switch
- private HashMap<String, NodeConnectorRef> controllerSwitchConnectors;
-
- /**
- * Construct an InventoryService object with the specified inputs.
- * @param dataService The DataBrokerService associated with the InventoryService.
- */
- public InventoryService(DataBrokerService dataService) {
- this.dataService = dataService;
- controllerSwitchConnectors = new HashMap<String, NodeConnectorRef>();
- }
-
- public HashMap<String, NodeConnectorRef> getControllerSwitchConnectors() {
- return controllerSwitchConnectors;
- }
-
- // ToDo: Improve performance for thousands of switch ports
- /**
- * Get the External NodeConnectors of the network, which are the NodeConnectors connected to hosts.
- * @return The list of external node connectors.
- */
- public List<NodeConnectorRef> getExternalNodeConnectors() {
- // External NodeConnectors = All - Internal
- ArrayList<NodeConnectorRef> externalNodeConnectors = new ArrayList<NodeConnectorRef>();
- Set<String> internalNodeConnectors = new HashSet<>();
-
- // Read Topology -- find list of switch-to-switch internal node connectors
- NetworkTopology networkTopology =
- (NetworkTopology)dataService.readOperationalData(
- InstanceIdentifier.<NetworkTopology>builder(NetworkTopology.class).toInstance());
-
- for (Topology topology : networkTopology.getTopology()) {
- Topology completeTopology =
- (Topology)dataService.readOperationalData(
- InstanceIdentifierUtils.generateTopologyInstanceIdentifier(
- topology.getTopologyId().getValue()));
-
- for (Link link : completeTopology.getLink()) {
- internalNodeConnectors.add(link.getDestination().getDestTp().getValue());
- internalNodeConnectors.add(link.getSource().getSourceTp().getValue());
- }
- }
-
- // Read Inventory -- contains list of all nodeConnectors
- InstanceIdentifier.InstanceIdentifierBuilder<Nodes> nodesInsIdBuilder = InstanceIdentifier.<Nodes>builder(Nodes.class);
- Nodes nodes = (Nodes)dataService.readOperationalData(nodesInsIdBuilder.toInstance());
- if (nodes != null) {
- for (Node node : nodes.getNode()) {
- Node completeNode = (Node)dataService.readOperationalData(InstanceIdentifierUtils.createNodePath(node.getId()));
- for (NodeConnector nodeConnector : completeNode.getNodeConnector()) {
- // NodeConnector isn't switch-to-switch, so it must be controller-to-switch (internal) or external
- if (!internalNodeConnectors.contains(nodeConnector.getId().getValue())) {
- NodeConnectorRef ncRef = new NodeConnectorRef(
- InstanceIdentifier.<Nodes>builder(Nodes.class).<Node, NodeKey>child(Node.class, node.getKey())
- .<NodeConnector, NodeConnectorKey>child(NodeConnector.class, nodeConnector.getKey()).toInstance());
-
- // External node connectors have "-" in their name for mininet, i.e. "s1-eth1"
- if (nodeConnector.getAugmentation(FlowCapableNodeConnector.class).getName().contains("-")) {
- externalNodeConnectors.add(ncRef);
- }
- // Controller-to-switch internal node connectors
- else {
- controllerSwitchConnectors.put(node.getId().getValue(), ncRef);
- }
- }
- }
- }
- }
-
- return externalNodeConnectors;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.packet;
-
-import org.opendaylight.controller.sample.l2switch.md.addresstracker.AddressTracker;
-import org.opendaylight.controller.sample.l2switch.md.flow.FlowWriterService;
-import org.opendaylight.controller.sample.l2switch.md.inventory.InventoryService;
-import org.opendaylight.controller.sample.l2switch.md.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.sal.packet.Ethernet;
-import org.opendaylight.controller.sal.packet.LLDP;
-import org.opendaylight.controller.sal.packet.LinkEncap;
-import org.opendaylight.controller.sal.packet.Packet;
-import org.opendaylight.controller.sal.packet.RawPacket;
-import org.opendaylight.controller.sal.utils.HexEncode;
-import org.opendaylight.controller.sal.utils.NetUtils;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.address.tracker.rev140402.l2.addresses.L2Address;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketProcessingListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketProcessingService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.PacketReceived;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.TransmitPacketInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.packet.service.rev130709.TransmitPacketInputBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.List;
-
-/**
- * PacketHandler examines Ethernet packets to find L2Addresses (mac, nodeConnector) pairings
- * of the sender and learns them.
- * It also forwards the data packets appropriately dependending upon whether it knows about the
- * target or not.
- */
-public class PacketHandler implements PacketProcessingListener {
-
- private final static Logger _logger = LoggerFactory.getLogger(PacketHandler.class);
-
- private PacketProcessingService packetProcessingService;
- private AddressTracker addressTracker;
- private FlowWriterService flowWriterService;
- private InventoryService inventoryService;
-
- public void setAddressTracker(AddressTracker addressTracker) {
- this.addressTracker = addressTracker;
- }
-
- public void setPacketProcessingService(PacketProcessingService packetProcessingService) {
- this.packetProcessingService = packetProcessingService;
- }
-
- public void setFlowWriterService(FlowWriterService flowWriterService) {
- this.flowWriterService = flowWriterService;
- }
-
- public void setInventoryService(InventoryService inventoryService) {
- this.inventoryService = inventoryService;
- }
-
- /**
- * The handler function for all incoming packets.
- * @param packetReceived The incoming packet.
- */
- @Override
- public void onPacketReceived(PacketReceived packetReceived) {
-
- if(packetReceived == null) return;
-
- try {
- byte[] payload = packetReceived.getPayload();
- RawPacket rawPacket = new RawPacket(payload);
- NodeConnectorRef ingress = packetReceived.getIngress();
-
- Packet packet = decodeDataPacket(rawPacket);
-
- if(!(packet instanceof Ethernet)) return;
-
- handleEthernetPacket(packet, ingress);
-
- } catch(Exception e) {
- _logger.error("Failed to handle packet {}", packetReceived, e);
- }
- }
-
- /**
- * The handler function for Ethernet packets.
- * @param packet The incoming Ethernet packet.
- * @param ingress The NodeConnector where the Ethernet packet came from.
- */
- private void handleEthernetPacket(Packet packet, NodeConnectorRef ingress) {
- byte[] srcMac = ((Ethernet) packet).getSourceMACAddress();
- byte[] destMac = ((Ethernet) packet).getDestinationMACAddress();
-
- if (srcMac == null || srcMac.length == 0) return;
-
- Object enclosedPacket = packet.getPayload();
-
- if (enclosedPacket instanceof LLDP)
- return; // LLDP packets are handled by OpenFlowPlugin
-
- // get l2address by src mac
- // if unknown, add l2address
- MacAddress srcMacAddress = toMacAddress(srcMac);
- L2Address src = addressTracker.getAddress(srcMacAddress);
- boolean isSrcKnown = (src != null);
- if (!isSrcKnown) {
- addressTracker.addAddress(srcMacAddress, ingress);
- }
-
- // get host by dest mac
- // if known set dest known to true
- MacAddress destMacAddress = toMacAddress(destMac);
- L2Address dest = addressTracker.getAddress(destMacAddress);
- boolean isDestKnown = (dest != null);
-
- byte[] payload = packet.getRawPayload();
- // if (src and dest known)
- // sendpacket to dest and add src<->dest flow
- if(isSrcKnown & isDestKnown) {
- flowWriterService.addMacToMacFlowsUsingShortestPath(srcMacAddress, src.getNodeConnectorRef(),
- destMacAddress, dest.getNodeConnectorRef());
- sendPacketOut(payload, getControllerNodeConnector(dest.getNodeConnectorRef()), dest.getNodeConnectorRef());
- } else {
- // if (dest unknown)
- // sendpacket to external links minus ingress
- floodExternalPorts(payload, ingress);
- }
- }
-
- /**
- * Floods the specified payload on external ports, which are ports not connected to switches.
- * @param payload The payload to be flooded.
- * @param ingress The NodeConnector where the payload came from.
- */
- private void floodExternalPorts(byte[] payload, NodeConnectorRef ingress) {
- List<NodeConnectorRef> externalPorts = inventoryService.getExternalNodeConnectors();
- externalPorts.remove(ingress);
-
- for (NodeConnectorRef egress : externalPorts) {
- sendPacketOut(payload, getControllerNodeConnector(egress), egress);
- }
- }
-
- /**
- * Sends the specified packet on the specified port.
- * @param payload The payload to be sent.
- * @param ingress The NodeConnector where the payload came from.
- * @param egress The NodeConnector where the payload will go.
- */
- private void sendPacketOut(byte[] payload, NodeConnectorRef ingress, NodeConnectorRef egress) {
- if (ingress == null || egress == null) return;
- InstanceIdentifier<Node> egressNodePath = InstanceIdentifierUtils.getNodePath(egress.getValue());
- TransmitPacketInput input = new TransmitPacketInputBuilder() //
- .setPayload(payload) //
- .setNode(new NodeRef(egressNodePath)) //
- .setEgress(egress) //
- .setIngress(ingress) //
- .build();
- packetProcessingService.transmitPacket(input);
- }
-
- /**
- * Decodes an incoming packet.
- * @param raw The raw packet to be decoded.
- * @return The decoded form of the raw packet.
- */
- private Packet decodeDataPacket(RawPacket raw) {
- if(raw == null) {
- return null;
- }
- byte[] data = raw.getPacketData();
- if(data.length <= 0) {
- return null;
- }
- if(raw.getEncap().equals(LinkEncap.ETHERNET)) {
- Ethernet res = new Ethernet();
- try {
- res.deserialize(data, 0, data.length * NetUtils.NumBitsInAByte);
- res.setRawPayload(raw.getPacketData());
- } catch(Exception e) {
- _logger.warn("Failed to decode packet: {}", e.getMessage());
- }
- return res;
- }
- return null;
- }
-
- /**
- * Creates a MacAddress object out of a byte array.
- * @param dataLinkAddress The byte-array form of a MacAddress
- * @return MacAddress of the specified dataLinkAddress.
- */
- private MacAddress toMacAddress(byte[] dataLinkAddress) {
- return new MacAddress(HexEncode.bytesToHexStringFormat(dataLinkAddress));
- }
-
- /**
- * Gets the NodeConnector that connects the controller & switch for a specified switch port/node connector.
- * @param nodeConnectorRef The nodeConnector of a switch.
- * @return The NodeConnector that that connects the controller & switch.
- */
- private NodeConnectorRef getControllerNodeConnector(NodeConnectorRef nodeConnectorRef) {
- NodeConnectorRef controllerSwitchNodeConnector = null;
- HashMap<String, NodeConnectorRef> controllerSwitchConnectors = inventoryService.getControllerSwitchConnectors();
- InstanceIdentifier<Node> nodePath = InstanceIdentifierUtils.getNodePath(nodeConnectorRef.getValue());
- if (nodePath != null) {
- NodeKey nodeKey = InstanceIdentifierUtils.getNodeKey(nodePath);
- if (nodeKey != null) {
- controllerSwitchNodeConnector = controllerSwitchConnectors.get(nodeKey.getId().getValue());
- }
- }
- return controllerSwitchNodeConnector;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.topology;
-
-import com.google.common.base.Preconditions;
-import edu.uci.ics.jung.algorithms.shortestpath.DijkstraShortestPath;
-import edu.uci.ics.jung.graph.DirectedSparseGraph;
-import edu.uci.ics.jung.graph.Graph;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * Implementation of NetworkGraphService{@link org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService}.
- * It uses Jung graph library internally to maintain a graph and optimum way to return shortest path using
- * Dijkstra algorithm.
- */
-public class NetworkGraphDijkstra implements NetworkGraphService {
-
- private static final Logger _logger = LoggerFactory.getLogger(NetworkGraphDijkstra.class);
-
- DijkstraShortestPath<NodeId, Link> shortestPath = null;
- Graph<NodeId, Link> networkGraph = null;
-
- /**
- * Adds links to existing graph or creates new directed graph with given links if graph was not initialized.
- * @param links
- */
- @Override
- public synchronized void addLinks(List<Link> links) {
- if(links == null || links.isEmpty()) {
- _logger.info("In addLinks: No link added as links is null or empty.");
- return;
- }
-
- if(networkGraph == null) {
- networkGraph = new DirectedSparseGraph<>();
- }
-
- for(Link link : links) {
- NodeId sourceNodeId = link.getSource().getSourceNode();
- NodeId destinationNodeId = link.getDestination().getDestNode();
- networkGraph.addVertex(sourceNodeId);
- networkGraph.addVertex(destinationNodeId);
- networkGraph.addEdge(link, sourceNodeId, destinationNodeId);
- }
- if(shortestPath == null) {
- shortestPath = new DijkstraShortestPath<>(networkGraph);
- } else {
- shortestPath.reset();
- }
- }
-
- /**
- * removes links from existing graph.
- * @param links
- */
- @Override
- public synchronized void removeLinks(List<Link> links) {
- Preconditions.checkNotNull(networkGraph, "Graph is not initialized, add links first.");
-
- if(links == null || links.isEmpty()) {
- _logger.info("In removeLinks: No link removed as links is null or empty.");
- return;
- }
-
- for(Link link : links) {
- networkGraph.removeEdge(link);
- }
-
- if(shortestPath == null) {
- shortestPath = new DijkstraShortestPath<>(networkGraph);
- } else {
- shortestPath.reset();
- }
- }
-
- /**
- * returns a path between 2 nodes. Uses Dijkstra's algorithm to return shortest path.
- * @param sourceNodeId
- * @param destinationNodeId
- * @return
- */
- @Override
- public synchronized List<Link> getPath(NodeId sourceNodeId, NodeId destinationNodeId) {
- Preconditions.checkNotNull(shortestPath, "Graph is not initialized, add links first.");
-
- if(sourceNodeId == null || destinationNodeId == null) {
- _logger.info("In getPath: returning null, as sourceNodeId or destinationNodeId is null.");
- return null;
- }
-
- return shortestPath.getPath(sourceNodeId, destinationNodeId);
- }
-
- /**
- * Clears the prebuilt graph, in case same service instance is required to process a new graph.
- */
- @Override
- public synchronized void clear() {
- networkGraph = null;
- shortestPath = null;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.topology;
-
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-
-import java.util.List;
-
-/**
- * Service that allows to build a network graph using Topology links
- * {@link org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link}
- * and exposes operation that can be performed on such graph.
- */
-public interface NetworkGraphService {
-
- /**
- * Adds links to existing graph or creates new graph with given links if graph was not initialized.
- * @param links
- */
- public void addLinks(List<Link> links);
-
- /**
- * removes links from existing graph.
- * @param links
- */
- public void removeLinks(List<Link> links);
-
- /**
- * returns a path between 2 nodes. Implementation should ideally return shortest path.
- * @param sourceNodeId
- * @param destinationNodeId
- * @return
- */
- public List<Link> getPath(NodeId sourceNodeId, NodeId destinationNodeId);
-
- /**
- * Clears the prebuilt graph, in case same service instance is required to process a new graph.
- */
- public void clear();
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.topology;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sample.l2switch.md.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Listens to data change events on topology links
- * {@link org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link}
- * and maintains a topology graph using provided NetworkGraphService
- * {@link org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService}.
- * It refreshes the graph after a delay(default 10 sec) to accommodate burst of change events if they come in bulk.
- * This is to avoid continuous refresh of graph on a series of change events in short time.
- */
-public class TopologyLinkDataChangeHandler implements DataChangeListener {
- private static final Logger _logger = LoggerFactory.getLogger(TopologyLinkDataChangeHandler.class);
- private static final String DEFAULT_TOPOLOGY_ID = "flow:1";
-
- private boolean networkGraphRefreshScheduled = false;
- private final ScheduledExecutorService networkGraphRefreshScheduler = Executors.newScheduledThreadPool(1);
- private final long DEFAULT_GRAPH_REFRESH_DELAY = 10;
- private final long graphRefreshDelayInSec;
-
- private final NetworkGraphService networkGraphService;
- private final DataBrokerService dataBrokerService;
-
- /**
- * Uses default delay to refresh topology graph if this constructor is used.
- * @param dataBrokerService
- * @param networkGraphService
- */
- public TopologyLinkDataChangeHandler(DataBrokerService dataBrokerService, NetworkGraphService networkGraphService) {
- Preconditions.checkNotNull(dataBrokerService, "dataBrokerService should not be null.");
- Preconditions.checkNotNull(networkGraphService, "networkGraphService should not be null.");
- this.dataBrokerService = dataBrokerService;
- this.networkGraphService = networkGraphService;
- this.graphRefreshDelayInSec = DEFAULT_GRAPH_REFRESH_DELAY;
- }
-
- /**
- *
- * @param dataBrokerService
- * @param networkGraphService
- * @param graphRefreshDelayInSec
- */
- public TopologyLinkDataChangeHandler(DataBrokerService dataBrokerService, NetworkGraphService networkGraphService,
- long graphRefreshDelayInSec) {
- Preconditions.checkNotNull(dataBrokerService, "dataBrokerService should not be null.");
- Preconditions.checkNotNull(networkGraphService, "networkGraphService should not be null.");
- this.dataBrokerService = dataBrokerService;
- this.networkGraphService = networkGraphService;
- this.graphRefreshDelayInSec = graphRefreshDelayInSec;
- }
-
- /**
- * Based on if links have been added or removed in topology data store, schedules a refresh of network graph.
- * @param dataChangeEvent
- */
- @Override
- public void onDataChanged(DataChangeEvent<InstanceIdentifier<?>, DataObject> dataChangeEvent) {
- if(dataChangeEvent == null) {
- _logger.info("In onDataChanged: No Processing done as dataChangeEvent is null.");
- }
- Map<InstanceIdentifier<?>, DataObject> linkOriginalData = dataChangeEvent.getOriginalOperationalData();
- Map<InstanceIdentifier<?>, DataObject> linkUpdatedData = dataChangeEvent.getUpdatedOperationalData();
- // change this logic, once MD-SAL start populating DeletedOperationData Set
- if(linkOriginalData != null && linkUpdatedData != null
- && (linkOriginalData.size() != 0 || linkUpdatedData.size() != 0)
- && !networkGraphRefreshScheduled) {
- networkGraphRefreshScheduled = linkOriginalData.size() != linkUpdatedData.size();
- if(networkGraphRefreshScheduled) {
- networkGraphRefreshScheduler.schedule(new NetworkGraphRefresher(), graphRefreshDelayInSec, TimeUnit.SECONDS);
- }
- }
-
- }
-
- /**
- * Registers as a data listener to receive changes done to
- * {@link org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link}
- * under {@link org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology}
- * operation data root.
- */
-
- public void registerAsDataChangeListener() {
- InstanceIdentifier<Link> linkInstance = InstanceIdentifier.builder(NetworkTopology.class)
- .child(Topology.class, new TopologyKey(new TopologyId(DEFAULT_TOPOLOGY_ID))).child(Link.class).toInstance();
- dataBrokerService.registerDataChangeListener(linkInstance, this);
- }
-
- /**
- *
- */
- private class NetworkGraphRefresher implements Runnable {
- /**
- *
- */
- @Override
- public void run() {
- networkGraphRefreshScheduled = false;
- //TODO: it should refer to changed links only from DataChangeEvent above.
- List<Link> links = getLinksFromTopology(DEFAULT_TOPOLOGY_ID);
- networkGraphService.clear();// can remove this once changed links are addressed
- if(links != null && !links.isEmpty()) {
- networkGraphService.addLinks(links);
- }
- }
-
- /**
- * @param topologyId
- * @return
- */
- private List<Link> getLinksFromTopology(String topologyId) {
- InstanceIdentifier<Topology> topologyInstanceIdentifier = InstanceIdentifierUtils.generateTopologyInstanceIdentifier(topologyId);
- Topology topology = (Topology) dataBrokerService.readOperationalData(topologyInstanceIdentifier);
- return topology.getLink();
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.util;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/* InstanceIdentifierUtils provides utility functions related to InstanceIdentifiers.
- */
-public final class InstanceIdentifierUtils {
-
- private InstanceIdentifierUtils() {
- throw new UnsupportedOperationException("Utility class should never be instantiated");
- }
-
- /**
- * Creates an Instance Identifier (path) for node with specified id
- *
- * @param nodeId
- * @return
- */
- public static final InstanceIdentifier<Node> createNodePath(final NodeId nodeId) {
- return InstanceIdentifier.builder(Nodes.class) //
- .child(Node.class, new NodeKey(nodeId)) //
- .build();
- }
-
- /**
- * Shorten's node child path to node path.
- *
- * @param nodeChild child of node, from which we want node path.
- * @return
- */
- public static final InstanceIdentifier<Node> getNodePath(final InstanceIdentifier<?> nodeChild) {
- return nodeChild.firstIdentifierOf(Node.class);
- }
-
-
- /**
- * Creates a table path by appending table specific location to node path
- *
- * @param nodePath
- * @param tableKey
- * @return
- */
- public static final InstanceIdentifier<Table> createTablePath(final InstanceIdentifier<Node> nodePath, final TableKey tableKey) {
- return nodePath.builder()
- .augmentation(FlowCapableNode.class)
- .child(Table.class, tableKey)
- .build();
- }
-
- /**
- * Creates a path for particular flow, by appending flow-specific information
- * to table path.
- *
- * @param table
- * @param flowKey
- * @return
- */
- public static InstanceIdentifier<Flow> createFlowPath(final InstanceIdentifier<Table> table, final FlowKey flowKey) {
- return table.child(Flow.class, flowKey);
- }
-
- /**
- * Extract table id from table path.
- *
- * @param tablePath
- * @return
- */
- public static Short getTableId(final InstanceIdentifier<Table> tablePath) {
- return tablePath.firstKeyOf(Table.class, TableKey.class).getId();
- }
-
- /**
- * Extracts NodeConnectorKey from node connector path.
- */
- public static NodeConnectorKey getNodeConnectorKey(final InstanceIdentifier<?> nodeConnectorPath) {
- return nodeConnectorPath.firstKeyOf(NodeConnector.class, NodeConnectorKey.class);
- }
-
- /**
- * Extracts NodeKey from node path.
- */
- public static NodeKey getNodeKey(final InstanceIdentifier<?> nodePath) {
- return nodePath.firstKeyOf(Node.class, NodeKey.class);
- }
-
-
- //
- public static final InstanceIdentifier<NodeConnector> createNodeConnectorIdentifier(final String nodeIdValue,
- final String nodeConnectorIdValue) {
- return createNodePath(new NodeId(nodeIdValue))
- .child(NodeConnector.class, new NodeConnectorKey(new NodeConnectorId(nodeConnectorIdValue)));
- }
-
- /**
- * @param nodeConnectorRef
- * @return
- */
- public static InstanceIdentifier<Node> generateNodeInstanceIdentifier(final NodeConnectorRef nodeConnectorRef) {
- return nodeConnectorRef.getValue().firstIdentifierOf(Node.class);
- }
-
- /**
- * @param nodeConnectorRef
- * @param flowTableKey
- * @return
- */
- public static InstanceIdentifier<Table> generateFlowTableInstanceIdentifier(final NodeConnectorRef nodeConnectorRef, final TableKey flowTableKey) {
- return generateNodeInstanceIdentifier(nodeConnectorRef).builder()
- .augmentation(FlowCapableNode.class)
- .child(Table.class, flowTableKey)
- .build();
- }
-
- /**
- * @param nodeConnectorRef
- * @param flowTableKey
- * @param flowKey
- * @return
- */
- public static InstanceIdentifier<Flow> generateFlowInstanceIdentifier(final NodeConnectorRef nodeConnectorRef,
- final TableKey flowTableKey,
- final FlowKey flowKey) {
- return generateFlowTableInstanceIdentifier(nodeConnectorRef, flowTableKey).child(Flow.class, flowKey);
- }
-
- public static InstanceIdentifier<Topology> generateTopologyInstanceIdentifier(final String topologyId) {
- return InstanceIdentifier.builder(NetworkTopology.class)
- .child(Topology.class, new TopologyKey(new TopologyId(topologyId)))
- .build();
- }
-}
-
+++ /dev/null
-/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.flow;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.controller.sample.l2switch.md.topology.NetworkGraphService;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- */
-public class FlowWriterServiceImplTest {
- private DataBrokerService dataBrokerService;
- private NodeConnectorRef srcNodeConnectorRef;
- private NodeConnectorRef destNodeConnectorRef;
- private MacAddress destMacAddress;
- private MacAddress srcMacAddress;
- private DataModificationTransaction dataModificationTransaction;
- private NetworkGraphService networkGraphService;
-
- @Before
- public void init() {
- dataBrokerService = mock(DataBrokerService.class);
- networkGraphService = mock(NetworkGraphService.class);
- //build source node connector ref
- InstanceIdentifier<Nodes> srcNodesInstanceIdentifier = InstanceIdentifier.create(Nodes.class);
- InstanceIdentifier<Node> srcNodeInstanceIdentifier = srcNodesInstanceIdentifier
- .child(Node.class, new NodeKey(new NodeId("openflow:1")));
- InstanceIdentifier<NodeConnector> srcNodeConnectorInstanceIdentifier = srcNodeInstanceIdentifier
- .child(NodeConnector.class, new NodeConnectorKey(new NodeConnectorId("openflow:1:2")));
- srcNodeConnectorRef = new NodeConnectorRef(srcNodeConnectorInstanceIdentifier);
-
- //build dest node connector ref
- InstanceIdentifier<Nodes> nodesInstanceIdentifier
- = InstanceIdentifier.builder(Nodes.class)
- .build();
- InstanceIdentifier<Node> nodeInstanceIdentifier =
- nodesInstanceIdentifier.child(Node.class, new NodeKey(new NodeId("openflow:2")));
- InstanceIdentifier<NodeConnector> nodeConnectorInstanceIdentifier =
- nodeInstanceIdentifier.child(NodeConnector.class, new NodeConnectorKey(new NodeConnectorId("openflow:2:2")));
- destNodeConnectorRef = new NodeConnectorRef(nodeConnectorInstanceIdentifier);
- destMacAddress = new MacAddress("00:0a:95:9d:68:16");
- srcMacAddress = new MacAddress("00:0a:95:8c:97:24");
- dataModificationTransaction = mock(DataModificationTransaction.class);
- when(dataBrokerService.beginTransaction()).thenReturn(dataModificationTransaction);
- }
-
- @Test
- public void testFlowWriterServiceImpl_NPEWhenDataBrokerServiceIsNull() throws Exception {
- try {
- new FlowWriterServiceImpl(null, networkGraphService);
- fail("Expected null pointer exception.");
- } catch(NullPointerException npe) {
- assertEquals("dataBrokerService should not be null.", npe.getMessage());
- }
- }
-
- @Test
- public void testAddMacToMacFlow_NPEWhenNullSourceMacDestMacAndNodeConnectorRef() throws Exception {
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataBrokerService, networkGraphService);
- try {
- flowWriterService.addMacToMacFlow(null, null, null);
- fail("Expected null pointer exception.");
- } catch(NullPointerException npe) {
- assertEquals("Destination mac address should not be null.", npe.getMessage());
- }
- }
-
- @Test
- public void testAddMacToMacFlow_NPEWhenSourceMacNullMac() throws Exception {
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataBrokerService, networkGraphService);
- try {
- flowWriterService.addMacToMacFlow(null, null, destNodeConnectorRef);
- fail("Expected null pointer exception.");
- } catch(NullPointerException npe) {
- assertEquals("Destination mac address should not be null.", npe.getMessage());
- }
- }
-
- @Test
- public void testAddMacToMacFlow_NPEWhenNullSourceMacNodeConnectorRef() throws Exception {
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataBrokerService, networkGraphService);
- try {
- flowWriterService.addMacToMacFlow(null, destMacAddress, null);
- fail("Expected null pointer exception.");
- } catch(NullPointerException npe) {
- assertEquals("Destination port should not be null.", npe.getMessage());
- }
- }
-
- @Test
- public void testAddMacToMacFlow_WhenNullSourceMac() throws Exception {
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataBrokerService, networkGraphService);
- flowWriterService.addMacToMacFlow(null, destMacAddress, destNodeConnectorRef);
- verify(dataBrokerService, times(1)).beginTransaction();
- verify(dataModificationTransaction, times(1)).commit();
- }
-
- @Test
- public void testAddMacToMacFlow_WhenSrcAndDestMacAreSame() throws Exception {
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataBrokerService, networkGraphService);
- flowWriterService.addMacToMacFlow(new MacAddress(destMacAddress.getValue()), destMacAddress, destNodeConnectorRef);
- verify(dataBrokerService, never()).beginTransaction();
- verify(dataModificationTransaction, never()).commit();
-
- }
-
- @Test
- public void testAddMacToMacFlow_SunnyDay() throws Exception {
- FlowWriterService flowWriterService = new FlowWriterServiceImpl(dataBrokerService, networkGraphService);
- flowWriterService.addMacToMacFlow(srcMacAddress, destMacAddress, destNodeConnectorRef);
- verify(dataBrokerService, times(1)).beginTransaction();
- verify(dataModificationTransaction, times(1)).commit();
- }
-
-}
+++ /dev/null
-/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.topology;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import static junit.framework.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- */
-public class NetworkGraphDijkstraTest {
- Link link1, link2, link3, link4, link5, link6, link7, link8, link9, link10,link11,link12;
- Destination dest1, dest2, dest3, dest4, dest5, dest6,dest7,dest8,dest9,dest10,dest11,dest12;
- Source src1, src2, src3, src4, src5, src6,src7,src8,src9,src10,src11,src12;
- NodeId nodeId1 = new NodeId("openflow:1");
- NodeId nodeId2 = new NodeId("openflow:2");
- NodeId nodeId3 = new NodeId("openflow:3");
- NodeId nodeId4 = new NodeId("openflow:4");
- NodeId nodeId5 = new NodeId("openflow:5");
- NodeId nodeId6 = new NodeId("openflow:6");
- NodeId nodeId7 = new NodeId("openflow:7");
- List<Link> links = new ArrayList<>();
-
- @Before
- public void init() {
- link1 = mock(Link.class);
- link2 = mock(Link.class);
- link3 = mock(Link.class);
- link4 = mock(Link.class);
- link5 = mock(Link.class);
- link6 = mock(Link.class);
- link7 = mock(Link.class);
- link8 = mock(Link.class);
- link9 = mock(Link.class);
- link10 = mock(Link.class);
- link11 = mock(Link.class);
- link12 = mock(Link.class);
- dest1 = mock(Destination.class);
- dest2 = mock(Destination.class);
- dest3 = mock(Destination.class);
- dest4 = mock(Destination.class);
- dest5 = mock(Destination.class);
- dest6 = mock(Destination.class);
- dest7 = mock(Destination.class);
- dest8 = mock(Destination.class);
- dest9 = mock(Destination.class);
- dest10 = mock(Destination.class);
- dest11 = mock(Destination.class);
- dest12 = mock(Destination.class);
- src1 = mock(Source.class);
- src2 = mock(Source.class);
- src3 = mock(Source.class);
- src4 = mock(Source.class);
- src5 = mock(Source.class);
- src6 = mock(Source.class);
- src7 = mock(Source.class);
- src8 = mock(Source.class);
- src9 = mock(Source.class);
- src10 = mock(Source.class);
- src11 = mock(Source.class);
- src12 = mock(Source.class);
- when(link1.getSource()).thenReturn(src1);
- when(link2.getSource()).thenReturn(src2);
- when(link3.getSource()).thenReturn(src3);
- when(link4.getSource()).thenReturn(src4);
- when(link5.getSource()).thenReturn(src5);
- when(link6.getSource()).thenReturn(src6);
- when(link7.getSource()).thenReturn(src7);
- when(link8.getSource()).thenReturn(src8);
- when(link9.getSource()).thenReturn(src9);
- when(link10.getSource()).thenReturn(src10);
- when(link11.getSource()).thenReturn(src11);
- when(link12.getSource()).thenReturn(src12);
- when(link1.getDestination()).thenReturn(dest1);
- when(link2.getDestination()).thenReturn(dest2);
- when(link3.getDestination()).thenReturn(dest3);
- when(link4.getDestination()).thenReturn(dest4);
- when(link5.getDestination()).thenReturn(dest5);
- when(link6.getDestination()).thenReturn(dest6);
- when(link7.getDestination()).thenReturn(dest7);
- when(link8.getDestination()).thenReturn(dest8);
- when(link9.getDestination()).thenReturn(dest9);
- when(link10.getDestination()).thenReturn(dest10);
- when(link11.getDestination()).thenReturn(dest11);
- when(link12.getDestination()).thenReturn(dest12);
- when(src1.getSourceNode()).thenReturn(nodeId1);
- when(dest1.getDestNode()).thenReturn(nodeId2);
- when(src2.getSourceNode()).thenReturn(nodeId2);
- when(dest2.getDestNode()).thenReturn(nodeId1);
- when(src3.getSourceNode()).thenReturn(nodeId1);
- when(dest3.getDestNode()).thenReturn(nodeId3);
- when(src4.getSourceNode()).thenReturn(nodeId3);
- when(dest4.getDestNode()).thenReturn(nodeId1);
- when(src5.getSourceNode()).thenReturn(nodeId2);
- when(dest5.getDestNode()).thenReturn(nodeId4);
- when(src6.getSourceNode()).thenReturn(nodeId4);
- when(dest6.getDestNode()).thenReturn(nodeId2);
- when(src7.getSourceNode()).thenReturn(nodeId2);
- when(dest7.getDestNode()).thenReturn(nodeId5);
- when(src8.getSourceNode()).thenReturn(nodeId5);
- when(dest8.getDestNode()).thenReturn(nodeId2);
- when(src9.getSourceNode()).thenReturn(nodeId6);
- when(dest9.getDestNode()).thenReturn(nodeId3);
- when(src10.getSourceNode()).thenReturn(nodeId3);
- when(dest10.getDestNode()).thenReturn(nodeId6);
- when(src11.getSourceNode()).thenReturn(nodeId7);
- when(dest11.getDestNode()).thenReturn(nodeId3);
- when(src12.getSourceNode()).thenReturn(nodeId3);
- when(dest12.getDestNode()).thenReturn(nodeId7);
- links.add(link1);
- links.add(link2);
- links.add(link3);
- links.add(link4);
- links.add(link5);
- links.add(link6);
- links.add(link7);
- links.add(link8);
- links.add(link9);
- links.add(link10);
- links.add(link11);
- links.add(link12);
-
- }
-
- @Test
- public void testAddLinksAndGetPath() throws Exception {
- NetworkGraphService networkGraphService = new NetworkGraphDijkstra();
- networkGraphService.addLinks(links);
- List<Link> path = networkGraphService.getPath(nodeId2, nodeId3);
- assertEquals("path size is not as expected.", 2, path.size());
- assertEquals("link source is not as expected.", nodeId2, path.get(0).getSource().getSourceNode());
- assertEquals("link destination is not as expected.", nodeId1, path.get(0).getDestination().getDestNode());
- path = networkGraphService.getPath(nodeId3, nodeId2);
- assertEquals("path size is not as expected.", 2, path.size());
- assertEquals("link source is not as expected.", nodeId3, path.get(0).getSource().getSourceNode());
- assertEquals("link destination is not as expected.", nodeId1, path.get(0).getDestination().getDestNode());
-
- path = networkGraphService.getPath(nodeId4, nodeId6);
- assertEquals("path size is not as expected.", 4, path.size());
- assertEquals("link source is not as expected.", nodeId4, path.get(0).getSource().getSourceNode());
- assertEquals("link destination is not as expected.", nodeId2, path.get(0).getDestination().getDestNode());
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sample.l2switch.md.topology;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.sample.l2switch.md.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- */
-public class TopologyLinkDataChangeHandlerTest {
- NetworkGraphService networkGraphService;
- DataBrokerService dataBrokerService;
- DataChangeEvent dataChangeEvent;
- Topology topology;
- Link link;
-
- @Before
- public void init() {
- networkGraphService = mock(NetworkGraphService.class);
- dataBrokerService = mock(DataBrokerService.class);
- dataChangeEvent = mock(DataChangeEvent.class);
- link = mock(Link.class);
- topology = mock(Topology.class);
- }
-
- @Test
- public void testOnDataChange() throws Exception {
- TopologyLinkDataChangeHandler topologyLinkDataChangeHandler = new TopologyLinkDataChangeHandler(dataBrokerService, networkGraphService, 2);
- Map<InstanceIdentifier<?>, DataObject> original = new HashMap<InstanceIdentifier<?>, DataObject>();
- InstanceIdentifier<?> instanceIdentifier = InstanceIdentifierUtils.generateTopologyInstanceIdentifier("flow:1");
- DataObject dataObject = mock(DataObject.class);
- Map<InstanceIdentifier<?>, DataObject> updated = new HashMap<InstanceIdentifier<?>, DataObject>();
- updated.put(instanceIdentifier, dataObject);
- when(dataChangeEvent.getUpdatedOperationalData()).thenReturn(updated);
- when(dataChangeEvent.getOriginalOperationalData()).thenReturn(original);
- List<Link> links = new ArrayList<>();
- links.add(link);
- when(dataBrokerService.readOperationalData(instanceIdentifier)).thenReturn(topology);
- when(topology.getLink()).thenReturn(links);
-
- topologyLinkDataChangeHandler.onDataChanged(dataChangeEvent);
- Thread.sleep(2100);
- verify(networkGraphService, times(1)).addLinks(links);
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../..</relativePath>
- </parent>
- <groupId>org.opendaylight.controller.samples.l2switch.md</groupId>
- <artifactId>l2switch-model</artifactId>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types</artifactId>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
- <Import-Package>org.opendaylight.yangtools.yang.binding.annotations, *</Import-Package>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </instructions>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-maven-plugin</artifactId>
- <version>${yangtools.version}</version>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>maven-sal-api-gen-plugin</artifactId>
- <version>${yangtools.version}</version>
- <type>jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- <version>${yangtools.version}</version>
- <type>jar</type>
- </dependency>
- </dependencies>
- <executions>
- <execution>
- <goals>
- <goal>generate-sources</goal>
- </goals>
- <configuration>
- <yangFilesRootDir>src/main/yang</yangFilesRootDir>
- <codeGenerators>
- <generator>
- <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
- <outputBaseDir>${salGeneratorPath}</outputBaseDir>
- </generator>
- <generator>
- <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
- <outputBaseDir>target/site/models</outputBaseDir>
- </generator>
- <generator>
- <codeGeneratorClass>org.opendaylight.yangtools.yang.wadl.generator.maven.WadlGenerator</codeGeneratorClass>
- <outputBaseDir>target/site/models</outputBaseDir>
- </generator>
- </codeGenerators>
- <inspectDependencies>true</inspectDependencies>
- </configuration>
- </execution>
- </executions>
-
- </plugin>
- </plugins>
- </build>
-</project>
+++ /dev/null
-module l2-address-tracker {
- yang-version 1;
- namespace "urn:opendaylight:l2-address-tracker";
- prefix l2-address-tracker;
-
- import ietf-yang-types {
- prefix yang;
- revision-date 2010-09-24;
- }
- import opendaylight-inventory {
- prefix inv;
- revision-date 2013-08-19;
- }
-
- organization "Cisco Systems Inc";
- contact
- "Alex Fan <alefan@cisco.com>";
- description
- "YANG version of the L2 Address Tracker Data Model";
-
- revision 2014-04-02 {
- description
- "L2 Address Tracker module draft.";
- }
-
- grouping l2-address {
- leaf mac {
- type yang:mac-address;
- mandatory true;
- description
- "the mac address of the host.";
- }
- leaf node-connector-ref {
- type inv:node-connector-ref;
- }
- }
-
- container l2-addresses {
- config false;
- list l2-address {
- key "mac";
- uses l2-address;
- }
- }
-}
\ No newline at end of file
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.controller.samples</groupId>
- <artifactId>sal-samples</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- </parent>
-
- <artifactId>l2switch.aggregator</artifactId>
- <groupId>org.opendaylight.controller.samples.l2switch</groupId>
- <version>1.1.0-SNAPSHOT</version>
- <packaging>pom</packaging>
-
- <modules>
- <module>model</module>
- <module>implementation</module>
- </modules>
-
-</project>
<module>toaster-consumer</module>
<module>toaster-provider</module>
<module>toaster-config</module>
- <module>l2switch</module>
<module>clustering-test-app</module>
</modules>
<scm>
{
darknessFactor.set( darkness );
}
+
+ LOG.info("onDataChanged - new Toaster config: {}", toaster);
}
}
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
- <version>${slf4j.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
}
listenerRegistration = null;
}
+
+ super.close();
}
protected final <K extends DataObject> Optional<K> readLatestConfiguration(final InstanceIdentifier<K> path) {
@Override
public void close() throws Exception {
+ LOG.info("StatisticsManager close called");
finishing = true;
if (nodeRegistrator != null) {
nodeRegistrator.close();
org.opendaylight.controller.netconf.confignetconfconnector.util,
org.opendaylight.controller.netconf.confignetconfconnector.osgi,
org.opendaylight.controller.netconf.confignetconfconnector.exception,</Private-Package>
- <Import-Package>com.google.common.base,
- com.google.common.collect,
- javax.annotation,
- javax.management,
- javax.management.openmbean,
- org.opendaylight.controller.config.api,
- org.opendaylight.controller.config.api.jmx,
- org.opendaylight.controller.config.yangjmxgenerator,
- org.opendaylight.controller.config.yangjmxgenerator.attribute,
- org.opendaylight.controller.netconf.api,
- org.opendaylight.controller.netconf.mapping.api,
- org.opendaylight.controller.netconf.util.mapping,
- org.opendaylight.controller.netconf.util.xml,
- org.opendaylight.controller.netconf.util.exception,
- org.opendaylight.yangtools.yang.common,
- org.opendaylight.yangtools.yang.model.api,
- org.osgi.framework,
- org.osgi.util.tracker,
- org.slf4j,
- org.w3c.dom,
- com.google.common.io,
- org.opendaylight.yangtools.yang.model.api.type,
- org.opendaylight.yangtools.sal.binding.generator.spi,
- org.opendaylight.yangtools.sal.binding.yang.types,
- org.opendaylight.controller.config.util
- </Import-Package>
+ <Import-Package>*</Import-Package>
<Export-Package></Export-Package>
</instructions>
</configuration>
protected Object preprocess(Object value) {
Util.checkType(value, Map.class);
- Preconditions.checkArgument(((Map)value).size() == 1, "Unexpected number of values in %s, expected 1", value);
- return ((Map)value).values().iterator().next();
+ Preconditions.checkArgument(((Map<?, ?>)value).size() == 1, "Unexpected number of values in %s, expected 1", value);
+ return ((Map<?, ?>)value).values().iterator().next();
}
}
protected Object preprocess(Object value) {
Util.checkType(value, Map.class);
- Preconditions.checkArgument(((Map)value).size() == 1, "Unexpected number of values in %s, expected 1", value);
- Object stringValue = ((Map) value).values().iterator().next();
+ Preconditions.checkArgument(((Map<?, ?>)value).size() == 1, "Unexpected number of values in %s, expected 1", value);
+ Object stringValue = ((Map<?, ?>) value).values().iterator().next();
Util.checkType(stringValue, String.class);
return stringValue;
protected Object preprocess(Object value) {
Util.checkType(value, Map.class);
- Preconditions.checkArgument(((Map)value).size() == 1, "Unexpected number of values in %s, expected 1", value);
- Object listOfStrings = ((Map) value).values().iterator().next();
+ Preconditions.checkArgument(((Map<?, ?>)value).size() == 1, "Unexpected number of values in %s, expected 1", value);
+ Object listOfStrings = ((Map<?, ?>) value).values().iterator().next();
Util.checkType(listOfStrings, List.class);
StringBuilder b = new StringBuilder();
- for (Object character: (List)listOfStrings) {
+ for (Object character: (List<?>)listOfStrings) {
Util.checkType(character, String.class);
b.append(character);
}
package org.opendaylight.controller.netconf.confignetconfconnector.operations.runtimerpc;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Maps;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.management.ObjectName;
import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.rpc.ModuleRpcs;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.rev130405.Modules;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.rev130405.modules.Module;
-import javax.management.ObjectName;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
/**
* Represents parsed xpath to runtime bean instance
return moduleName;
}
+ @VisibleForTesting
+ Map<String, String> getAdditionalAttributes() {
+ return additionalAttributes;
+ }
+
public String getInstanceName() {
return instanceName;
}
return ObjectNameUtil.createRuntimeBeanName(moduleName, instanceName, additionalAttributesJavaNames);
}
+ /**
+ * Pattern for an absolute instance identifier xpath pointing to a runtime bean instance e.g:
+ * <pre>
+ * /modules/module[name=instanceName][type=moduleType]
+ * </pre>
+ * or
+ * <pre>
+ * /a:modules/a:module[a:name=instanceName][a:type=moduleType]
+ * </pre>
+ */
private static final String xpathPatternBlueprint =
- "/" + XmlNetconfConstants.MODULES_KEY
- + "/" + XmlNetconfConstants.MODULE_KEY
- + "\\["
-
- + "(?<key1>type|name)"
- + "='(?<value1>[^']+)'"
- + "( and |\\]\\[)"
- + "(?<key2>type|name)"
- + "='(?<value2>[^']+)'"
-
- + "\\]"
- + "(?<additional>.*)";
+ "/" + getRegExForPrefixedName(Modules.QNAME.getLocalName())+ "/" + getRegExForPrefixedName(Module.QNAME.getLocalName())
+
+ + "\\["
+ + "(?<key1>" + getRegExForPrefixedName(XmlNetconfConstants.TYPE_KEY) + "|" + getRegExForPrefixedName(XmlNetconfConstants.NAME_KEY) + ")"
+ + "=('|\")?(?<value1>[^'\"\\]]+)('|\")?"
+ + "( and |\\]\\[)"
+ + "(?<key2>" + getRegExForPrefixedName(XmlNetconfConstants.TYPE_KEY) + "|" + getRegExForPrefixedName(XmlNetconfConstants.NAME_KEY) + ")"
+ + "=('|\")?(?<value2>[^'\"\\]]+)('|\")?"
+ + "\\]"
+
+ + "(?<additional>.*)";
+
+ /**
+ * Return reg ex that matches either the name with or without a prefix
+ */
+ private static String getRegExForPrefixedName(final String name) {
+ return "([^:]+:)?" + name;
+ }
private static final Pattern xpathPattern = Pattern.compile(xpathPatternBlueprint);
- private static final String additionalPatternBlueprint = "(?<additionalKey>.+)\\[(.+)='(?<additionalValue>.+)'\\]";
+
+ /**
+ * Pattern for additional path elements inside xpath for instance identifier pointing to an inner runtime bean e.g:
+ * <pre>
+ * /modules/module[name=instanceName and type=moduleType]/inner[key=b]
+ * </pre>
+ */
+ private static final String additionalPatternBlueprint = getRegExForPrefixedName("(?<additionalKey>.+)") + "\\[(?<prefixedKey>" + getRegExForPrefixedName("(.+)") + ")=('|\")?(?<additionalValue>[^'\"\\]]+)('|\")?\\]";
private static final Pattern additionalPattern = Pattern.compile(additionalPatternBlueprint);
public static RuntimeRpcElementResolved fromXpath(String xpath, String elementName, String namespace) {
PatternGroupResolver(String key1, String value1, String value2, String additional) {
this.key1 = Preconditions.checkNotNull(key1);
this.value1 = Preconditions.checkNotNull(value1);
-
this.value2 = Preconditions.checkNotNull(value2);
-
this.additional = Preconditions.checkNotNull(additional);
}
String getModuleName() {
- return key1.equals(XmlNetconfConstants.TYPE_KEY) ? value1 : value2;
+ return key1.contains(XmlNetconfConstants.TYPE_KEY) ? value1 : value2;
}
String getInstanceName() {
- return key1.equals(XmlNetconfConstants.NAME_KEY) ? value1 : value2;
+ return key1.contains(XmlNetconfConstants.NAME_KEY) ? value1 : value2;
}
+
Map<String, String> getAdditionalKeys(String elementName, String moduleName) {
HashMap<String, String> additionalAttributes = Maps.newHashMap();
Preconditions
.checkState(
matcher.matches(),
- "Attribute %s not in required form on rpc element %s, required format for additional attributes is %s",
+ "Attribute %s not in required form on rpc element %s, required format for additional attributes is: %s",
additionalKeyValue, elementName, additionalPatternBlueprint);
String name = matcher.group("additionalKey");
runtimeBeanYangName = name;
private static final Logger logger = LoggerFactory.getLogger(Activator.class);
private BundleContext context;
- private ServiceRegistration osgiRegistration;
+ private ServiceRegistration<?> osgiRegistration;
private ConfigRegistryLookupThread configRegistryLookup = null;
@Override
import org.custommonkey.xmlunit.NodeTester;
import org.custommonkey.xmlunit.XMLAssert;
import org.custommonkey.xmlunit.XMLUnit;
+import org.hamcrest.CoreMatchers;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
-import org.junit.matchers.JUnitMatchers;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.config.api.ConflictingVersionException;
}
private void assertContainsString(String string, String substring) {
- assertThat(string, JUnitMatchers.containsString(substring));
+ assertThat(string, CoreMatchers.containsString(substring));
}
private void checkEnum(final Document response) throws Exception {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.confignetconfconnector.operations.runtimerpc;
+
+import static org.junit.Assert.assertEquals;
+import com.google.common.collect.ImmutableMap;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class RuntimeRpcElementResolvedTest {
+
+ private static final String MODULE_TYPE = "moduleType";
+ private static final String INSTANCE_NAME = "instanceName";
+ @Parameterized.Parameter(0)
+ public String xpath;
+ @Parameterized.Parameter(1)
+ public Map<String, String> additional;
+
+ @Parameterized.Parameters(name = "{index}: parsed({0}) contains moduleName:{1} and instanceName:{2}")
+ public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] {
+ // With namespaces
+ { "/a:modules/a:module[a:name='instanceName'][a:type='moduleType']/b:listener-state[b:peer-id='127.0.0.1']",
+ new HashMap<>(ImmutableMap.of("listener-state", "127.0.0.1"))},
+ { "/a:modules/a:module[a:name='instanceName'][a:type='moduleType']",
+ null},
+
+ // Without namespaces
+ { "/modules/module[name=instanceName][type=moduleType]", null},
+ { "/modules/module[type=moduleType][name='instanceName']", null},
+ { "/modules/module[name=\'instanceName\'][type=\"moduleType\"]", null},
+ { "/modules/module[type=moduleType and name=instanceName]", null},
+ { "/modules/module[name=\"instanceName\" and type=moduleType]", null},
+ { "/modules/module[type=\"moduleType\" and name=instanceName]", null},
+ { "/modules/module[name=\'instanceName\' and type=\"moduleType\"]", null},
+
+ // With inner beans
+ { "/modules/module[name=instanceName and type=\"moduleType\"]/inner[key=b]", Collections.singletonMap("inner", "b")},
+ { "/modules/module[name=instanceName and type=moduleType]/inner[key=b]", Collections.singletonMap("inner", "b")},
+ { "/modules/module[name=instanceName and type=moduleType]/inner[key=\'b\']", Collections.singletonMap("inner", "b")},
+ { "/modules/module[name=instanceName and type=moduleType]/inner[key=\"b\"]", Collections.singletonMap("inner", "b")},
+
+ { "/modules/module[name=instanceName and type=\"moduleType\"]/inner[key2=a]/inner2[key=b]",
+ new HashMap<>(ImmutableMap.of("inner", "a", "inner2", "b"))
+ },
+ });
+ }
+
+ @Test
+ public void testFromXpath() throws Exception {
+ final RuntimeRpcElementResolved resolved = RuntimeRpcElementResolved.fromXpath(xpath, "element", "namespace");
+ assertEquals(MODULE_TYPE, resolved.getModuleName());
+ assertEquals(INSTANCE_NAME, resolved.getInstanceName());
+ if (additional != null) {
+ assertEquals(additional, resolved.getAdditionalAttributes());
+ }
+ }
+}
import org.junit.Assert;
import org.junit.Test;
-import org.junit.matchers.JUnitMatchers;
+import org.hamcrest.CoreMatchers;
import org.opendaylight.controller.config.api.LookupRegistry;
import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
import org.opendaylight.yangtools.yang.common.QName;
String message = e.getMessage();
Assert.assertThat(
message,
- JUnitMatchers
+ CoreMatchers
.containsString("missing from config subsystem but present in yangstore: [(namespace?revision=1970-01-01)qname2]"));
Assert.assertThat(
message,
- JUnitMatchers
+ CoreMatchers
.containsString("All modules present in config: [(namespace?revision=1970-01-01)qname1]"));
}
}
package org.opendaylight.controller.netconf.persist.impl;
import com.google.common.annotations.VisibleForTesting;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Attr;
import org.w3c.dom.Element;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
/**
* Inspects snapshot xml to be stored, remove all capabilities that are not referenced by it.
* Useful when persisting current configuration.
*/
public class CapabilityStrippingConfigSnapshotHolder implements ConfigSnapshotHolder {
- private static final Logger logger = LoggerFactory.getLogger(CapabilityStrippingConfigSnapshotHolder.class);
+ private static final Logger LOG = LoggerFactory.getLogger(CapabilityStrippingConfigSnapshotHolder.class);
private final String configSnapshot;
private final StripCapabilitiesResult stripCapabilitiesResult;
static StripCapabilitiesResult stripCapabilities(XmlElement configElement, Set<String> allCapabilitiesFromHello) {
// collect all namespaces
Set<String> foundNamespacesInXML = getNamespaces(configElement);
- logger.trace("All capabilities {}\nFound namespaces in XML {}", allCapabilitiesFromHello, foundNamespacesInXML);
+ LOG.trace("All capabilities {}\nFound namespaces in XML {}", allCapabilitiesFromHello, foundNamespacesInXML);
// required are referenced both in xml and hello
SortedSet<String> requiredCapabilities = new TreeSet<>();
// can be removed
}
}
- logger.trace("Required capabilities {}, \nObsolete capabilities {}",
+ LOG.trace("Required capabilities {}, \nObsolete capabilities {}",
requiredCapabilities, obsoleteCapabilities);
return new StripCapabilitiesResult(requiredCapabilities, obsoleteCapabilities);
package org.opendaylight.controller.netconf.persist.impl;
-import org.opendaylight.controller.config.persist.api.Persister;
-import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
-import org.opendaylight.controller.netconf.api.jmx.DefaultCommitOperationMXBean;
-import org.opendaylight.controller.netconf.api.jmx.NetconfJMXNotification;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import java.io.Closeable;
+import java.io.IOException;
import javax.annotation.concurrent.ThreadSafe;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanServerConnection;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectName;
-import java.io.Closeable;
-import java.io.IOException;
+import org.opendaylight.controller.config.persist.api.Persister;
+import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
+import org.opendaylight.controller.netconf.api.jmx.DefaultCommitOperationMXBean;
+import org.opendaylight.controller.netconf.api.jmx.NetconfJMXNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Responsible for listening for notifications from netconf (via JMX) containing latest
@ThreadSafe
public class ConfigPersisterNotificationHandler implements Closeable {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterNotificationHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigPersisterNotificationHandler.class);
private final MBeanServerConnection mBeanServerConnection;
private final NotificationListener listener;
}
private static void registerAsJMXListener(final MBeanServerConnection mBeanServerConnection, final NotificationListener listener) {
- logger.trace("Called registerAsJMXListener");
+ LOG.trace("Called registerAsJMXListener");
try {
mBeanServerConnection.addNotificationListener(DefaultCommitOperationMXBean.OBJECT_NAME, listener, null, null);
} catch (InstanceNotFoundException | IOException e) {
mBeanServerConnection.removeNotificationListener(on, listener);
}
} catch (final Exception e) {
- logger.warn("Unable to unregister {} as listener for {}", listener, on, e);
+ LOG.warn("Unable to unregister {} as listener for {}", listener, on, e);
}
}
}
class ConfigPersisterNotificationListener implements NotificationListener {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterNotificationListener.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigPersisterNotificationListener.class);
private final Persister persisterAggregator;
// Socket should not be closed at this point
// Activator unregisters this as JMX listener before close is called
- logger.trace("Received notification {}", notification);
+ LOG.trace("Received notification {}", notification);
if (notification instanceof CommitJMXNotification) {
try {
handleAfterCommitNotification((CommitJMXNotification) notification);
} catch (final Exception e) {
// log exceptions from notification Handler here since
// notificationBroadcastSupport logs only DEBUG level
- logger.warn("Failed to handle notification {}", notification, e);
+ LOG.warn("Failed to handle notification {}", notification, e);
throw e;
}
} else {
try {
persisterAggregator.persistConfig(new CapabilityStrippingConfigSnapshotHolder(notification.getConfigSnapshot(),
notification.getCapabilities()));
- logger.trace("Configuration persisted successfully");
+ LOG.trace("Configuration persisted successfully");
} catch (final IOException e) {
throw new RuntimeException("Unable to persist configuration snapshot", e);
}
@Immutable
public class ConfigPusherImpl implements ConfigPusher {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigPusherImpl.class);
private final long maxWaitForCapabilitiesMillis;
private final long conflictingVersionTimeoutMillis;
* it is good idea to perform garbage collection to prune
* any garbage we have accumulated during startup.
*/
- logger.debug("Running post-initialization garbage collection...");
+ LOG.debug("Running post-initialization garbage collection...");
System.gc();
- logger.debug("Post-initialization garbage collection completed.");
- logger.debug("ConfigPusher has pushed configs {}, gc completed", configs);
+ LOG.debug("Post-initialization garbage collection completed.");
+ LOG.debug("ConfigPusher has pushed configs {}, gc completed", configs);
}
catch (NetconfDocumentedException e) {
- logger.error("Error pushing configs {}",configs);
+ LOG.error("Error pushing configs {}",configs);
throw new IllegalStateException(e);
}
}
}
public void pushConfigs(List<? extends ConfigSnapshotHolder> configs) throws InterruptedException {
- logger.debug("Requested to push configs {}", configs);
+ LOG.debug("Requested to push configs {}", configs);
this.queue.put(configs);
}
private LinkedHashMap<? extends ConfigSnapshotHolder, EditAndCommitResponse> internalPushConfigs(List<? extends ConfigSnapshotHolder> configs) throws NetconfDocumentedException {
- logger.debug("Last config snapshots to be pushed to netconf: {}", configs);
+ LOG.debug("Last config snapshots to be pushed to netconf: {}", configs);
LinkedHashMap<ConfigSnapshotHolder, EditAndCommitResponse> result = new LinkedHashMap<>();
// start pushing snapshots:
for (ConfigSnapshotHolder configSnapshotHolder : configs) {
if(configSnapshotHolder != null) {
EditAndCommitResponse editAndCommitResponseWithRetries = pushConfigWithConflictingVersionRetries(configSnapshotHolder);
- logger.debug("Config snapshot pushed successfully: {}, result: {}", configSnapshotHolder, result);
+ LOG.debug("Config snapshot pushed successfully: {}, result: {}", configSnapshotHolder, result);
result.put(configSnapshotHolder, editAndCommitResponseWithRetries);
}
}
- logger.debug("All configuration snapshots have been pushed successfully.");
+ LOG.debug("All configuration snapshots have been pushed successfully.");
return result;
}
return pushConfig(configSnapshotHolder, operationService);
} catch (ConflictingVersionException e) {
lastException = e;
- logger.debug("Conflicting version detected, will retry after timeout");
+ LOG.debug("Conflicting version detected, will retry after timeout");
sleep();
}
} while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < conflictingVersionTimeoutMillis);
try {
return getOperationService(expectedCapabilities, idForReporting);
} catch (NotEnoughCapabilitiesException e) {
- logger.debug("Not enough capabilities: " + e.toString());
+ LOG.debug("Not enough capabilities: {}", e.toString());
lastException = e;
sleep();
}
return serviceCandidate;
} else {
serviceCandidate.close();
- logger.trace("Netconf server did not provide required capabilities for {} " +
+ LOG.trace("Netconf server did not provide required capabilities for {} ", idForReporting,
"Expected but not found: {}, all expected {}, current {}",
- idForReporting, notFoundDiff, expectedCapabilities, serviceCandidate.getCapabilities()
+ notFoundDiff, expectedCapabilities, serviceCandidate.getCapabilities()
);
throw new NotEnoughCapabilitiesException("Not enough capabilities for " + idForReporting + ". Expected but not found: " + notFoundDiff);
}
} catch (SAXException | IOException e) {
throw new IllegalStateException("Cannot parse " + configSnapshotHolder);
}
- logger.trace("Pushing last configuration to netconf: {}", configSnapshotHolder);
+ LOG.trace("Pushing last configuration to netconf: {}", configSnapshotHolder);
Stopwatch stopwatch = new Stopwatch().start();
NetconfMessage editConfigMessage = createEditConfigMessage(xmlToBePersisted);
Document commitResponseMessage = sendRequestGetResponseCheckIsOK(getCommitMessage(), operationService,
"commit", configSnapshotHolder.toString());
- if (logger.isTraceEnabled()) {
+ if (LOG.isTraceEnabled()) {
StringBuilder response = new StringBuilder("editConfig response = {");
response.append(XmlUtil.toString(editResponseMessage));
response.append("}");
response.append("commit response = {");
response.append(XmlUtil.toString(commitResponseMessage));
response.append("}");
- logger.trace("Last configuration loaded successfully");
- logger.trace("Detailed message {}", response);
- logger.trace("Total time spent {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ LOG.trace("Last configuration loaded successfully");
+ LOG.trace("Detailed message {}", response);
+ LOG.trace("Total time spent {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
}
return new EditAndCommitResponse(editResponseMessage, commitResponseMessage);
}
package org.opendaylight.controller.netconf.persist.impl;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.config.persist.api.PropertiesProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
public class NoOpStorageAdapter implements StorageAdapter, Persister {
- private static final Logger logger = LoggerFactory.getLogger(NoOpStorageAdapter.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NoOpStorageAdapter.class);
@Override
public Persister instantiate(PropertiesProvider propertiesProvider) {
- logger.debug("instantiate called with {}", propertiesProvider);
+ LOG.debug("instantiate called with {}", propertiesProvider);
return this;
}
@Override
public void persistConfig(ConfigSnapshotHolder holder) throws IOException {
- logger.debug("persistConfig called with {}", holder);
+ LOG.debug("persistConfig called with {}", holder);
}
@Override
public List<ConfigSnapshotHolder> loadLastConfigs() throws IOException {
- logger.debug("loadLastConfig called");
+ LOG.debug("loadLastConfig called");
return Collections.emptyList();
}
@Override
public void close() {
- logger.debug("close called");
+ LOG.debug("close called");
}
}
*
*/
public final class PersisterAggregator implements Persister {
- private static final Logger logger = LoggerFactory.getLogger(PersisterAggregator.class);
+ private static final Logger LOG = LoggerFactory.getLogger(PersisterAggregator.class);
public static class PersisterWithConfiguration {
persisterWithConfigurations.add(PersisterAggregator.loadConfiguration(index, propertiesProvider));
}
}
- logger.debug("Initialized persister with following adapters {}", persisterWithConfigurations);
+ LOG.debug("Initialized persister with following adapters {}", persisterWithConfigurations);
return new PersisterAggregator(persisterWithConfigurations);
}
public void persistConfig(ConfigSnapshotHolder holder) throws IOException {
for (PersisterWithConfiguration persisterWithConfiguration: persisterWithConfigurations){
if (!persisterWithConfiguration.readOnly){
- logger.debug("Calling {}.persistConfig", persisterWithConfiguration.getStorage());
+ LOG.debug("Calling {}.persistConfig", persisterWithConfiguration.getStorage());
persisterWithConfiguration.getStorage().persistConfig(holder);
}
}
throw new RuntimeException("Error while calling loadLastConfig on " + persisterWithConfiguration, e);
}
if (!configs.isEmpty()) {
- logger.debug("Found non empty configs using {}:{}", persisterWithConfiguration, configs);
+ LOG.debug("Found non empty configs using {}:{}", persisterWithConfiguration, configs);
return configs;
}
}
// no storage had an answer
- logger.debug("No non-empty list of configuration snapshots found");
+ LOG.debug("No non-empty list of configuration snapshots found");
return Collections.emptyList();
}
try{
persisterWithConfiguration.storage.close();
}catch(RuntimeException e) {
- logger.error("Error while closing {}", persisterWithConfiguration.storage, e);
+ LOG.error("Error while closing {}", persisterWithConfiguration.storage, e);
if (lastException == null){
lastException = e;
} else {
package org.opendaylight.controller.netconf.persist.impl.osgi;
+import com.google.common.annotations.VisibleForTesting;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
-
import javax.management.MBeanServer;
-
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
-
public class ConfigPersisterActivator implements BundleActivator {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterActivator.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigPersisterActivator.class);
private static final MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
public static final String MAX_WAIT_FOR_CAPABILITIES_MILLIS_PROPERTY = "maxWaitForCapabilitiesMillis";
@Override
public void start(final BundleContext context) throws Exception {
- logger.debug("ConfigPersister starting");
+ LOG.debug("ConfigPersister starting");
this.context = context;
autoCloseables = new ArrayList<>();
long maxWaitForCapabilitiesMillis = getMaxWaitForCapabilitiesMillis(propertiesProvider);
List<ConfigSnapshotHolder> configs = persisterAggregator.loadLastConfigs();
long conflictingVersionTimeoutMillis = getConflictingVersionTimeoutMillis(propertiesProvider);
- logger.debug("Following configs will be pushed: {}", configs);
+ LOG.debug("Following configs will be pushed: {}", configs);
InnerCustomizer innerCustomizer = new InnerCustomizer(configs, maxWaitForCapabilitiesMillis,
conflictingVersionTimeoutMillis, persisterAggregator);
@Override
public NetconfOperationProvider addingService(ServiceReference<NetconfOperationProvider> reference) {
- logger.trace("Got OuterCustomizer.addingService {}", reference);
+ LOG.trace("Got OuterCustomizer.addingService {}", reference);
// JMX was registered, track config-netconf-connector
Filter filter;
try {
@Override
public NetconfOperationServiceFactory addingService(ServiceReference<NetconfOperationServiceFactory> reference) {
- logger.trace("Got InnerCustomizer.addingService {}", reference);
+ LOG.trace("Got InnerCustomizer.addingService {}", reference);
NetconfOperationServiceFactory service = reference.getBundle().getBundleContext().getService(reference);
- logger.debug("Creating new job queue");
+ LOG.debug("Creating new job queue");
final ConfigPusherImpl configPusher = new ConfigPusherImpl(service, maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis);
- logger.debug("Configuration Persister got {}", service);
- logger.debug("Context was {}", context);
- logger.debug("Registration was {}", registration);
+ LOG.debug("Configuration Persister got {}", service);
+ LOG.debug("Context was {}", context);
+ LOG.debug("Registration was {}", registration);
final Thread pushingThread = new Thread(new Runnable() {
@Override
registration = context.registerService(ConfigPusher.class.getName(), configPusher, null);
configPusher.process(autoCloseables, platformMBeanServer, persisterAggregator);
} else {
- logger.warn("Unable to process configs as BundleContext is null");
+ LOG.warn("Unable to process configs as BundleContext is null");
}
} catch (InterruptedException e) {
- logger.info("ConfigPusher thread stopped",e);
+ LOG.info("ConfigPusher thread stopped",e);
}
- logger.info("Configuration Persister initialization completed.");
+ LOG.info("Configuration Persister initialization completed.");
}
}, "config-pusher");
synchronized (autoCloseables) {
public class PropertiesProviderBaseImpl implements PropertiesProvider {
- private static final Logger logger = LoggerFactory.getLogger(PropertiesProviderBaseImpl.class);
+ private static final Logger LOG = LoggerFactory.getLogger(PropertiesProviderBaseImpl.class);
private final BundleContext bundleContext;
public PropertiesProviderBaseImpl(BundleContext bundleContext) {
}
public String getPropertyWithoutPrefix(String fullKey){
- logger.trace("Full key {}", fullKey);
+ LOG.trace("Full key {}", fullKey);
return bundleContext.getProperty(fullKey);
}
*/
package org.opendaylight.controller.netconf.persist.impl;
+import static org.junit.Assert.assertEquals;
+
import com.google.common.collect.Sets;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
import org.apache.commons.io.IOUtils;
import org.junit.Test;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Element;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-
public class CapabilityStrippingConfigSnapshotHolderTest {
@Test
import static org.mockito.Mockito.verify;
import javax.management.MBeanServerConnection;
-
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.ObjectName;
package org.opendaylight.controller.netconf.persist.impl;
+import com.google.common.collect.Lists;
import java.util.Collections;
-
import javax.management.Notification;
-
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.jmx.NetconfJMXNotification;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import com.google.common.collect.Lists;
-
public class ConfigPersisterNotificationListenerTest {
@Mock
*/
package org.opendaylight.controller.netconf.persist.impl;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.config.persist.api.PropertiesProvider;
import org.opendaylight.controller.config.persist.api.StorageAdapter;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
public class DummyAdapter implements StorageAdapter, Persister {
static int persist = 0;
package org.opendaylight.controller.netconf.persist.impl;
-import com.google.common.collect.Lists;
-
-import org.junit.Test;
-import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
-import org.opendaylight.controller.config.persist.api.Persister;
-import org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter;
-import org.opendaylight.controller.netconf.persist.impl.osgi.ConfigPersisterActivator;
-import org.opendaylight.controller.netconf.persist.impl.osgi.PropertiesProviderBaseImpl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Properties;
-
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.opendaylight.controller.netconf.persist.impl.PersisterAggregator.PersisterWithConfiguration;
-import static org.opendaylight.controller.netconf.persist.impl.PersisterAggregatorTest.TestingPropertiesProvider.loadFile;
+
+import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+import org.junit.Test;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.api.Persister;
+import org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter;
+import org.opendaylight.controller.netconf.persist.impl.osgi.ConfigPersisterActivator;
+import org.opendaylight.controller.netconf.persist.impl.osgi.PropertiesProviderBaseImpl;
public class PersisterAggregatorTest {
@Test
public void testDummyAdapter() throws Exception {
- PersisterAggregator persisterAggregator = PersisterAggregator.createFromProperties(loadFile("test1.properties"));
+ PersisterAggregator persisterAggregator = PersisterAggregator.createFromProperties(TestingPropertiesProvider.loadFile("test1.properties"));
List<PersisterWithConfiguration> persisters = persisterAggregator.getPersisterWithConfigurations();
assertEquals(1, persisters.size());
PersisterWithConfiguration persister = persisters.get(0);
@Test
public void testLoadFromPropertyFile() throws Exception {
- PersisterAggregator persisterAggregator = PersisterAggregator.createFromProperties(loadFile("test2.properties"));
+ PersisterAggregator persisterAggregator = PersisterAggregator.createFromProperties(TestingPropertiesProvider.loadFile("test2.properties"));
List<PersisterWithConfiguration> persisters = persisterAggregator.getPersisterWithConfigurations();
assertEquals(1, persisters.size());
PersisterWithConfiguration persister = persisters.get(0);
@Test
public void testFileStorageNumberOfBackups() throws Exception {
try {
- PersisterAggregator.createFromProperties(loadFile("test3.properties"));
+ PersisterAggregator.createFromProperties(TestingPropertiesProvider.loadFile("test3.properties"));
fail();
} catch (RuntimeException e) {
assertThat(
*/
package org.opendaylight.controller.netconf.persist.impl.osgi;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+
import com.google.common.collect.Sets;
+import java.io.IOException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
-import java.io.IOException;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-
public class ConfigPersisterTest {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterTest.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ConfigPersisterTest.class);
private MockedBundleContext ctx;
private ConfigPersisterActivator configPersisterActivator;
doReturn(getConflictingService()).when(ctx.serviceFactory).createService(anyString());
Thread.sleep(500);
// working service:
- logger.info("Switching to working service **");
+ LOG.info("Switching to working service **");
doReturn(getWorkingService(getOKDocument())).when(ctx.serviceFactory).createService(anyString());
Thread.sleep(1000);
assertCannotRegisterAsJMXListener_pushWasSuccessful();
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;
-
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.osgi.framework.ServiceReference;
import org.osgi.framework.ServiceRegistration;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
final class MockedBundleContext {
@Mock
private BundleContext context;
*/
package org.opendaylight.controller.netconf.persist.impl.osgi;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-import org.junit.matchers.JUnitMatchers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
final class TestingExceptionHandler implements Thread.UncaughtExceptionHandler {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterTest.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestingExceptionHandler.class);
private Throwable t;
@Override
public void uncaughtException(Thread t, Throwable e) {
- logger.debug("Uncaught exception in thread {}", t, e);
+ LOG.debug("Uncaught exception in thread {}", t, e);
this.t = e;
}
private void assertException(Throwable t, Class<? extends Exception> exType, String exMessageToContain) {
assertEquals("Expected exception of type " + exType + " but was " + t, exType, t.getClass());
if(exMessageToContain!=null) {
- assertThat(t.getMessage(), JUnitMatchers.containsString(exMessageToContain));
+ assertThat(t.getMessage(), containsString(exMessageToContain));
}
}
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
-
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
-
import org.opendaylight.controller.config.api.ConflictingVersionException;
import org.opendaylight.controller.config.api.ValidationException;
import org.slf4j.Logger;
package org.opendaylight.controller.netconf.api;
import io.netty.channel.ChannelFuture;
-
import org.opendaylight.protocol.framework.ProtocolSession;
public interface NetconfSession extends ProtocolSession<NetconfMessage> {
package org.opendaylight.controller.netconf.api.jmx;
-import org.w3c.dom.Element;
-
-import javax.management.NotificationBroadcasterSupport;
import java.util.Set;
+import javax.management.NotificationBroadcasterSupport;
+import org.w3c.dom.Element;
public class CommitJMXNotification extends NetconfJMXNotification {
package org.opendaylight.controller.netconf.api.jmx;
-import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
-
import javax.management.ObjectName;
+import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
public interface DefaultCommitOperationMXBean {
package org.opendaylight.controller.netconf.api.jmx;
import java.util.Set;
-
import javax.management.Notification;
import javax.management.NotificationBroadcasterSupport;
-
import org.w3c.dom.Element;
public abstract class NetconfJMXNotification extends Notification {
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import com.google.common.collect.ImmutableMap;
import java.util.Collections;
import java.util.Iterator;
-
import javax.xml.namespace.NamespaceContext;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
-
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
-import com.google.common.collect.ImmutableMap;
-
/**
* Unit tests for NetconfDocumentedException.
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-artifacts</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-config-dispatcher</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-netconf-connector</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-auth</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-cli</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-client</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-connector-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-mapping-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-monitoring</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-netty-util</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-ssh</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-tcp</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-testtool</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-usermanager</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf-monitoring</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf-monitoring-extension</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-client</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-impl</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-netty-util</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-ssh</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>features-netconf</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+</project>
+
import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
-import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import java.net.InetSocketAddress;
import java.util.concurrent.Future;
-import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doReturn;
import io.netty.util.HashedWheelTimer;
import io.netty.util.Timer;
import io.netty.util.concurrent.Promise;
-import org.apache.sshd.common.SessionListener;
import org.junit.Test;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
import org.opendaylight.protocol.framework.SessionListenerFactory;
import org.opendaylight.protocol.framework.SessionNegotiator;
-
-import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
public void testGetSessionNegotiator() throws Exception {
NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
Timer timer = new HashedWheelTimer();
- SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class);
+ SessionListenerFactory<NetconfClientSessionListener> listenerFactory = mock(SessionListenerFactory.class);
doReturn(sessionListener).when(listenerFactory).getSessionListener();
Channel channel = mock(Channel.class);
- Promise promise = mock(Promise.class);
+ Promise<NetconfClientSession> promise = mock(Promise.class);
NetconfClientSessionNegotiatorFactory negotiatorFactory = new NetconfClientSessionNegotiatorFactory(timer,
Optional.<NetconfHelloMessageAdditionalHeader>absent(), 200L);
- SessionNegotiator sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
+ SessionNegotiator<?> sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
assertNotNull(sessionNegotiator);
}
}
package org.opendaylight.controller.netconf.client;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import com.google.common.base.Optional;
-import io.netty.channel.*;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.ChannelProgressivePromise;
import io.netty.handler.ssl.SslHandler;
import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;
-import org.apache.mina.handler.demux.ExceptionHandler;
+import java.util.Set;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
import org.mockito.internal.util.collections.Sets;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences;
import org.opendaylight.controller.netconf.api.NetconfMessage;
-import io.netty.util.Timer;
import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder;
import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder;
import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
-import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.openexi.proc.common.EXIOptions;
import org.w3c.dom.Document;
-import java.util.Set;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
public class NetconfClientSessionNegotiatorTest {
return pipeline;
}
- private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise,
- NetconfMessage startExi) {
+ private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(final Promise<NetconfClientSession> promise,
+ final NetconfMessage startExi) {
ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class);
NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi);
doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class));
NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null);
negotiator.channelActive(null);
- Set caps = Sets.newSet("a", "b");
+ Set<String> caps = Sets.newSet("a", "b");
NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10);
negotiator.handleMessage(helloServerMessage);
verify(promise).setSuccess(anyObject());
NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage);
negotiator.channelActive(null);
- Set caps = Sets.newSet("exi:1.0");
+ Set<String> caps = Sets.newSet("exi:1.0");
NetconfHelloMessage helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
- doAnswer(new Answer() {
+ doAnswer(new Answer<Object>() {
@Override
- public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+ public Object answer(final InvocationOnMock invocationOnMock) throws Throwable {
channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]);
return null;
}
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
import org.openexi.proc.common.EXIOptions;
-import java.util.ArrayList;
import java.util.Collection;
import static org.junit.Assert.assertEquals;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
-import org.opendaylight.controller.config.yang.protocol.framework.NeverReconnectStrategyFactoryModule;
import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
package org.opendaylight.controller.netconf.client;
import io.netty.channel.*;
import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.Promise;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.collections.Sets;
private Channel channel;
private ChannelFuture channelFuture;
- Set caps;
+ Set<String> caps;
private NetconfHelloMessage helloMessage;
private NetconfMessage message;
private NetconfClientSessionListener sessionListener;
NetconfClientSessionNegotiatorFactory negotiatorFactory = mock(NetconfClientSessionNegotiatorFactory.class);
NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
- SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ SessionNegotiator<?> sessionNegotiator = mock(SessionNegotiator.class);
doReturn("").when(sessionNegotiator).toString();
doReturn(sessionNegotiator).when(negotiatorFactory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
ChannelPipeline pipeline = mock(ChannelPipeline.class);
import io.netty.channel.ChannelPipeline;
import io.netty.util.concurrent.Promise;
import org.junit.Test;
-import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer;
import org.opendaylight.protocol.framework.SessionListenerFactory;
import org.opendaylight.protocol.framework.SessionNegotiator;
@Test
public void testInitializeSessionNegotiator() throws Exception {
NetconfClientSessionNegotiatorFactory factory = mock(NetconfClientSessionNegotiatorFactory.class);
- SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+ SessionNegotiator<?> sessionNegotiator = mock(SessionNegotiator.class);
doReturn("").when(sessionNegotiator).toString();
doReturn(sessionNegotiator).when(factory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
NetconfClientSessionListener listener = mock(NetconfClientSessionListener.class);
import com.google.common.base.Optional;
import java.io.IOException;
+import java.util.Map;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.mapping.AbstractNetconfOperation.OperationNameAndNamespace;
return result;
}
- private static void addSubtree(XmlElement filter, XmlElement src, XmlElement dst) {
+ private static void addSubtree(XmlElement filter, XmlElement src, XmlElement dst) throws NetconfDocumentedException {
for (XmlElement srcChild : src.getChildElements()) {
for (XmlElement filterChild : filter.getChildElements()) {
addSubtree2(filterChild, srcChild, dst);
}
}
- private static MatchingResult addSubtree2(XmlElement filter, XmlElement src, XmlElement dstParent) {
+ private static MatchingResult addSubtree2(XmlElement filter, XmlElement src, XmlElement dstParent) throws NetconfDocumentedException {
Document document = dstParent.getDomElement().getOwnerDocument();
MatchingResult matches = matches(src, filter);
if (matches != MatchingResult.NO_MATCH && matches != MatchingResult.CONTENT_MISMATCH) {
* Shallow compare src node to filter: tag name and namespace must match.
* If filter node has no children and has text content, it also must match.
*/
- private static MatchingResult matches(XmlElement src, XmlElement filter) {
+ private static MatchingResult matches(XmlElement src, XmlElement filter) throws NetconfDocumentedException {
boolean tagMatch = src.getName().equals(filter.getName()) &&
src.getNamespaceOptionally().equals(filter.getNamespaceOptionally());
MatchingResult result = null;
// match text content
Optional<String> maybeText = filter.getOnlyTextContentOptionally();
if (maybeText.isPresent()) {
- if (maybeText.equals(src.getOnlyTextContentOptionally())) {
+ if (maybeText.equals(src.getOnlyTextContentOptionally()) || prefixedContentMatches(filter, src)) {
result = MatchingResult.CONTENT_MATCH;
} else {
result = MatchingResult.CONTENT_MISMATCH;
if (result == null) {
result = MatchingResult.NO_MATCH;
}
- logger.debug("Matching {} to {} resulted in {}", src, filter, tagMatch);
+ logger.debug("Matching {} to {} resulted in {}", src, filter, result);
return result;
}
+ private static boolean prefixedContentMatches(final XmlElement filter, final XmlElement src) throws NetconfDocumentedException {
+ final Map.Entry<String, String> prefixToNamespaceOfFilter = filter.findNamespaceOfTextContent();
+ final Map.Entry<String, String> prefixToNamespaceOfSrc = src.findNamespaceOfTextContent();
+
+ final String prefix = prefixToNamespaceOfFilter.getKey();
+ // If this is not a prefixed content, we do not need to continue since content do not match
+ if(prefix.equals(XmlElement.DEFAULT_NAMESPACE_PREFIX)) {
+ return false;
+ }
+ // Namespace mismatch
+ if(!prefixToNamespaceOfFilter.getValue().equals(prefixToNamespaceOfSrc.getValue())) {
+ return false;
+ }
+
+ final String unprefixedFilterContent = filter.getTextContent().substring(prefixToNamespaceOfFilter.getKey().length() + 1);
+ final String unprefixedSrcContnet = src.getTextContent().substring(prefixToNamespaceOfSrc.getKey().length() + 1);
+ // Finally compare unprefixed content
+ return unprefixedFilterContent.equals(unprefixedSrcContnet);
+ }
+
enum MatchingResult {
NO_MATCH, TAG_MATCH, CONTENT_MATCH, CONTENT_MISMATCH
}
-/*\r
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.\r
- *\r
- * This program and the accompanying materials are made available under the\r
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,\r
- * and is available at http://www.eclipse.org/legal/epl-v10.html\r
- */\r
-package org.opendaylight.controller.netconf.impl.mapping.operations;\r
-\r
-import org.opendaylight.controller.netconf.impl.NetconfServerSession;\r
-\r
-public interface DefaultNetconfOperation {\r
- void setNetconfSession(NetconfServerSession s);\r
-}\r
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+
+public interface DefaultNetconfOperation {
+ void setNetconfSession(NetconfServerSession s);
+}
-/*\r
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.\r
- *\r
- * This program and the accompanying materials are made available under the\r
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,\r
- * and is available at http://www.eclipse.org/legal/epl-v10.html\r
- */\r
-package org.opendaylight.controller.netconf.impl.mapping.operations;\r
-\r
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException;\r
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;\r
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;\r
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;\r
-import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;\r
-import org.opendaylight.controller.netconf.api.NetconfMessage;\r
-import org.opendaylight.controller.netconf.impl.NetconfServerSession;\r
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;\r
-import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;\r
-import org.opendaylight.controller.netconf.util.xml.XmlElement;\r
-import org.opendaylight.controller.netconf.util.xml.XmlUtil;\r
-import org.slf4j.Logger;\r
-import org.slf4j.LoggerFactory;\r
-import org.w3c.dom.Document;\r
-import org.w3c.dom.Element;\r
-public class DefaultStartExi extends AbstractSingletonNetconfOperation implements DefaultNetconfOperation {\r
- public static final String START_EXI = "start-exi";\r
-\r
- private static final Logger logger = LoggerFactory.getLogger(DefaultStartExi.class);\r
- private NetconfServerSession netconfSession;\r
-\r
- public DefaultStartExi(String netconfSessionIdForReporting) {\r
- super(netconfSessionIdForReporting);\r
- }\r
-\r
- @Override\r
- public Document handle(Document message,\r
- NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {\r
- logger.debug("Received start-exi message {} ", XmlUtil.toString(message));\r
-\r
- try {\r
- netconfSession.startExiCommunication(new NetconfMessage(message));\r
- } catch (IllegalArgumentException e) {\r
- throw new NetconfDocumentedException("Failed to parse EXI parameters", ErrorType.protocol,\r
- ErrorTag.operation_failed, ErrorSeverity.error);\r
- }\r
-\r
- return super.handle(message, subsequentOperation);\r
- }\r
-\r
- @Override\r
- protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {\r
- Element getSchemaResult = document.createElementNS( XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, XmlNetconfConstants.OK);\r
- logger.trace("{} operation successful", START_EXI);\r
- return getSchemaResult;\r
- }\r
-\r
- @Override\r
- protected String getOperationName() {\r
- return START_EXI;\r
- }\r
-\r
- @Override\r
- protected String getOperationNamespace() {\r
- return XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0;\r
- }\r
-\r
- @Override\r
- public void setNetconfSession(NetconfServerSession s) {\r
- netconfSession = s;\r
- }\r
-}\r
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+public class DefaultStartExi extends AbstractSingletonNetconfOperation implements DefaultNetconfOperation {
+ public static final String START_EXI = "start-exi";
+
+ private static final Logger logger = LoggerFactory.getLogger(DefaultStartExi.class);
+ private NetconfServerSession netconfSession;
+
+ public DefaultStartExi(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ }
+
+ @Override
+ public Document handle(Document message,
+ NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+ logger.debug("Received start-exi message {} ", XmlUtil.toString(message));
+
+ try {
+ netconfSession.startExiCommunication(new NetconfMessage(message));
+ } catch (IllegalArgumentException e) {
+ throw new NetconfDocumentedException("Failed to parse EXI parameters", ErrorType.protocol,
+ ErrorTag.operation_failed, ErrorSeverity.error);
+ }
+
+ return super.handle(message, subsequentOperation);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ Element getSchemaResult = document.createElementNS( XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, XmlNetconfConstants.OK);
+ logger.trace("{} operation successful", START_EXI);
+ return getSchemaResult;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return START_EXI;
+ }
+
+ @Override
+ protected String getOperationNamespace() {
+ return XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0;
+ }
+
+ @Override
+ public void setNetconfSession(NetconfServerSession s) {
+ netconfSession = s;
+ }
+}
-/*\r
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.\r
- *\r
- * This program and the accompanying materials are made available under the\r
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,\r
- * and is available at http://www.eclipse.org/legal/epl-v10.html\r
- */\r
-package org.opendaylight.controller.netconf.impl.mapping.operations;\r
-\r
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException;\r
-import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;\r
-import org.opendaylight.controller.netconf.impl.NetconfServerSession;\r
-import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;\r
-import org.opendaylight.controller.netconf.util.xml.XmlElement;\r
-import org.opendaylight.controller.netconf.util.xml.XmlUtil;\r
-import org.slf4j.Logger;\r
-import org.slf4j.LoggerFactory;\r
-import org.w3c.dom.Document;\r
-import org.w3c.dom.Element;\r
-\r
-public class DefaultStopExi extends AbstractSingletonNetconfOperation implements DefaultNetconfOperation {\r
-\r
- public static final String STOP_EXI = "stop-exi";\r
- private NetconfServerSession netconfSession;\r
-\r
- private static final Logger logger = LoggerFactory\r
- .getLogger(DefaultStartExi.class);\r
-\r
- public DefaultStopExi(String netconfSessionIdForReporting) {\r
- super(netconfSessionIdForReporting);\r
- }\r
-\r
- @Override\r
- protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {\r
- logger.debug("Received stop-exi message {} ", XmlUtil.toString(operationElement));\r
-\r
- netconfSession.stopExiCommunication();\r
-\r
- Element getSchemaResult = document.createElementNS( XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, XmlNetconfConstants.OK);\r
- logger.trace("{} operation successful", STOP_EXI);\r
- return getSchemaResult;\r
- }\r
-\r
- @Override\r
- protected String getOperationName() {\r
- return STOP_EXI;\r
- }\r
-\r
- @Override\r
- protected String getOperationNamespace() {\r
- return XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0;\r
- }\r
-\r
- @Override\r
- public void setNetconfSession(NetconfServerSession s) {\r
- this.netconfSession = s;\r
- }\r
-}\r
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+public class DefaultStopExi extends AbstractSingletonNetconfOperation implements DefaultNetconfOperation {
+
+ public static final String STOP_EXI = "stop-exi";
+ private NetconfServerSession netconfSession;
+
+ private static final Logger logger = LoggerFactory
+ .getLogger(DefaultStartExi.class);
+
+ public DefaultStopExi(String netconfSessionIdForReporting) {
+ super(netconfSessionIdForReporting);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+ logger.debug("Received stop-exi message {} ", XmlUtil.toString(operationElement));
+
+ netconfSession.stopExiCommunication();
+
+ Element getSchemaResult = document.createElementNS( XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0, XmlNetconfConstants.OK);
+ logger.trace("{} operation successful", STOP_EXI);
+ return getSchemaResult;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return STOP_EXI;
+ }
+
+ @Override
+ protected String getOperationNamespace() {
+ return XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0;
+ }
+
+ @Override
+ public void setNetconfSession(NetconfServerSession s) {
+ this.netconfSession = s;
+ }
+}
*/
package org.opendaylight.controller.netconf.impl;
-import junit.framework.Assert;
-
+import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
public void testParsing() throws Exception {
String s = "[netconf;10.12.0.102:48528;ssh;;;;;;]";
NetconfHelloMessageAdditionalHeader header = NetconfHelloMessageAdditionalHeader.fromString(s);
- Assert.assertEquals("netconf", header.getUserName());
- Assert.assertEquals("10.12.0.102", header.getAddress());
- Assert.assertEquals("ssh", header.getTransport());
+ assertEquals("netconf", header.getUserName());
+ assertEquals("10.12.0.102", header.getAddress());
+ assertEquals("ssh", header.getTransport());
}
@Test
public void testParsing2() throws Exception {
String s = "[tomas;10.0.0.0/10000;tcp;1000;1000;;/home/tomas;;]";
NetconfHelloMessageAdditionalHeader header = NetconfHelloMessageAdditionalHeader.fromString(s);
- Assert.assertEquals("tomas", header.getUserName());
- Assert.assertEquals("10.0.0.0", header.getAddress());
- Assert.assertEquals("tcp", header.getTransport());
+ assertEquals("tomas", header.getUserName());
+ assertEquals("10.0.0.0", header.getAddress());
+ assertEquals("tcp", header.getTransport());
}
@Test(expected = IllegalArgumentException.class)
*/
package org.opendaylight.controller.netconf.impl;
-import static junit.framework.Assert.assertNotNull;
+import static org.junit.Assert.assertNotNull;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import io.netty.channel.Channel;
-import java.util.List;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
public void testSessions() throws Exception {
doReturn("sessToStr").when(managementSession).toString();
service.onSessionUp(managementSession);
- List list = Lists.newArrayList(managementSession);
}
@Test(expected = RuntimeException.class)
public void testGetSchemas3() throws Exception {
doReturn("").when(managementSession).toString();
Capability cap = mock(Capability.class);
- Set caps = Sets.newHashSet(cap);
- Set services = Sets.newHashSet(operationService);
+ Set<Capability> caps = Sets.newHashSet(cap);
+ Set<NetconfOperationService> services = Sets.newHashSet(operationService);
doReturn(snapshot).when(operationProvider).openSnapshot(anyString());
doReturn(services).when(snapshot).getServices();
doReturn(caps).when(operationService).getCapabilities();
- Optional opt = mock(Optional.class);
+ Optional<String> opt = mock(Optional.class);
doReturn(opt).when(cap).getCapabilitySchema();
doReturn(true).when(opt).isPresent();
doReturn(opt).when(cap).getModuleNamespace();
doReturn("namespace").when(opt).get();
- Optional optRev = Optional.of("rev");
+ Optional<String> optRev = Optional.of("rev");
doReturn(optRev).when(cap).getRevision();
doReturn(Optional.of("modName")).when(cap).getModuleName();
doReturn(Optional.of(Lists.newArrayList("loc"))).when(cap).getLocation();
@Parameters
public static Collection<Object[]> data() {
List<Object[]> result = new ArrayList<>();
- for (int i = 0; i <= 8; i++) {
+ for (int i = 0; i <= 10; i++) {
result.add(new Object[]{i});
}
return result;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
package org.opendaylight.controller.netconf.impl.mapping.operations;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
import com.google.common.base.Optional;
-import junit.framework.Assert;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-
public class DefaultGetSchemaTest {
private CapabilityProvider cap;
package org.opendaylight.controller.netconf.impl.osgi;
import java.util.Arrays;
-import java.util.Collection;
import java.util.Dictionary;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
-import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
-import org.osgi.framework.*;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.Filter;
+import org.osgi.framework.ServiceListener;
+import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
public class NetconfImplActivatorTest {
@Mock
private Filter filter;
@Mock
- private ServiceReference reference;
+ private ServiceReference<?> reference;
@Mock
- private ServiceRegistration registration;
+ private ServiceRegistration<?> registration;
@Before
public void setUp() throws Exception {
doReturn(filter).when(bundle).createFilter(anyString());
doNothing().when(bundle).addServiceListener(any(ServiceListener.class), anyString());
- ServiceReference[] refs = new ServiceReference[0];
+ ServiceReference<?>[] refs = new ServiceReference[0];
doReturn(refs).when(bundle).getServiceReferences(anyString(), anyString());
doReturn(Arrays.asList(refs)).when(bundle).getServiceReferences(any(Class.class), anyString());
doReturn("").when(bundle).getProperty(anyString());
@Mock
private NetconfOperationServiceFactory factory;
@Mock
- private ServiceReference reference;
+ private ServiceReference<NetconfOperationServiceFactory> reference;
private NetconfOperationServiceFactoryTracker tracker;
--- /dev/null
+<!--
+ ~ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="m-10">
+ <data>
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">prefix:sal-netconf-connector</type>
+ <name>controller-config</name>
+ <port xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">1830</port>
+ <connection-timeout-millis xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">20000</connection-timeout-millis>
+ <between-attempts-timeout-millis xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">2000</between-attempts-timeout-millis>
+ <sleep-factor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">1.5</sleep-factor>
+ <password xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</password>
+ <dom-registry xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">prefix:dom-broker-osgi-registry</type>
+ <name>dom-broker</name>
+ </dom-registry>
+ <client-dispatcher xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf">prefix:netconf-client-dispatcher</type>
+ <name>global-netconf-dispatcher</name>
+ </client-dispatcher>
+ <username xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</username>
+ <address xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">127.0.0.1</address>
+ <processing-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool">prefix:threadpool</type>
+ <name>global-netconf-processing-executor</name>
+ </processing-executor>
+ <tcp-only xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">false</tcp-only>
+ <binding-registry xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">prefix:binding-broker-osgi-registry</type>
+ <name>binding-osgi-broker</name>
+ </binding-registry>
+ <max-connection-attempts xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">0</max-connection-attempts>
+ <event-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-event-executor</type>
+ <name>global-event-executor</name>
+ </event-executor>
+ </module>
+ </modules>
+ </data>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<!--
+ ~ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ ~
+ ~ This program and the accompanying materials are made available under the
+ ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="m-10">
+ <data>
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">prefix:sal-netconf-connector</type>
+ <name>controller-config</name>
+ <port xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">1830</port>
+ <connection-timeout-millis xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">20000</connection-timeout-millis>
+ <between-attempts-timeout-millis xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">2000</between-attempts-timeout-millis>
+ <sleep-factor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">1.5</sleep-factor>
+ <password xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</password>
+ <dom-registry xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">prefix:dom-broker-osgi-registry</type>
+ <name>dom-broker</name>
+ </dom-registry>
+ <client-dispatcher xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:netconf">prefix:netconf-client-dispatcher</type>
+ <name>global-netconf-dispatcher</name>
+ </client-dispatcher>
+ <username xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">admin</username>
+ <address xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">127.0.0.1</address>
+ <processing-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:threadpool">prefix:threadpool</type>
+ <name>global-netconf-processing-executor</name>
+ </processing-executor>
+ <tcp-only xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">false</tcp-only>
+ <binding-registry xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">prefix:binding-broker-osgi-registry</type>
+ <name>binding-osgi-broker</name>
+ </binding-registry>
+ <max-connection-attempts xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">0</max-connection-attempts>
+ <event-executor xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netty">prefix:netty-event-executor</type>
+ <name>global-event-executor</name>
+ </event-executor>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:shutdown:impl">prefix:shutdown</type>
+ <name>shutdown</name>
+ <secret xmlns="urn:opendaylight:params:xml:ns:yang:controller:shutdown:impl"/>
+ </module>
+ </modules>
+ </data>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="m-9">
+ <get-config>
+ <source>
+ <running/>
+ </source>
+ <filter xmlns:a="urn:ietf:params:xml:ns:netconf:base:1.0" a:type="subtree">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf">x:sal-netconf-connector</type>
+ <name>controller-config</name>
+ </module>
+ </modules>
+ </filter>
+ </get-config>
+</rpc>
--- /dev/null
+<rpc-reply message-id="5"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <data>
+ <top xmlns="http://example.com/schema/1.2/config">
+ <users>
+ <user>
+ <name>fred</name>
+ <type xmlns:x="http://java.sun.com/dtd/properties.dtd">x:admin</type>
+ <full-name>Fred Flintstone</full-name>
+ </user>
+ </users>
+ </top>
+ </data>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<rpc-reply message-id="5" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <data>
+ <top xmlns="http://example.com/schema/1.2/config">
+ <users>
+ <user>
+ <name>root</name>
+ <type>superuser</type>
+ <full-name>Charlie Root</full-name>
+ <company-info>
+ <dept>1</dept>
+ <id>1</id>
+ </company-info>
+ </user>
+ <user>
+ <name>fred</name>
+ <type xmlns:x="http://java.sun.com/dtd/properties.dtd">x:admin</type>
+ <full-name>Fred Flintstone</full-name>
+ <company-info>
+ <dept>2</dept>
+ <id>2</id>
+ </company-info>
+ </user>
+ <user>
+ <name>barney</name>
+ <type>admin</type>
+ <full-name>Barney Rubble</full-name>
+ <company-info>
+ <dept>2</dept>
+ <id>3</id>
+ </company-info>
+ </user>
+ </users>
+ <groups>
+ <group>
+ <name>admin</name>
+ </group>
+ </groups>
+ </top>
+ </data>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<rpc message-id="5"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <get-config>
+ <source>
+ <running/>
+ </source>
+ <filter type="subtree">
+ <top xmlns="http://example.com/schema/1.2/config">
+ <users>
+ <user>
+ <name>fred</name>
+ <type xmlns:a="http://java.sun.com/dtd/properties.dtd">a:admin</type>
+ <full-name/>
+ </user>
+ </users>
+ </top>
+ </filter>
+ </get-config>
+</rpc>
\ No newline at end of file
*/
package org.opendaylight.controller.netconf.it;
-import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import io.netty.channel.EventLoopGroup;
import io.netty.channel.local.LocalAddress;
+import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.nio.file.Files;
import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public static final String USERNAME = "user";
public static final String PASSWORD = "pwd";
- private NetconfSSHServer sshServer;
+ private SshProxyServer sshProxyServer;
+
+ private ExecutorService nioExec;
+ private EventLoopGroup clientGroup;
+ private ScheduledExecutorService minaTimerEx;
@Before
public void setUp() throws Exception {
- final char[] pem = PEMGenerator.generate().toCharArray();
- sshServer = NetconfSSHServer.start(TLS_ADDRESS.getPort(), NetconfConfigUtil.getNetconfLocalAddress(), getNettyThreadgroup(), pem);
- sshServer.setAuthProvider(getAuthProvider());
+ nioExec = Executors.newFixedThreadPool(1);
+ clientGroup = new NioEventLoopGroup();
+ minaTimerEx = Executors.newScheduledThreadPool(1);
+ sshProxyServer = new SshProxyServer(minaTimerEx, clientGroup, nioExec);
+ sshProxyServer.bind(
+ new SshProxyServerConfigurationBuilder()
+ .setBindingAddress(TLS_ADDRESS)
+ .setLocalAddress(NetconfConfigUtil.getNetconfLocalAddress())
+ .setAuthenticator(new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ })
+ .setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString()))
+ .setIdleTimeout(Integer.MAX_VALUE)
+ .createSshProxyServerConfiguration());
}
@After
public void tearDown() throws Exception {
- sshServer.close();
- sshServer.join();
+ sshProxyServer.close();
+ clientGroup.shutdownGracefully().await();
+ minaTimerEx.shutdownNow();
+ nioExec.shutdownNow();
}
@Test
package org.opendaylight.controller.netconf.monitoring;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.TestCase.fail;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
-
import java.util.Collections;
import org.hamcrest.CoreMatchers;
import org.junit.Before;
package org.opendaylight.controller.netconf.monitoring.osgi;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import java.util.Arrays;
import org.junit.Before;
import org.junit.Test;
import org.osgi.framework.ServiceListener;
import org.osgi.framework.ServiceReference;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.*;
-
public class NetconfMonitoringActivatorTest {
@Mock
MockitoAnnotations.initMocks(this);
doReturn(filter).when(context).createFilter(anyString());
doNothing().when(context).addServiceListener(any(ServiceListener.class), anyString());
- ServiceReference[] refs = new ServiceReference[2];
+ ServiceReference<?>[] refs = new ServiceReference[2];
doReturn(Arrays.asList(refs)).when(context).getServiceReferences(any(Class.class), anyString());
doReturn(refs).when(context).getServiceReferences(anyString(), anyString());
}
package org.opendaylight.controller.netconf.monitoring.osgi;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
import java.util.Hashtable;
import org.junit.Before;
import org.junit.Test;
import org.osgi.framework.ServiceReference;
import org.osgi.framework.ServiceRegistration;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyCollection;
-import static org.mockito.Mockito.*;
-
public class NetconfMonitoringServiceTrackerTest {
@Mock
- private ServiceReference reference;
+ private ServiceReference<NetconfMonitoringService> reference;
@Mock
private BundleContext context;
@Mock
- private ServiceRegistration serviceRegistration;
+ private ServiceRegistration<?> serviceRegistration;
@Mock
private Filter filter;
@Mock
<groupId>org.opendaylight.controller</groupId>
<artifactId>protocol-framework</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<artifactId>maven-bundle-plugin</artifactId>
<configuration>
<instructions>
- <Import-Package>org.apache.sshd.*, ch.ethz.ssh2, com.google.common.base, com.google.common.collect, io.netty.buffer,
+ <Import-Package>org.apache.sshd.*, com.google.common.base, com.google.common.collect, io.netty.buffer,
io.netty.channel, io.netty.channel.socket, io.netty.handler.codec, io.netty.handler.ssl, io.netty.util,
io.netty.util.concurrent, javax.xml.transform, javax.xml.transform.dom, javax.xml.transform.sax,
javax.xml.transform.stream, org.opendaylight.controller.netconf.api,
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
- <version>2.4</version>
<executions>
<execution>
<goals>
final String alignmentTextContent = alignmentElement.getTextContent().trim();
switch (alignmentTextContent) {
- case EXI_PARAMETER_BIT_PACKED:
- options.setAlignmentType(AlignmentType.bitPacked);
- break;
- case EXI_PARAMETER_BYTE_ALIGNED:
- options.setAlignmentType(AlignmentType.byteAligned);
- break;
- case EXI_PARAMETER_COMPRESSED:
- options.setAlignmentType(AlignmentType.compress);
- break;
- case EXI_PARAMETER_PRE_COMPRESSION:
- options.setAlignmentType(AlignmentType.preCompress);
- break;
+ case EXI_PARAMETER_BIT_PACKED:
+ options.setAlignmentType(AlignmentType.bitPacked);
+ break;
+ case EXI_PARAMETER_BYTE_ALIGNED:
+ options.setAlignmentType(AlignmentType.byteAligned);
+ break;
+ case EXI_PARAMETER_COMPRESSED:
+ options.setAlignmentType(AlignmentType.compress);
+ break;
+ case EXI_PARAMETER_PRE_COMPRESSION:
+ options.setAlignmentType(AlignmentType.preCompress);
+ break;
}
}
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
-import com.google.common.base.Preconditions;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandlerAdapter;
-import io.netty.channel.ChannelPromise;
import java.io.IOException;
import java.net.SocketAddress;
+
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandlerAdapter;
+import io.netty.channel.ChannelPromise;
+
/**
* Netty SSH handler class. Acts as interface between Netty and SSH library.
*/
private final AuthenticationHandler authenticationHandler;
private final SshClient sshClient;
- private AsyncSshHanderReader sshReadAsyncListener;
+ private AsyncSshHandlerReader sshReadAsyncListener;
private AsyncSshHandlerWriter sshWriteAsyncHandler;
private ClientChannel channel;
connectPromise.setSuccess();
connectPromise = null;
- sshReadAsyncListener = new AsyncSshHanderReader(this, ctx, channel.getAsyncOut());
+ // TODO we should also read from error stream and at least log from that
+
+ sshReadAsyncListener = new AsyncSshHandlerReader(new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ AsyncSshHandler.this.disconnect(ctx, ctx.newPromise());
+ }
+ }, new AsyncSshHandlerReader.ReadMsgHandler() {
+ @Override
+ public void onMessageRead(final ByteBuf msg) {
+ ctx.fireChannelRead(msg);
+ }
+ }, channel.toString(), channel.getAsyncOut());
+
// if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null
if(channel != null) {
sshWriteAsyncHandler = new AsyncSshHandlerWriter(channel.getAsyncIn());
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandler;
import org.apache.sshd.common.future.SshFutureListener;
import org.apache.sshd.common.io.IoInputStream;
import org.apache.sshd.common.io.IoReadFuture;
* Listener on async input stream from SSH session.
* This listeners schedules reads in a loop until the session is closed or read fails.
*/
-final class AsyncSshHanderReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
+public final class AsyncSshHandlerReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
private static final int BUFFER_SIZE = 8192;
- private final ChannelOutboundHandler asyncSshHandler;
- private final ChannelHandlerContext ctx;
+ private final AutoCloseable connectionClosedCallback;
+ private final ReadMsgHandler readHandler;
+ private final String channelId;
private IoInputStream asyncOut;
private Buffer buf;
private IoReadFuture currentReadFuture;
- public AsyncSshHanderReader(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
- this.asyncSshHandler = asyncSshHandler;
- this.ctx = ctx;
+ public AsyncSshHandlerReader(final AutoCloseable connectionClosedCallback, final ReadMsgHandler readHandler, final String channelId, final IoInputStream asyncOut) {
+ this.connectionClosedCallback = connectionClosedCallback;
+ this.readHandler = readHandler;
+ this.channelId = channelId;
this.asyncOut = asyncOut;
buf = new Buffer(BUFFER_SIZE);
asyncOut.read(buf).addListener(this);
if(future.getException() != null) {
if(asyncOut.isClosed() || asyncOut.isClosing()) {
// Ssh dropped
- logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
+ logger.debug("Ssh session dropped on channel: {}", channelId, future.getException());
} else {
- logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
+ logger.warn("Exception while reading from SSH remote on channel {}", channelId, future.getException());
}
invokeDisconnect();
return;
}
if (future.getRead() > 0) {
- ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
+ final ByteBuf msg = Unpooled.wrappedBuffer(buf.array(), 0, future.getRead());
+ if(logger.isTraceEnabled()) {
+ logger.trace("Reading message on channel: {}, message: {}", channelId, AsyncSshHandlerWriter.byteBufToString(msg));
+ }
+ readHandler.onMessageRead(msg);
// Schedule next read
buf = new Buffer(BUFFER_SIZE);
private void invokeDisconnect() {
try {
- asyncSshHandler.disconnect(ctx, ctx.newPromise());
+ connectionClosedCallback.close();
} catch (final Exception e) {
// This should not happen
throw new IllegalStateException(e);
// Remove self as listener on close to prevent reading from closed input
if(currentReadFuture != null) {
currentReadFuture.removeListener(this);
+ currentReadFuture = null;
}
asyncOut = null;
}
+
+ public interface ReadMsgHandler {
+
+ void onMessageRead(ByteBuf msg);
+ }
}
* Async Ssh writer. Takes messages(byte arrays) and sends them encrypted to remote server.
* Also handles pending writes by caching requests until pending state is over.
*/
-final class AsyncSshHandlerWriter implements AutoCloseable {
+public final class AsyncSshHandlerWriter implements AutoCloseable {
private static final Logger logger = LoggerFactory
.getLogger(AsyncSshHandlerWriter.class);
writeWithPendingDetection(pendingWrite.ctx, pendingWrite.promise, msg);
}
- private static String byteBufToString(final ByteBuf msg) {
+ public static String byteBufToString(final ByteBuf msg) {
msg.resetReaderIndex();
final String s = msg.toString(Charsets.UTF_8);
msg.resetReaderIndex();
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doAnswer;
-
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
@Test
public void testEncode() throws Exception {
final List<ByteBuf> chunks = Lists.newArrayList();
- doAnswer(new Answer() {
+ doAnswer(new Answer<Object>() {
@Override
public Object answer(final InvocationOnMock invocation) throws Throwable {
chunks.add((ByteBuf) invocation.getArguments()[0]);
package org.opendaylight.controller.netconf.nettyutil.handler;
-import static org.junit.Assert.*;
-
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.custommonkey.xmlunit.XMLUnit;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
-
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
import java.io.IOException;
import java.net.SocketAddress;
-
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.mockito.stubbing.Answer;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelPromise;
-
public class AsyncSshHandlerTest {
@Mock
private <T extends SshFuture<T>> ListenableFuture<SshFutureListener<T>> stubAddListener(final T future) {
final SettableFuture<SshFutureListener<T>> listenerSettableFuture = SettableFuture.create();
- doAnswer(new Answer() {
+ doAnswer(new Answer<Object>() {
@Override
public Object answer(final InvocationOnMock invocation) throws Throwable {
listenerSettableFuture.set((SshFutureListener<T>) invocation.getArguments()[0]);
private ChannelSubsystem getMockedSubsystemChannel(final IoInputStream asyncOut, final IoOutputStream asyncIn) throws IOException {
final ChannelSubsystem subsystemChannel = mock(ChannelSubsystem.class);
+ doReturn("subsystemChannel").when(subsystemChannel).toString();
+
doNothing().when(subsystemChannel).setStreaming(any(ClientChannel.Streaming.class));
final OpenFuture openFuture = mock(OpenFuture.class);
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-netty-util</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.ssh;
-
-import com.google.common.base.Preconditions;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.ThreadSafe;
-
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.ssh.threads.Handshaker;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.local.LocalAddress;
-
-/**
- * Thread that accepts client connections. Accepted socket is forwarded to {@link org.opendaylight.controller.netconf.ssh.threads.Handshaker},
- * which is executed in {@link #handshakeExecutor}.
- */
-@ThreadSafe
-public final class NetconfSSHServer extends Thread implements AutoCloseable {
-
- private static final Logger logger = LoggerFactory.getLogger(NetconfSSHServer.class);
- private static final AtomicLong sessionIdCounter = new AtomicLong();
-
- private final ServerSocket serverSocket;
- private final LocalAddress localAddress;
- private final EventLoopGroup bossGroup;
- private Optional<AuthProvider> authProvider = Optional.absent();
- private final ExecutorService handshakeExecutor;
- private final char[] pem;
- private volatile boolean up;
-
- private NetconfSSHServer(final int serverPort, final LocalAddress localAddress, final EventLoopGroup bossGroup, final char[] pem) throws IOException {
- super(NetconfSSHServer.class.getSimpleName());
- this.bossGroup = bossGroup;
- this.pem = pem;
- logger.trace("Creating SSH server socket on port {}", serverPort);
- this.serverSocket = new ServerSocket(serverPort);
- if (serverSocket.isBound() == false) {
- throw new IllegalStateException("Socket can't be bound to requested port :" + serverPort);
- }
- logger.trace("Server socket created.");
- this.localAddress = localAddress;
- this.up = true;
- handshakeExecutor = Executors.newFixedThreadPool(10);
- }
-
- public static NetconfSSHServer start(final int serverPort, final LocalAddress localAddress, final EventLoopGroup bossGroup, final char[] pemArray) throws IOException {
- final NetconfSSHServer netconfSSHServer = new NetconfSSHServer(serverPort, localAddress, bossGroup, pemArray);
- netconfSSHServer.start();
- return netconfSSHServer;
- }
-
- public synchronized AuthProvider getAuthProvider() {
- Preconditions.checkState(authProvider.isPresent(), "AuthenticationProvider is not set up, cannot authenticate user");
- return authProvider.get();
- }
-
- public synchronized void setAuthProvider(final AuthProvider authProvider) {
- if(this.authProvider != null) {
- logger.debug("Changing auth provider to {}", authProvider);
- }
- this.authProvider = Optional.fromNullable(authProvider);
- }
-
- @Override
- public void close() throws IOException {
- up = false;
- logger.trace("Closing SSH server socket.");
- serverSocket.close();
- bossGroup.shutdownGracefully();
- logger.trace("SSH server socket closed.");
- }
-
- @VisibleForTesting
- public InetSocketAddress getLocalSocketAddress() {
- return (InetSocketAddress) serverSocket.getLocalSocketAddress();
- }
-
- @Override
- public void run() {
- while (up) {
- Socket acceptedSocket = null;
- try {
- acceptedSocket = serverSocket.accept();
- } catch (final IOException e) {
- if (up == false) {
- logger.trace("Exiting server thread", e);
- } else {
- logger.warn("Exception occurred during socket.accept", e);
- }
- }
- if (acceptedSocket != null) {
- try {
- final Handshaker task = new Handshaker(acceptedSocket, localAddress, sessionIdCounter.incrementAndGet(), getAuthProvider(), bossGroup, pem);
- handshakeExecutor.submit(task);
- } catch (final IOException e) {
- logger.warn("Cannot set PEMHostKey, closing connection", e);
- closeSocket(acceptedSocket);
- } catch (final IllegalStateException e) {
- logger.warn("Cannot accept connection, closing", e);
- closeSocket(acceptedSocket);
- }
- }
- }
- logger.debug("Server thread is exiting");
- }
-
- private void closeSocket(final Socket acceptedSocket) {
- try {
- acceptedSocket.close();
- } catch (final IOException e) {
- logger.warn("Ignoring exception while closing socket", e);
- }
- }
-
-}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import com.google.common.base.Preconditions;
+import io.netty.bootstrap.Bootstrap;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.local.LocalAddress;
+import io.netty.channel.local.LocalChannel;
+import io.netty.util.concurrent.GenericFutureListener;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import org.apache.sshd.common.NamedFactory;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.server.AsyncCommand;
+import org.apache.sshd.server.Command;
+import org.apache.sshd.server.Environment;
+import org.apache.sshd.server.ExitCallback;
+import org.apache.sshd.server.SessionAware;
+import org.apache.sshd.server.session.ServerSession;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This command handles all netconf related rpc and forwards to delegate server.
+ * Uses netty to make a local connection to delegate server.
+ *
+ * Command is Apache Mina SSH terminology for objects handling ssh data.
+ */
+public class RemoteNetconfCommand implements AsyncCommand, SessionAware {
+
+    private static final Logger logger = LoggerFactory.getLogger(RemoteNetconfCommand.class);
+
+    private final EventLoopGroup clientEventGroup;
+    // Address of the in-VM delegate netconf server that decrypted traffic is forwarded to.
+    private final LocalAddress localAddress;
+
+    // Async SSH streams of the remote client; injected by the SSH framework via the setters below.
+    private IoInputStream in;
+    private IoOutputStream out;
+    private ExitCallback callback;
+    // Built in setSession(); also used to identify the client in log messages.
+    private NetconfHelloMessageAdditionalHeader netconfHelloMessageAdditionalHeader;
+
+    private Channel clientChannel;
+    private ChannelFuture clientChannelFuture;
+
+    public RemoteNetconfCommand(final EventLoopGroup clientEventGroup, final LocalAddress localAddress) {
+        this.clientEventGroup = clientEventGroup;
+        this.localAddress = localAddress;
+    }
+
+    @Override
+    public void setIoInputStream(final IoInputStream in) {
+        this.in = in;
+    }
+
+    @Override
+    public void setIoOutputStream(final IoOutputStream out) {
+        this.out = out;
+    }
+
+    @Override
+    public void setIoErrorStream(final IoOutputStream err) {
+        // TODO do we want to use error stream in some way ?
+    }
+
+    @Override
+    public void setInputStream(final InputStream in) {
+        throw new UnsupportedOperationException("Synchronous IO is unsupported");
+    }
+
+    @Override
+    public void setOutputStream(final OutputStream out) {
+        throw new UnsupportedOperationException("Synchronous IO is unsupported");
+
+    }
+
+    @Override
+    public void setErrorStream(final OutputStream err) {
+        throw new UnsupportedOperationException("Synchronous IO is unsupported");
+
+    }
+
+    @Override
+    public void setExitCallback(final ExitCallback callback) {
+        this.callback = callback;
+    }
+
+    /**
+     * Opens an asynchronous netty connection to the delegate netconf server and wires
+     * the SSH client streams to it via {@link SshProxyClientHandler}.
+     * On connection failure the exit callback is invoked with a non-zero exit code.
+     */
+    @Override
+    public void start(final Environment env) throws IOException {
+        logger.trace("Establishing internal connection to netconf server for client: {}", getClientAddress());
+
+        final Bootstrap clientBootstrap = new Bootstrap();
+        clientBootstrap.group(clientEventGroup).channel(LocalChannel.class);
+
+        clientBootstrap
+                .handler(new ChannelInitializer<LocalChannel>() {
+                    @Override
+                    public void initChannel(final LocalChannel ch) throws Exception {
+                        ch.pipeline().addLast(new SshProxyClientHandler(in, out, netconfHelloMessageAdditionalHeader, callback));
+                    }
+                });
+        clientChannelFuture = clientBootstrap.connect(localAddress);
+        clientChannelFuture.addListener(new GenericFutureListener<ChannelFuture>() {
+
+            @Override
+            public void operationComplete(final ChannelFuture future) throws Exception {
+                if(future.isSuccess()) {
+                    clientChannel = clientChannelFuture.channel();
+                } else {
+                    logger.warn("Unable to establish internal connection to netconf server for client: {}", getClientAddress());
+                    Preconditions.checkNotNull(callback, "Exit callback must be set");
+                    callback.onExit(1, "Unable to establish internal connection to netconf server for client: "+ getClientAddress());
+                }
+            }
+        });
+    }
+
+    @Override
+    public void destroy() {
+        logger.trace("Releasing internal connection to netconf server for client: {} on channel: {}",
+                getClientAddress(), clientChannel);
+
+        // NOTE(review): cancel is a no-op once the connect completed; the close below covers that case.
+        clientChannelFuture.cancel(true);
+        if(clientChannel != null) {
+            clientChannel.close().addListener(new GenericFutureListener<ChannelFuture>() {
+
+                @Override
+                public void operationComplete(final ChannelFuture future) throws Exception {
+                    if (future.isSuccess() == false) {
+                        logger.warn("Unable to release internal connection to netconf server on channel: {}", clientChannel);
+                    }
+                }
+            });
+        }
+    }
+
+    private String getClientAddress() {
+        // NOTE(review): netconfHelloMessageAdditionalHeader is only assigned in setSession(); this
+        // NPEs if start()/destroy() ever runs first — confirm the SSH framework guarantees the ordering.
+        return netconfHelloMessageAdditionalHeader.getAddress();
+    }
+
+    @Override
+    public void setSession(final ServerSession session) {
+        final SocketAddress remoteAddress = session.getIoSession().getRemoteAddress();
+        // Peers that are not InetSocketAddress (e.g. local transports) leave host/port empty.
+        String hostName = "";
+        String port = "";
+        if(remoteAddress instanceof InetSocketAddress) {
+            hostName = ((InetSocketAddress) remoteAddress).getAddress().getHostAddress();
+            port = Integer.toString(((InetSocketAddress) remoteAddress).getPort());
+        }
+        netconfHelloMessageAdditionalHeader = new NetconfHelloMessageAdditionalHeader(
+                session.getUsername(), hostName, port, "ssh", "client");
+    }
+
+    /**
+     * Factory registered with the SSH server; produces one RemoteNetconfCommand
+     * per "netconf" subsystem request.
+     */
+    public static class NetconfCommandFactory implements NamedFactory<Command> {
+
+        public static final String NETCONF = "netconf";
+
+        private final EventLoopGroup clientBootstrap;
+        private final LocalAddress localAddress;
+
+        public NetconfCommandFactory(final EventLoopGroup clientBootstrap, final LocalAddress localAddress) {
+
+            this.clientBootstrap = clientBootstrap;
+            this.localAddress = localAddress;
+        }
+
+        @Override
+        public String getName() {
+            return NETCONF;
+        }
+
+        @Override
+        public RemoteNetconfCommand create() {
+            return new RemoteNetconfCommand(clientBootstrap, localAddress);
+        }
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.server.ExitCallback;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.client.AsyncSshHandlerReader;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.client.AsyncSshHandlerWriter;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Netty handler that reads SSH from remote client and writes to delegate server and reads from delegate server and writes to remote client
+ */
+final class SshProxyClientHandler extends ChannelInboundHandlerAdapter {
+
+    private static final Logger logger = LoggerFactory.getLogger(SshProxyClientHandler.class);
+
+    // Async streams of the remote SSH client's channel.
+    private final IoInputStream in;
+    private final IoOutputStream out;
+
+    // Created in channelActive(); pump data between the SSH streams and this channel.
+    private AsyncSshHandlerReader asyncSshHandlerReader;
+    private AsyncSshHandlerWriter asyncSshHandlerWriter;
+
+    private final NetconfHelloMessageAdditionalHeader netconfHelloMessageAdditionalHeader;
+    // Notified when the internal connection drops so the SSH session can be terminated.
+    private final ExitCallback callback;
+
+    public SshProxyClientHandler(final IoInputStream in, final IoOutputStream out,
+                                 final NetconfHelloMessageAdditionalHeader netconfHelloMessageAdditionalHeader,
+                                 final ExitCallback callback) {
+        this.in = in;
+        this.out = out;
+        this.netconfHelloMessageAdditionalHeader = netconfHelloMessageAdditionalHeader;
+        this.callback = callback;
+    }
+
+    @Override
+    public void channelActive(final ChannelHandlerContext ctx) throws Exception {
+        writeAdditionalHeader(ctx);
+
+        asyncSshHandlerWriter = new AsyncSshHandlerWriter(out);
+        asyncSshHandlerReader = new AsyncSshHandlerReader(new AutoCloseable() {
+            @Override
+            public void close() throws Exception {
+                // Close both sessions (delegate server and remote client)
+                ctx.fireChannelInactive();
+                ctx.disconnect();
+                ctx.close();
+                asyncSshHandlerReader.close();
+                asyncSshHandlerWriter.close();
+            }
+        }, new AsyncSshHandlerReader.ReadMsgHandler() {
+            @Override
+            public void onMessageRead(final ByteBuf msg) {
+                if(logger.isTraceEnabled()) {
+                    logger.trace("Forwarding message for client: {} on channel: {}, message: {}",
+                            netconfHelloMessageAdditionalHeader.getAddress(), ctx.channel(), AsyncSshHandlerWriter.byteBufToString(msg));
+                }
+                // Just forward to delegate
+                ctx.writeAndFlush(msg);
+            }
+        }, "ssh" + netconfHelloMessageAdditionalHeader.getAddress(), in);
+
+
+        super.channelActive(ctx);
+    }
+
+    // Prepends the netconf additional-header (client identity) so the delegate server knows who connected.
+    private void writeAdditionalHeader(final ChannelHandlerContext ctx) {
+        ctx.writeAndFlush(Unpooled.copiedBuffer(netconfHelloMessageAdditionalHeader.toFormattedString().getBytes()));
+    }
+
+    @Override
+    public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
+        asyncSshHandlerWriter.write(ctx, msg, ctx.newPromise());
+    }
+
+    @Override
+    public void channelInactive(final ChannelHandlerContext ctx) throws Exception {
+        // FIX: the original format string was missing the second "{}" placeholder, so the
+        // channel argument was silently dropped from the rendered log message.
+        logger.debug("Internal connection to netconf server was dropped for client: {} on channel: {}",
+                netconfHelloMessageAdditionalHeader.getAddress(), ctx.channel());
+        callback.onExit(1, "Internal connection to netconf server was dropped for client: " +
+                netconfHelloMessageAdditionalHeader.getAddress() + " on channel: " + ctx.channel());
+        super.channelInactive(ctx);
+    }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.EventLoopGroup;
+import java.io.IOException;
+import java.nio.channels.AsynchronousChannelGroup;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.sshd.SshServer;
+import org.apache.sshd.common.FactoryManager;
+import org.apache.sshd.common.NamedFactory;
+import org.apache.sshd.common.RuntimeSshException;
+import org.apache.sshd.common.io.IoAcceptor;
+import org.apache.sshd.common.io.IoConnector;
+import org.apache.sshd.common.io.IoHandler;
+import org.apache.sshd.common.io.IoServiceFactory;
+import org.apache.sshd.common.io.IoServiceFactoryFactory;
+import org.apache.sshd.common.io.nio2.Nio2Acceptor;
+import org.apache.sshd.common.io.nio2.Nio2Connector;
+import org.apache.sshd.common.io.nio2.Nio2ServiceFactoryFactory;
+import org.apache.sshd.common.util.CloseableUtils;
+import org.apache.sshd.server.Command;
+import org.apache.sshd.server.ServerFactoryManager;
+
+/**
+ * Proxy SSH server that just delegates decrypted content to a delegate server within same VM.
+ * Implemented using Apache Mina SSH lib.
+ */
+public class SshProxyServer implements AutoCloseable {
+
+    private final SshServer sshServer;
+    // Executor used by sshd for timeouts/scheduling.
+    private final ScheduledExecutorService minaTimerExecutor;
+    // Netty group used for the internal (local) client connections to the delegate server.
+    private final EventLoopGroup clientGroup;
+    private final IoServiceFactoryFactory nioServiceWithPoolFactoryFactory;
+
+    public SshProxyServer(final ScheduledExecutorService minaTimerExecutor, final EventLoopGroup clientGroup, final ExecutorService nioExecutor) {
+        this.minaTimerExecutor = minaTimerExecutor;
+        this.clientGroup = clientGroup;
+        this.nioServiceWithPoolFactoryFactory = new NioServiceWithPoolFactory.NioServiceWithPoolFactoryFactory(nioExecutor);
+        this.sshServer = SshServer.setUpDefaultServer();
+    }
+
+    /**
+     * Configures the sshd server from the supplied configuration and starts listening.
+     *
+     * @throws IOException if the server fails to start
+     */
+    public void bind(final SshProxyServerConfiguration sshProxyServerConfiguration) throws IOException {
+        sshServer.setHost(sshProxyServerConfiguration.getBindingAddress().getHostString());
+        sshServer.setPort(sshProxyServerConfiguration.getBindingAddress().getPort());
+
+        sshServer.setPasswordAuthenticator(sshProxyServerConfiguration.getAuthenticator());
+        sshServer.setKeyPairProvider(sshProxyServerConfiguration.getKeyPairProvider());
+
+        sshServer.setIoServiceFactoryFactory(nioServiceWithPoolFactoryFactory);
+        sshServer.setScheduledExecutorService(minaTimerExecutor);
+        sshServer.setProperties(getProperties(sshProxyServerConfiguration));
+
+        final RemoteNetconfCommand.NetconfCommandFactory netconfCommandFactory =
+                new RemoteNetconfCommand.NetconfCommandFactory(clientGroup, sshProxyServerConfiguration.getLocalAddress());
+        sshServer.setSubsystemFactories(Lists.<NamedFactory<Command>>newArrayList(netconfCommandFactory));
+        sshServer.start();
+    }
+
+    private static Map<String, String> getProperties(final SshProxyServerConfiguration sshProxyServerConfiguration) {
+        // FIX: replaced double-brace initialization (anonymous HashMap subclass) with a plain map.
+        // The anonymous class pinned a reference to its enclosing scope and added a needless class.
+        final Map<String, String> properties = new HashMap<>();
+        properties.put(ServerFactoryManager.IDLE_TIMEOUT, String.valueOf(sshProxyServerConfiguration.getIdleTimeout()));
+        return properties;
+    }
+
+    @Override
+    public void close() {
+        try {
+            sshServer.stop(true);
+        } catch (final InterruptedException e) {
+            // FIX: restore the interrupt flag before translating the exception so callers
+            // higher up the stack can still observe the interruption.
+            Thread.currentThread().interrupt();
+            throw new RuntimeException("Interrupted while stopping sshServer", e);
+        } finally {
+            sshServer.close(true);
+        }
+    }
+
+    /**
+     * Based on Nio2ServiceFactory with one addition: injectable executor
+     */
+    private static final class NioServiceWithPoolFactory extends CloseableUtils.AbstractCloseable implements IoServiceFactory {
+
+        private final FactoryManager manager;
+        private final AsynchronousChannelGroup group;
+
+        public NioServiceWithPoolFactory(final FactoryManager manager, final ExecutorService executor) {
+            this.manager = manager;
+            try {
+                group = AsynchronousChannelGroup.withThreadPool(executor);
+            } catch (final IOException e) {
+                throw new RuntimeSshException(e);
+            }
+        }
+
+        // FIX: added missing @Override annotations on the IoServiceFactory methods.
+        @Override
+        public IoConnector createConnector(final IoHandler handler) {
+            return new Nio2Connector(manager, handler, group);
+        }
+
+        @Override
+        public IoAcceptor createAcceptor(final IoHandler handler) {
+            return new Nio2Acceptor(manager, handler, group);
+        }
+
+        @Override
+        protected void doCloseImmediately() {
+            try {
+                group.shutdownNow();
+                group.awaitTermination(5, TimeUnit.SECONDS);
+            } catch (final Exception e) {
+                log.debug("Exception caught while closing channel group", e);
+            } finally {
+                super.doCloseImmediately();
+            }
+        }
+
+        private static final class NioServiceWithPoolFactoryFactory extends Nio2ServiceFactoryFactory {
+
+            private final ExecutorService nioExecutor;
+
+            private NioServiceWithPoolFactoryFactory(final ExecutorService nioExecutor) {
+                this.nioExecutor = nioExecutor;
+            }
+
+            @Override
+            public IoServiceFactory create(final FactoryManager manager) {
+                return new NioServiceWithPoolFactory(manager, nioExecutor);
+            }
+        }
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import com.google.common.base.Preconditions;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.apache.sshd.common.KeyPairProvider;
+import org.apache.sshd.server.PasswordAuthenticator;
+
+/**
+ * Immutable configuration for the SSH proxy server: where to bind, the local address of
+ * the in-VM delegate netconf server, how to authenticate clients and which host key
+ * provider to use. All parts are mandatory (null-checked in the constructor).
+ */
+public final class SshProxyServerConfiguration {
+    private final InetSocketAddress bindingAddress;
+    private final LocalAddress localAddress;
+    private final PasswordAuthenticator authenticator;
+    private final KeyPairProvider keyPairProvider;
+    private final int idleTimeout;
+
+    // Package-private: instances are expected to be created via SshProxyServerConfigurationBuilder.
+    SshProxyServerConfiguration(final InetSocketAddress bindingAddress, final LocalAddress localAddress, final PasswordAuthenticator authenticator, final KeyPairProvider keyPairProvider, final int idleTimeout) {
+        this.bindingAddress = Preconditions.checkNotNull(bindingAddress);
+        this.localAddress = Preconditions.checkNotNull(localAddress);
+        this.authenticator = Preconditions.checkNotNull(authenticator);
+        this.keyPairProvider = Preconditions.checkNotNull(keyPairProvider);
+        // Idle timeout cannot be disabled in the sshd by using =< 0 value
+        Preconditions.checkArgument(idleTimeout > 0, "Idle timeout has to be > 0");
+        this.idleTimeout = idleTimeout;
+    }
+
+    public InetSocketAddress getBindingAddress() {
+        return bindingAddress;
+    }
+
+    public LocalAddress getLocalAddress() {
+        return localAddress;
+    }
+
+    public PasswordAuthenticator getAuthenticator() {
+        return authenticator;
+    }
+
+    public KeyPairProvider getKeyPairProvider() {
+        return keyPairProvider;
+    }
+
+    public int getIdleTimeout() {
+        return idleTimeout;
+    }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.apache.sshd.common.KeyPairProvider;
+import org.apache.sshd.server.PasswordAuthenticator;
+
+/**
+ * Fluent builder for {@link SshProxyServerConfiguration}. All fields are mandatory;
+ * validation (null checks, idle-timeout &gt; 0) happens in the configuration constructor.
+ */
+public final class SshProxyServerConfigurationBuilder {
+    private InetSocketAddress bindingAddress;
+    private LocalAddress localAddress;
+    private PasswordAuthenticator authenticator;
+    private KeyPairProvider keyPairProvider;
+    private int idleTimeout;
+
+    public SshProxyServerConfigurationBuilder setBindingAddress(final InetSocketAddress bindingAddress) {
+        this.bindingAddress = bindingAddress;
+        return this;
+    }
+
+    public SshProxyServerConfigurationBuilder setLocalAddress(final LocalAddress localAddress) {
+        this.localAddress = localAddress;
+        return this;
+    }
+
+    public SshProxyServerConfigurationBuilder setAuthenticator(final PasswordAuthenticator authenticator) {
+        this.authenticator = authenticator;
+        return this;
+    }
+
+    public SshProxyServerConfigurationBuilder setKeyPairProvider(final KeyPairProvider keyPairProvider) {
+        this.keyPairProvider = keyPairProvider;
+        return this;
+    }
+
+    public SshProxyServerConfigurationBuilder setIdleTimeout(final int idleTimeout) {
+        this.idleTimeout = idleTimeout;
+        return this;
+    }
+
+    public SshProxyServerConfiguration createSshProxyServerConfiguration() {
+        return new SshProxyServerConfiguration(bindingAddress, localAddress, authenticator, keyPairProvider, idleTimeout);
+    }
+
+    // FIX: was an instance method, which made it useless as a factory (an instance was already
+    // required to call it). Making it static is source-compatible for existing instance-style calls.
+    // Also fixed the missing newline at end of file.
+    public static SshProxyServerConfigurationBuilder create() {
+        return new SshProxyServerConfigurationBuilder();
+    }
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.ssh.authentication;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.io.FileInputStream;
-import java.security.NoSuchAlgorithmException;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.bouncycastle.openssl.PEMWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.security.Key;
-import java.security.KeyPair;
-import java.security.KeyPairGenerator;
-import java.security.SecureRandom;
-
-public class PEMGenerator {
- private static final Logger logger = LoggerFactory.getLogger(PEMGenerator.class);
- private static final int KEY_SIZE = 4096;
-
-
- public static String readOrGeneratePK(File privateKeyFile) throws IOException {
- if (privateKeyFile.exists() == false) {
- // generate & save to file
- try {
- return generateTo(privateKeyFile);
- } catch (Exception e) {
- logger.error("Exception occurred while generating PEM string to {}", privateKeyFile, e);
- throw new IllegalStateException("Error generating RSA key from file " + privateKeyFile);
- }
- } else {
- // read from file
- try (FileInputStream fis = new FileInputStream(privateKeyFile)) {
- return IOUtils.toString(fis);
- } catch (final IOException e) {
- logger.error("Error reading RSA key from file {}", privateKeyFile, e);
- throw new IOException("Error reading RSA key from file " + privateKeyFile, e);
- }
- }
- }
-
- /**
- * Generate private key to a file and return its content as string.
- *
- * @param privateFile path where private key should be generated
- * @return String representation of private key
- * @throws IOException
- * @throws NoSuchAlgorithmException
- */
- @VisibleForTesting
- public static String generateTo(File privateFile) throws IOException, NoSuchAlgorithmException {
- logger.info("Generating private key to {}", privateFile.getAbsolutePath());
- String privatePEM = generate();
- FileUtils.write(privateFile, privatePEM);
- return privatePEM;
- }
-
- @VisibleForTesting
- public static String generate() throws NoSuchAlgorithmException, IOException {
- KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
- SecureRandom sr = new SecureRandom();
- keyGen.initialize(KEY_SIZE, sr);
- KeyPair keypair = keyGen.generateKeyPair();
- return toString(keypair.getPrivate());
- }
-
- /**
- * Get string representation of a key.
- */
- private static String toString(Key key) throws IOException {
- try (StringWriter writer = new StringWriter()) {
- try (PEMWriter pemWriter = new PEMWriter(writer)) {
- pemWriter.writeObject(key);
- }
- return writer.toString();
- }
- }
-
-}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh.osgi;
+
+import com.google.common.base.Preconditions;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.session.ServerSession;
+import org.opendaylight.controller.netconf.auth.AuthConstants;
+import org.opendaylight.controller.netconf.auth.AuthProvider;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tracks {@link AuthProvider} OSGi services and exposes the highest-preference one
+ * (per {@link AuthConstants#SERVICE_PREFERENCE_KEY}) as a sshd PasswordAuthenticator.
+ * While no provider is registered, authentication fails closed (returns false).
+ */
+final class AuthProviderTracker implements ServiceTrackerCustomizer<AuthProvider, AuthProvider>, PasswordAuthenticator {
+    private static final Logger logger = LoggerFactory.getLogger(AuthProviderTracker.class);
+
+    private final BundleContext bundleContext;
+
+    // Preference of the currently selected provider; null when none is selected.
+    private Integer maxPreference;
+    private final ServiceTracker<AuthProvider, AuthProvider> listenerTracker;
+    // Currently selected provider; null until one is registered.
+    private AuthProvider authProvider;
+
+    public AuthProviderTracker(final BundleContext bundleContext) {
+        this.bundleContext = bundleContext;
+        listenerTracker = new ServiceTracker<>(bundleContext, AuthProvider.class, this);
+        listenerTracker.open();
+    }
+
+    @Override
+    public AuthProvider addingService(final ServiceReference<AuthProvider> reference) {
+        logger.trace("Service {} added", reference);
+        final AuthProvider authService = bundleContext.getService(reference);
+        final Integer newServicePreference = getPreference(reference);
+        if(isBetter(newServicePreference)) {
+            maxPreference = newServicePreference;
+            this.authProvider = authService;
+        }
+        return authService;
+    }
+
+    // Services without an explicit preference property rank lowest.
+    private Integer getPreference(final ServiceReference<AuthProvider> reference) {
+        final Object preferenceProperty = reference.getProperty(AuthConstants.SERVICE_PREFERENCE_KEY);
+        return preferenceProperty == null ? Integer.MIN_VALUE : Integer.valueOf(preferenceProperty.toString());
+    }
+
+    private boolean isBetter(final Integer newServicePreference) {
+        Preconditions.checkNotNull(newServicePreference);
+        if(maxPreference == null) {
+            return true;
+        }
+
+        return newServicePreference > maxPreference;
+    }
+
+    @Override
+    public void modifiedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
+        // NOTE(review): unlike addingService, maxPreference is NOT updated here, so a later,
+        // lower-preference service can still replace this one — confirm whether that is intended.
+        final AuthProvider authService = bundleContext.getService(reference);
+        final Integer newServicePreference = getPreference(reference);
+        if(isBetter(newServicePreference)) {
+            logger.trace("Replacing modified service {} in netconf SSH.", reference);
+            this.authProvider = authService;
+        }
+    }
+
+    @Override
+    public void removedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
+        // NOTE(review): this clears the active provider even when the removed service is not the
+        // currently selected one, and it does not fall back to the next-best registered provider —
+        // confirm intended; authentication will fail until another provider event arrives.
+        logger.trace("Removing service {} from netconf SSH. " +
+                "SSH won't authenticate users until AuthProvider service will be started.", reference);
+        maxPreference = null;
+        this.authProvider = null;
+    }
+
+    public void stop() {
+        listenerTracker.close();
+        // sshThread should finish normally since sshServer.close stops processing
+    }
+
+    @Override
+    public boolean authenticate(final String username, final String password, final ServerSession session) {
+        // Fails closed while no provider is registered.
+        return authProvider == null ? false : authProvider.authenticated(username, password);
+    }
+}
import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Preconditions;
-import java.io.File;
+import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import io.netty.channel.local.LocalAddress;
+import io.netty.channel.nio.NioEventLoopGroup;
import java.io.IOException;
import java.net.InetSocketAddress;
-
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FilenameUtils;
-import org.opendaylight.controller.netconf.auth.AuthConstants;
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.apache.sshd.common.util.ThreadUtils;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil.InfixProp;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
-import org.osgi.framework.ServiceReference;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Strings;
-
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.local.LocalAddress;
-import io.netty.channel.nio.NioEventLoopGroup;
-
-/**
- * Activator for netconf SSH bundle which creates SSH bridge between netconf client and netconf server. Activator
- * starts SSH Server in its own thread. This thread is closed when activator calls stop() method. Server opens socket
- * and listens for client connections. Each client connection creation is handled in separate
- * {@link org.opendaylight.controller.netconf.ssh.threads.Handshaker} thread.
- * This thread creates two additional threads {@link org.opendaylight.controller.netconf.ssh.threads.IOThread}
- * forwarding data from/to client.IOThread closes servers session and server connection when it gets -1 on input stream.
- * {@link org.opendaylight.controller.netconf.ssh.threads.IOThread}'s run method waits for -1 on input stream to finish.
- * All threads are daemons.
- */
public class NetconfSSHActivator implements BundleActivator {
private static final Logger logger = LoggerFactory.getLogger(NetconfSSHActivator.class);
- private static AuthProviderTracker authProviderTracker;
- private NetconfSSHServer server;
+ private static final java.lang.String ALGORITHM = "RSA";
+ private static final int KEY_SIZE = 4096;
+ public static final int POOL_SIZE = 8;
+ private static final int DEFAULT_IDLE_TIMEOUT = (int) TimeUnit.MINUTES.toMillis(20);
+
+ private ScheduledExecutorService minaTimerExecutor;
+ private NioEventLoopGroup clientGroup;
+ private ExecutorService nioExecutor;
+ private AuthProviderTracker authProviderTracker;
+
+ private SshProxyServer server;
@Override
public void start(final BundleContext bundleContext) throws IOException {
+ minaTimerExecutor = Executors.newScheduledThreadPool(POOL_SIZE, new ThreadFactory() {
+ @Override
+ public Thread newThread(final Runnable r) {
+ return new Thread(r, "netconf-ssh-server-mina-timers");
+ }
+ });
+ clientGroup = new NioEventLoopGroup();
+ nioExecutor = ThreadUtils.newFixedThreadPool("netconf-ssh-server-nio-group", POOL_SIZE);
server = startSSHServer(bundleContext);
}
if(authProviderTracker != null) {
authProviderTracker.stop();
}
+
+ if(nioExecutor!=null) {
+ nioExecutor.shutdownNow();
+ }
+
+ if(clientGroup != null) {
+ clientGroup.shutdownGracefully();
+ }
+
+ if(minaTimerExecutor != null) {
+ minaTimerExecutor.shutdownNow();
+ }
}
- private static NetconfSSHServer startSSHServer(final BundleContext bundleContext) throws IOException {
- final Optional<InetSocketAddress> maybeSshSocketAddress = NetconfConfigUtil.extractNetconfServerAddress(bundleContext,
- InfixProp.ssh);
+ private SshProxyServer startSSHServer(final BundleContext bundleContext) throws IOException {
+ final Optional<InetSocketAddress> maybeSshSocketAddress = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, InfixProp.ssh);
if (maybeSshSocketAddress.isPresent() == false) {
logger.trace("SSH bridge not configured");
final LocalAddress localAddress = NetconfConfigUtil.getNetconfLocalAddress();
- final String path = FilenameUtils.separatorsToSystem(NetconfConfigUtil.getPrivateKeyPath(bundleContext));
- checkState(!Strings.isNullOrEmpty(path), "Path to ssh private key is blank. Reconfigure %s", NetconfConfigUtil.getPrivateKeyKey());
- final String privateKeyPEMString = PEMGenerator.readOrGeneratePK(new File(path));
-
- final EventLoopGroup bossGroup = new NioEventLoopGroup();
- final NetconfSSHServer server = NetconfSSHServer.start(sshSocketAddress.getPort(), localAddress, bossGroup, privateKeyPEMString.toCharArray());
-
- authProviderTracker = new AuthProviderTracker(bundleContext, server);
+ authProviderTracker = new AuthProviderTracker(bundleContext);
- return server;
- }
-
- private static Thread runNetconfSshThread(final NetconfSSHServer server) {
- final Thread serverThread = new Thread(server, "netconf SSH server thread");
- serverThread.setDaemon(true);
- serverThread.start();
- logger.trace("Netconf SSH bridge up and running.");
- return serverThread;
+ final String path = FilenameUtils.separatorsToSystem(NetconfConfigUtil.getPrivateKeyPath(bundleContext));
+ checkState(!Strings.isNullOrEmpty(path), "Path to ssh private key is blank. Reconfigure %s",
+ NetconfConfigUtil.getPrivateKeyKey());
+
+ final SshProxyServer sshProxyServer = new SshProxyServer(minaTimerExecutor, clientGroup, nioExecutor);
+ sshProxyServer.bind(
+ new SshProxyServerConfigurationBuilder()
+ .setBindingAddress(sshSocketAddress)
+ .setLocalAddress(localAddress)
+ .setAuthenticator(authProviderTracker)
+ .setKeyPairProvider(new PEMGeneratorHostKeyProvider(path, ALGORITHM, KEY_SIZE))
+ .setIdleTimeout(DEFAULT_IDLE_TIMEOUT)
+ .createSshProxyServerConfiguration());
+ return sshProxyServer;
}
- private static class AuthProviderTracker implements ServiceTrackerCustomizer<AuthProvider, AuthProvider> {
- private final BundleContext bundleContext;
- private final NetconfSSHServer server;
-
- private Integer maxPreference;
- private Thread sshThread;
- private final ServiceTracker<AuthProvider, AuthProvider> listenerTracker;
-
- public AuthProviderTracker(final BundleContext bundleContext, final NetconfSSHServer server) {
- this.bundleContext = bundleContext;
- this.server = server;
- listenerTracker = new ServiceTracker<>(bundleContext, AuthProvider.class, this);
- listenerTracker.open();
- }
-
- @Override
- public AuthProvider addingService(final ServiceReference<AuthProvider> reference) {
- logger.trace("Service {} added", reference);
- final AuthProvider authService = bundleContext.getService(reference);
- final Integer newServicePreference = getPreference(reference);
- if(isBetter(newServicePreference)) {
- maxPreference = newServicePreference;
- server.setAuthProvider(authService);
- if(sshThread == null) {
- sshThread = runNetconfSshThread(server);
- }
- }
- return authService;
- }
-
- private Integer getPreference(final ServiceReference<AuthProvider> reference) {
- final Object preferenceProperty = reference.getProperty(AuthConstants.SERVICE_PREFERENCE_KEY);
- return preferenceProperty == null ? Integer.MIN_VALUE : Integer.valueOf(preferenceProperty.toString());
- }
-
- private boolean isBetter(final Integer newServicePreference) {
- Preconditions.checkNotNull(newServicePreference);
- if(maxPreference == null) {
- return true;
- }
-
- return newServicePreference > maxPreference;
- }
-
- @Override
- public void modifiedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
- final AuthProvider authService = bundleContext.getService(reference);
- final Integer newServicePreference = getPreference(reference);
- if(isBetter(newServicePreference)) {
- logger.trace("Replacing modified service {} in netconf SSH.", reference);
- server.setAuthProvider(authService);
- }
- }
-
- @Override
- public void removedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
- logger.trace("Removing service {} from netconf SSH. " +
- "SSH won't authenticate users until AuthProvider service will be started.", reference);
- maxPreference = null;
- server.setAuthProvider(null);
- }
-
- public void stop() {
- listenerTracker.close();
- // sshThread should finish normally since sshServer.close stops processing
- }
-
- }
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.ssh.threads;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.base.Preconditions.checkState;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.Socket;
-
-import javax.annotation.concurrent.NotThreadSafe;
-import javax.annotation.concurrent.ThreadSafe;
-
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import ch.ethz.ssh2.AuthenticationResult;
-import ch.ethz.ssh2.PtySettings;
-import ch.ethz.ssh2.ServerAuthenticationCallback;
-import ch.ethz.ssh2.ServerConnection;
-import ch.ethz.ssh2.ServerConnectionCallback;
-import ch.ethz.ssh2.ServerSession;
-import ch.ethz.ssh2.ServerSessionCallback;
-import ch.ethz.ssh2.SimpleServerSessionCallback;
-
-import com.google.common.base.Supplier;
-
-import io.netty.bootstrap.Bootstrap;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufProcessor;
-import io.netty.buffer.Unpooled;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelFuture;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInboundHandlerAdapter;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.local.LocalAddress;
-import io.netty.channel.local.LocalChannel;
-import io.netty.handler.stream.ChunkedStream;
-
-/**
- * One instance represents per connection, responsible for ssh handshake.
- * Once auth succeeds and correct subsystem is chosen, backend connection with
- * netty netconf server is made. This task finishes right after negotiation is done.
- */
-@ThreadSafe
-public class Handshaker implements Runnable {
- private static final Logger logger = LoggerFactory.getLogger(Handshaker.class);
-
- private final ServerConnection ganymedConnection;
- private final String session;
-
-
- public Handshaker(Socket socket, LocalAddress localAddress, long sessionId, AuthProvider authProvider,
- EventLoopGroup bossGroup, final char[] pem) throws IOException {
-
- this.session = "Session " + sessionId;
-
- String remoteAddressWithPort = socket.getRemoteSocketAddress().toString().replace("/", "");
- logger.debug("{} started with {}", session, remoteAddressWithPort);
- String remoteAddress, remotePort;
- if (remoteAddressWithPort.contains(":")) {
- String[] split = remoteAddressWithPort.split(":");
- remoteAddress = split[0];
- remotePort = split[1];
- } else {
- remoteAddress = remoteAddressWithPort;
- remotePort = "";
- }
- ServerAuthenticationCallbackImpl serverAuthenticationCallback = new ServerAuthenticationCallbackImpl(
- authProvider, session);
-
- ganymedConnection = new ServerConnection(socket);
-
- ServerConnectionCallbackImpl serverConnectionCallback = new ServerConnectionCallbackImpl(
- serverAuthenticationCallback, remoteAddress, remotePort, session,
- getGanymedAutoCloseable(ganymedConnection), localAddress, bossGroup);
-
- // initialize ganymed
- ganymedConnection.setPEMHostKey(pem, null);
- ganymedConnection.setAuthenticationCallback(serverAuthenticationCallback);
- ganymedConnection.setServerConnectionCallback(serverConnectionCallback);
- }
-
-
- private static AutoCloseable getGanymedAutoCloseable(final ServerConnection ganymedConnection) {
- return new AutoCloseable() {
- @Override
- public void close() throws Exception {
- ganymedConnection.close();
- }
- };
- }
-
- @Override
- public void run() {
- // let ganymed process handshake
- logger.trace("{} is started", session);
- try {
- // TODO this should be guarded with a timer to prevent resource exhaustion
- ganymedConnection.connect();
- } catch (IOException e) {
- logger.debug("{} connection error", session, e);
- }
- logger.trace("{} is exiting", session);
- }
-}
-
-/**
- * Netty client handler that forwards bytes from backed server to supplied output stream.
- * When backend server closes the connection, remoteConnection.close() is called to tear
- * down ssh connection.
- */
-class SSHClientHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(SSHClientHandler.class);
- private final AutoCloseable remoteConnection;
- private final BufferedOutputStream remoteOutputStream;
- private final String session;
- private ChannelHandlerContext channelHandlerContext;
-
- public SSHClientHandler(AutoCloseable remoteConnection, OutputStream remoteOutputStream,
- String session) {
- this.remoteConnection = remoteConnection;
- this.remoteOutputStream = new BufferedOutputStream(remoteOutputStream);
- this.session = session;
- }
-
- @Override
- public void channelActive(ChannelHandlerContext ctx) {
- this.channelHandlerContext = ctx;
- logger.debug("{} Client active", session);
- }
-
- @Override
- public void channelRead(ChannelHandlerContext ctx, Object msg) throws IOException {
- ByteBuf bb = (ByteBuf) msg;
- // we can block the server here so that slow client does not cause memory pressure
- try {
- bb.forEachByte(new ByteBufProcessor() {
- @Override
- public boolean process(byte value) throws Exception {
- remoteOutputStream.write(value);
- return true;
- }
- });
- } finally {
- bb.release();
- }
- }
-
- @Override
- public void channelReadComplete(ChannelHandlerContext ctx) throws IOException {
- logger.trace("{} Flushing", session);
- remoteOutputStream.flush();
- }
-
- @Override
- public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- // Close the connection when an exception is raised.
- logger.warn("{} Unexpected exception from downstream", session, cause);
- ctx.close();
- }
-
- @Override
- public void channelInactive(ChannelHandlerContext ctx) throws Exception {
- logger.trace("{} channelInactive() called, closing remote client ctx", session);
- remoteConnection.close();//this should close socket and all threads created for this client
- this.channelHandlerContext = null;
- }
-
- public ChannelHandlerContext getChannelHandlerContext() {
- return checkNotNull(channelHandlerContext, "Channel is not active");
- }
-}
-
-/**
- * Ganymed handler that gets unencrypted input and output streams, connects them to netty.
- * Checks that 'netconf' subsystem is chosen by user.
- * Launches new ClientInputStreamPoolingThread thread once session is established.
- * Writes custom header to netty server, to inform it about IP address and username.
- */
-class ServerConnectionCallbackImpl implements ServerConnectionCallback {
- private static final Logger logger = LoggerFactory.getLogger(ServerConnectionCallbackImpl.class);
- public static final String NETCONF_SUBSYSTEM = "netconf";
-
- private final Supplier<String> currentUserSupplier;
- private final String remoteAddress;
- private final String remotePort;
- private final String session;
- private final AutoCloseable ganymedConnection;
- private final LocalAddress localAddress;
- private final EventLoopGroup bossGroup;
-
- ServerConnectionCallbackImpl(Supplier<String> currentUserSupplier, String remoteAddress, String remotePort, String session,
- AutoCloseable ganymedConnection, LocalAddress localAddress, EventLoopGroup bossGroup) {
- this.currentUserSupplier = currentUserSupplier;
- this.remoteAddress = remoteAddress;
- this.remotePort = remotePort;
- this.session = session;
- this.ganymedConnection = ganymedConnection;
- // initialize netty local connection
- this.localAddress = localAddress;
- this.bossGroup = bossGroup;
- }
-
- private static ChannelFuture initializeNettyConnection(LocalAddress localAddress, EventLoopGroup bossGroup,
- final SSHClientHandler sshClientHandler) {
- Bootstrap clientBootstrap = new Bootstrap();
- clientBootstrap.group(bossGroup).channel(LocalChannel.class);
-
- clientBootstrap.handler(new ChannelInitializer<LocalChannel>() {
- @Override
- public void initChannel(LocalChannel ch) throws Exception {
- ch.pipeline().addLast(sshClientHandler);
- }
- });
- // asynchronously initialize local connection to netconf server
- return clientBootstrap.connect(localAddress);
- }
-
- @Override
- public ServerSessionCallback acceptSession(final ServerSession serverSession) {
- String currentUser = currentUserSupplier.get();
- final String additionalHeader = new NetconfHelloMessageAdditionalHeader(currentUser, remoteAddress,
- remotePort, "ssh", "client").toFormattedString();
-
-
- return new SimpleServerSessionCallback() {
- @Override
- public Runnable requestSubsystem(final ServerSession ss, final String subsystem) throws IOException {
- return new Runnable() {
- @Override
- public void run() {
- if (NETCONF_SUBSYSTEM.equals(subsystem)) {
- // connect
- final SSHClientHandler sshClientHandler = new SSHClientHandler(ganymedConnection, ss.getStdin(), session);
- ChannelFuture clientChannelFuture = initializeNettyConnection(localAddress, bossGroup, sshClientHandler);
- // get channel
- final Channel channel = clientChannelFuture.awaitUninterruptibly().channel();
-
- // write additional header before polling thread is started
- // polling thread could process and forward data before additional header is written
- // This will result into unexpected state: hello message without additional header and the next message with additional header
- channel.writeAndFlush(Unpooled.copiedBuffer(additionalHeader.getBytes()));
-
- new ClientInputStreamPoolingThread(session, ss.getStdout(), channel, new AutoCloseable() {
- @Override
- public void close() throws Exception {
- logger.trace("Closing both ganymed and local connection");
- try {
- ganymedConnection.close();
- } catch (Exception e) {
- logger.warn("Ignoring exception while closing ganymed", e);
- }
- try {
- channel.close();
- } catch (Exception e) {
- logger.warn("Ignoring exception while closing channel", e);
- }
- }
- }, sshClientHandler.getChannelHandlerContext()).start();
- } else {
- logger.debug("{} Wrong subsystem requested:'{}', closing ssh session", serverSession, subsystem);
- String reason = "Only netconf subsystem is supported, requested:" + subsystem;
- closeSession(ss, reason);
- }
- }
- };
- }
-
- public void closeSession(ServerSession ss, String reason) {
- logger.trace("{} Closing session - {}", serverSession, reason);
- try {
- ss.getStdin().write(reason.getBytes());
- } catch (IOException e) {
- logger.warn("{} Exception while closing session", serverSession, e);
- }
- ss.close();
- }
-
- @Override
- public Runnable requestPtyReq(final ServerSession ss, final PtySettings pty) throws IOException {
- return new Runnable() {
- @Override
- public void run() {
- closeSession(ss, "PTY request not supported");
- }
- };
- }
-
- @Override
- public Runnable requestShell(final ServerSession ss) throws IOException {
- return new Runnable() {
- @Override
- public void run() {
- closeSession(ss, "Shell not supported");
- }
- };
- }
- };
- }
-}
-
-/**
- * Only thread that is required during ssh session, forwards client's input to netty.
- * When user closes connection, onEndOfInput.close() is called to tear down the local channel.
- */
-class ClientInputStreamPoolingThread extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(ClientInputStreamPoolingThread.class);
-
- private final InputStream fromClientIS;
- private final Channel serverChannel;
- private final AutoCloseable onEndOfInput;
- private final ChannelHandlerContext channelHandlerContext;
-
- ClientInputStreamPoolingThread(String session, InputStream fromClientIS, Channel serverChannel, AutoCloseable onEndOfInput,
- ChannelHandlerContext channelHandlerContext) {
- super(ClientInputStreamPoolingThread.class.getSimpleName() + " " + session);
- this.fromClientIS = fromClientIS;
- this.serverChannel = serverChannel;
- this.onEndOfInput = onEndOfInput;
- this.channelHandlerContext = channelHandlerContext;
- }
-
- @Override
- public void run() {
- ChunkedStream chunkedStream = new ChunkedStream(fromClientIS);
- try {
- ByteBuf byteBuf;
- while ((byteBuf = chunkedStream.readChunk(channelHandlerContext/*only needed for ByteBuf alloc */)) != null) {
- serverChannel.writeAndFlush(byteBuf);
- }
- } catch (Exception e) {
- logger.warn("Exception", e);
- } finally {
- logger.trace("End of input");
- // tear down connection
- try {
- onEndOfInput.close();
- } catch (Exception e) {
- logger.warn("Ignoring exception while closing socket", e);
- }
- }
- }
-}
-
-/**
- * Authentication handler for ganymed.
- * Provides current user name after authenticating using supplied AuthProvider.
- */
-@NotThreadSafe
-class ServerAuthenticationCallbackImpl implements ServerAuthenticationCallback, Supplier<String> {
- private static final Logger logger = LoggerFactory.getLogger(ServerAuthenticationCallbackImpl.class);
- private final AuthProvider authProvider;
- private final String session;
- private String currentUser;
-
- ServerAuthenticationCallbackImpl(AuthProvider authProvider, String session) {
- this.authProvider = authProvider;
- this.session = session;
- }
-
- @Override
- public String initAuthentication(ServerConnection sc) {
- logger.trace("{} Established connection", session);
- return "Established connection" + "\r\n";
- }
-
- @Override
- public String[] getRemainingAuthMethods(ServerConnection sc) {
- return new String[]{ServerAuthenticationCallback.METHOD_PASSWORD};
- }
-
- @Override
- public AuthenticationResult authenticateWithNone(ServerConnection sc, String username) {
- return AuthenticationResult.FAILURE;
- }
-
- @Override
- public AuthenticationResult authenticateWithPassword(ServerConnection sc, String username, String password) {
- checkState(currentUser == null);
- try {
- if (authProvider.authenticated(username, password)) {
- currentUser = username;
- logger.trace("{} user {} authenticated", session, currentUser);
- return AuthenticationResult.SUCCESS;
- }
- } catch (Exception e) {
- logger.warn("{} Authentication failed", session, e);
- }
- return AuthenticationResult.FAILURE;
- }
-
- @Override
- public AuthenticationResult authenticateWithPublicKey(ServerConnection sc, String username, String algorithm,
- byte[] publicKey, byte[] signature) {
- return AuthenticationResult.FAILURE;
- }
-
- @Override
- public String get() {
- return currentUser;
- }
-}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
import com.google.common.base.Stopwatch;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.HashedWheelTimer;
import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
import org.junit.Test;
-import org.opendaylight.controller.netconf.auth.AuthProvider;
import org.opendaylight.controller.netconf.netty.EchoClientHandler.State;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.client.AsyncSshHandler;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SSHTest {
public static final Logger logger = LoggerFactory.getLogger(SSHTest.class);
public static final String AHOJ = "ahoj\n";
- private EventLoopGroup nettyGroup;
- HashedWheelTimer hashedWheelTimer;
- @Before
- public void setUp() throws Exception {
+ private static EventLoopGroup nettyGroup;
+ private static HashedWheelTimer hashedWheelTimer;
+ private static ExecutorService nioExec;
+ private static ScheduledExecutorService minaTimerEx;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
hashedWheelTimer = new HashedWheelTimer();
nettyGroup = new NioEventLoopGroup();
+ nioExec = Executors.newFixedThreadPool(1);
+ minaTimerEx = Executors.newScheduledThreadPool(1);
}
- @After
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void tearDown() throws Exception {
hashedWheelTimer.stop();
- nettyGroup.shutdownGracefully();
+ nettyGroup.shutdownGracefully().await();
+ minaTimerEx.shutdownNow();
+ nioExec.shutdownNow();
}
@Test
public void test() throws Exception {
new Thread(new EchoServer(), "EchoServer").start();
- AuthProvider authProvider = mock(AuthProvider.class);
- doReturn(true).when(authProvider).authenticated(anyString(), anyString());
- doReturn("auth").when(authProvider).toString();
-
- NetconfSSHServer netconfSSHServer = NetconfSSHServer.start(10831, NetconfConfigUtil.getNetconfLocalAddress(),
- new NioEventLoopGroup(), PEMGenerator.generate().toCharArray());
- netconfSSHServer.setAuthProvider(authProvider);
- InetSocketAddress address = netconfSSHServer.getLocalSocketAddress();
+ final InetSocketAddress addr = new InetSocketAddress("127.0.0.1", 10831);
+ final SshProxyServer sshProxyServer = new SshProxyServer(minaTimerEx, nettyGroup, nioExec);
+ sshProxyServer.bind(
+ new SshProxyServerConfigurationBuilder().setBindingAddress(addr).setLocalAddress(NetconfConfigUtil.getNetconfLocalAddress()).setAuthenticator(new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ }).setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString())).setIdleTimeout(Integer.MAX_VALUE).createSshProxyServerConfiguration());
- final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort()));
+ final EchoClientHandler echoClientHandler = connectClient(addr);
Stopwatch stopwatch = new Stopwatch().start();
- while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
- Thread.sleep(100);
+ while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 30) {
+ Thread.sleep(500);
}
assertTrue(echoClientHandler.isConnected());
logger.info("connected, writing to client");
echoClientHandler.write(AHOJ);
+
// check that server sent back the same string
stopwatch = stopwatch.reset().start();
- while (echoClientHandler.read().endsWith(AHOJ) == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
- Thread.sleep(100);
+ while (echoClientHandler.read().endsWith(AHOJ) == false && stopwatch.elapsed(TimeUnit.SECONDS) < 30) {
+ Thread.sleep(500);
}
+
try {
- String read = echoClientHandler.read();
+ final String read = echoClientHandler.read();
assertTrue(read + " should end with " + AHOJ, read.endsWith(AHOJ));
} finally {
logger.info("Closing socket");
- netconfSSHServer.close();
- netconfSSHServer.join();
+ sshProxyServer.close();
}
}
- public EchoClientHandler connectClient(InetSocketAddress address) {
+ public EchoClientHandler connectClient(final InetSocketAddress address) {
final EchoClientHandler echoClientHandler = new EchoClientHandler();
- ChannelInitializer<NioSocketChannel> channelInitializer = new ChannelInitializer<NioSocketChannel>() {
+ final ChannelInitializer<NioSocketChannel> channelInitializer = new ChannelInitializer<NioSocketChannel>() {
@Override
- public void initChannel(NioSocketChannel ch) throws Exception {
+ public void initChannel(final NioSocketChannel ch) throws Exception {
ch.pipeline().addFirst(AsyncSshHandler.createForNetconfSubsystem(new LoginPassword("a", "a")));
ch.pipeline().addLast(echoClientHandler);
}
};
- Bootstrap b = new Bootstrap();
+ final Bootstrap b = new Bootstrap();
b.group(nettyGroup)
.channel(NioSocketChannel.class)
@Test
public void testClientWithoutServer() throws Exception {
- InetSocketAddress address = new InetSocketAddress(12345);
+ final InetSocketAddress address = new InetSocketAddress(12345);
final EchoClientHandler echoClientHandler = connectClient(address);
- Stopwatch stopwatch = new Stopwatch().start();
+ final Stopwatch stopwatch = new Stopwatch().start();
while(echoClientHandler.getState() == State.CONNECTING && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
Thread.sleep(100);
}
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
-import ch.ethz.ssh2.Connection;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
-import java.io.InputStream;
import java.net.InetSocketAddress;
-import junit.framework.Assert;
-import org.apache.commons.io.IOUtils;
+import java.nio.file.Files;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.sshd.ClientSession;
+import org.apache.sshd.SshClient;
+import org.apache.sshd.client.future.AuthFuture;
+import org.apache.sshd.client.future.ConnectFuture;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceListener;
private static final String PASSWORD = "netconf";
private static final String HOST = "127.0.0.1";
private static final int PORT = 1830;
- private static final InetSocketAddress tcpAddress = new InetSocketAddress("127.0.0.1", 8383);
private static final Logger logger = LoggerFactory.getLogger(SSHServerTest.class);
- private Thread sshServerThread;
+
+ private SshProxyServer server;
@Mock
private BundleContext mockedContext;
-
+ private final ExecutorService nioExec = Executors.newFixedThreadPool(1);
+ private final EventLoopGroup clientGroup = new NioEventLoopGroup();
+ private final ScheduledExecutorService minaTimerEx = Executors.newScheduledThreadPool(1);
@Before
public void setUp() throws Exception {
doReturn(new ServiceReference[0]).when(mockedContext).getServiceReferences(anyString(), anyString());
logger.info("Creating SSH server");
- String pem;
- try (InputStream is = getClass().getResourceAsStream("/RSA.pk")) {
- pem = IOUtils.toString(is);
- }
-
- EventLoopGroup bossGroup = new NioEventLoopGroup();
- NetconfSSHServer server = NetconfSSHServer.start(PORT, NetconfConfigUtil.getNetconfLocalAddress(),
- bossGroup, pem.toCharArray());
- server.setAuthProvider(new AuthProvider() {
- @Override
- public boolean authenticated(final String username, final String password) {
- return true;
- }
- });
-
- sshServerThread = new Thread(server);
- sshServerThread.setDaemon(true);
- sshServerThread.start();
- logger.info("SSH server on " + PORT);
+ final InetSocketAddress addr = InetSocketAddress.createUnresolved(HOST, PORT);
+ server = new SshProxyServer(minaTimerEx, clientGroup, nioExec);
+ server.bind(
+ new SshProxyServerConfigurationBuilder().setBindingAddress(addr).setLocalAddress(NetconfConfigUtil.getNetconfLocalAddress()).setAuthenticator(new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ }).setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString())).setIdleTimeout(Integer.MAX_VALUE).createSshProxyServerConfiguration());
+ logger.info("SSH server started on " + PORT);
}
@Test
- public void connect() {
+ public void connect() throws Exception {
+ final SshClient sshClient = SshClient.setUpDefaultClient();
+ sshClient.start();
try {
- Connection conn = new Connection(HOST, PORT);
- Assert.assertNotNull(conn);
- logger.info("connecting to SSH server");
- conn.connect();
- logger.info("authenticating ...");
- boolean isAuthenticated = conn.authenticateWithPassword(USER, PASSWORD);
- Assert.assertTrue(isAuthenticated);
- } catch (Exception e) {
- logger.error("Error while starting SSH server.", e);
+ final ConnectFuture connect = sshClient.connect(USER, HOST, PORT);
+ connect.await(30, TimeUnit.SECONDS);
+ org.junit.Assert.assertTrue(connect.isConnected());
+ final ClientSession session = connect.getSession();
+ session.addPasswordIdentity(PASSWORD);
+ final AuthFuture auth = session.auth();
+ auth.await(30, TimeUnit.SECONDS);
+ org.junit.Assert.assertTrue(auth.isSuccess());
+ } finally {
+ sshClient.close(true);
+ server.close();
+ clientGroup.shutdownGracefully().await();
+ minaTimerEx.shutdownNow();
+ nioExec.shutdownNow();
}
-
}
}
private final File configDir;
private final List<Integer> openDevices;
- private final File ncFeatureFile;
+ private final List<File> ncFeatureFiles;
private final File etcDir;
private final File loadOrderCfgFile;
this.configDir = new File(directory, ETC_OPENDAYLIGHT_KARAF_PATH);
this.etcDir = new File(directory, ETC_KARAF_PATH);
this.loadOrderCfgFile = new File(etcDir, ORG_OPS4J_PAX_URL_MVN_CFG);
- this.ncFeatureFile = getFeatureFile(directory, "features-netconf-connector");
+ this.ncFeatureFiles = getFeatureFile(directory, "features-netconf-connector", "xml");
this.openDevices = openDevices;
}
public void updateFeatureFile(final List<File> generated) {
- // TODO karaf core contains jaxb for feature files, use that for modification
+ // TODO karaf core contains jaxb for feature files, use that for
+ // modification
try {
- final Document document = XmlUtil.readXmlToDocument(Files.toString(ncFeatureFile, Charsets.UTF_8));
- final NodeList childNodes = document.getDocumentElement().getChildNodes();
-
- for (int i = 0; i < childNodes.getLength(); i++) {
- final Node item = childNodes.item(i);
- if(item instanceof Element == false) {
- continue;
- }
- if(item.getLocalName().equals("feature") ==false) {
- continue;
- }
-
- if(NETCONF_CONNECTOR_ALL_FEATURE.equals(((Element) item).getAttribute("name"))) {
- final Element ncAllFeatureDefinition = (Element) item;
- // Clean previous generated files
- for (final XmlElement configfile : XmlElement.fromDomElement(ncAllFeatureDefinition).getChildElements("configfile")) {
- ncAllFeatureDefinition.removeChild(configfile.getDomElement());
+ for (final File featureFile : ncFeatureFiles) {
+ final Document document = XmlUtil.readXmlToDocument(Files
+ .toString(featureFile, Charsets.UTF_8));
+ final NodeList childNodes = document.getDocumentElement().getChildNodes();
+
+ for (int i = 0; i < childNodes.getLength(); i++) {
+ final Node item = childNodes.item(i);
+ if (item instanceof Element == false) {
+ continue;
}
- for (final File file : generated) {
- final Element configfile = document.createElement("configfile");
- configfile.setTextContent("file:" + ETC_OPENDAYLIGHT_KARAF_PATH + file.getName());
- configfile.setAttribute("finalname", ETC_OPENDAYLIGHT_KARAF_PATH + file.getName());
- ncAllFeatureDefinition.appendChild(configfile);
+ if (item.getLocalName().equals("feature") == false) {
+ continue;
+ }
+
+ if (NETCONF_CONNECTOR_ALL_FEATURE
+ .equals(((Element) item).getAttribute("name"))) {
+ final Element ncAllFeatureDefinition = (Element) item;
+ // Clean previous generated files
+ for (final XmlElement configfile : XmlElement
+ .fromDomElement(ncAllFeatureDefinition)
+ .getChildElements("configfile")) {
+ ncAllFeatureDefinition.removeChild(configfile.getDomElement());
+ }
+ for (final File file : generated) {
+ final Element configfile = document.createElement("configfile");
+ configfile.setTextContent("file:"
+ + ETC_OPENDAYLIGHT_KARAF_PATH
+ + file.getName());
+ configfile.setAttribute(
+ "finalname",
+ ETC_OPENDAYLIGHT_KARAF_PATH
+ + file.getName());
+ ncAllFeatureDefinition.appendChild(configfile);
+ }
}
}
- }
- Files.write(XmlUtil.toString(document), ncFeatureFile, Charsets.UTF_8);
- LOG.info("Feature file {} updated", ncFeatureFile);
+ Files.write(XmlUtil.toString(document), featureFile,Charsets.UTF_8);
+ LOG.info("Feature file {} updated", featureFile);
+ }
} catch (final IOException e) {
throw new RuntimeException("Unable to load features file as a resource");
} catch (final SAXException e) {
}
- private static File getFeatureFile(final File distroFolder, final String featureName) {
+ private static List<File> getFeatureFile(final File distroFolder, final String featureName, final String suffix) {
checkExistingDir(distroFolder, String.format("Folder %s does not exist", distroFolder));
final File systemDir = checkExistingDir(new File(distroFolder, "system"), String.format("Folder %s does not contain a karaf distro, folder system is missing", distroFolder));
}
});
- return newestVersionDir.listFiles(new FileFilter() {
+ return Lists.newArrayList(newestVersionDir.listFiles(new FileFilter() {
@Override
public boolean accept(final File pathname) {
- return pathname.getName().contains(featureName);
+ return pathname.getName().contains(featureName)
+ && Files.getFileExtension(pathname.getName()).equals(suffix);
}
- })[0];
+ }));
}
private static File checkExistingDir(final File folder, final String msg) {
import com.google.common.io.CharStreams;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.local.LocalAddress;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.HashedWheelTimer;
import java.io.Closeable;
-import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.UnknownHostException;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.AbstractMap;
import java.util.Date;
import java.util.HashMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTreeWalker;
+import org.apache.sshd.common.util.ThreadUtils;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringOperationService;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfiguration;
+import org.opendaylight.controller.netconf.ssh.SshProxyServerConfigurationBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceRepresentation;
private final NioEventLoopGroup nettyThreadgroup;
private final HashedWheelTimer hashedWheelTimer;
private final List<Channel> devicesChannels = Lists.newArrayList();
+ private final List<SshProxyServer> sshWrappers = Lists.newArrayList();
+ private final ScheduledExecutorService minaTimerExecutor;
+ private final ExecutorService nioExecutor;
public NetconfDeviceSimulator() {
- this(new NioEventLoopGroup(), new HashedWheelTimer());
+ // TODO make pool size configurable
+ this(new NioEventLoopGroup(), new HashedWheelTimer(),
+ Executors.newScheduledThreadPool(8, new ThreadFactoryBuilder().setNameFormat("netconf-ssh-server-mina-timers-%d").build()),
+ ThreadUtils.newFixedThreadPool("netconf-ssh-server-nio-group", 8));
}
- public NetconfDeviceSimulator(final NioEventLoopGroup eventExecutors, final HashedWheelTimer hashedWheelTimer) {
+ private NetconfDeviceSimulator(final NioEventLoopGroup eventExecutors, final HashedWheelTimer hashedWheelTimer, final ScheduledExecutorService minaTimerExecutor, final ExecutorService nioExecutor) {
this.nettyThreadgroup = eventExecutors;
this.hashedWheelTimer = hashedWheelTimer;
+ this.minaTimerExecutor = minaTimerExecutor;
+ this.nioExecutor = nioExecutor;
}
private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
int currentPort = params.startingPort;
final List<Integer> openDevices = Lists.newArrayList();
+
+ // Generate the SSH host key into a temporary file
+ final PEMGeneratorHostKeyProvider keyPairProvider = getPemGeneratorHostKeyProvider();
+
for (int i = 0; i < params.deviceCount; i++) {
final InetSocketAddress address = getAddress(currentPort);
final ChannelFuture server;
if(params.ssh) {
+ final InetSocketAddress bindingAddress = InetSocketAddress.createUnresolved("0.0.0.0", currentPort);
final LocalAddress tcpLocalAddress = new LocalAddress(address.toString());
server = dispatcher.createLocalServer(tcpLocalAddress);
try {
- final NetconfSSHServer sshServer = NetconfSSHServer.start(currentPort, tcpLocalAddress, nettyThreadgroup, getPemArray());
- sshServer.setAuthProvider(new AcceptingAuthProvider());
+ final SshProxyServer sshServer = new SshProxyServer(minaTimerExecutor, nettyThreadgroup, nioExecutor);
+ sshServer.bind(getSshConfiguration(bindingAddress, tcpLocalAddress));
+ sshWrappers.add(sshServer);
} catch (final Exception e) {
LOG.warn("Cannot start simulated device on {}, skipping", address, e);
// Close local server and continue
return openDevices;
}
- private char[] getPemArray() {
+ private SshProxyServerConfiguration getSshConfiguration(final InetSocketAddress bindingAddress, final LocalAddress tcpLocalAddress) throws IOException {
+ return new SshProxyServerConfigurationBuilder()
+ .setBindingAddress(bindingAddress)
+ .setLocalAddress(tcpLocalAddress)
+ .setAuthenticator(new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ })
+ .setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString()))
+ .setIdleTimeout(Integer.MAX_VALUE)
+ .createSshProxyServerConfiguration();
+ }
+
+ private PEMGeneratorHostKeyProvider getPemGeneratorHostKeyProvider() {
try {
- return PEMGenerator.readOrGeneratePK(new File("PK")).toCharArray();
+ final Path tempFile = Files.createTempFile("tempKeyNetconfTest", "suffix");
+ return new PEMGeneratorHostKeyProvider(tempFile.toAbsolutePath().toString());
} catch (final IOException e) {
+ LOG.error("Unable to generate PEM key", e);
throw new RuntimeException(e);
}
}
@Override
public void close() {
+ for (final SshProxyServer sshWrapper : sshWrappers) {
+ sshWrapper.close();
+ }
for (final Channel deviceCh : devicesChannels) {
deviceCh.close();
}
nettyThreadgroup.shutdownGracefully();
+ minaTimerExecutor.shutdownNow();
+ nioExecutor.shutdownNow();
// close Everything
}
package org.opendaylight.controller.netconf.util.mapping;
+import com.google.common.base.Optional;
import java.util.Map;
-
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.w3c.dom.Attr;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
-import com.google.common.base.Optional;
-
public abstract class AbstractNetconfOperation implements NetconfOperation {
private final String netconfSessionIdForReporting;
- private static final Logger logger = LoggerFactory.getLogger(AbstractNetconfOperation.class);
- protected AbstractNetconfOperation(String netconfSessionIdForReporting) {
+ protected AbstractNetconfOperation(final String netconfSessionIdForReporting) {
this.netconfSessionIdForReporting = netconfSessionIdForReporting;
}
}
@Override
- public HandlingPriority canHandle(Document message) throws NetconfDocumentedException {
+ public HandlingPriority canHandle(final Document message) throws NetconfDocumentedException {
OperationNameAndNamespace operationNameAndNamespace = null;
operationNameAndNamespace = new OperationNameAndNamespace(message);
return canHandle(operationNameAndNamespace.getOperationName(), operationNameAndNamespace.getNamespace());
private final String operationName, namespace;
private final XmlElement operationElement;
- public OperationNameAndNamespace(Document message) throws NetconfDocumentedException {
+ public OperationNameAndNamespace(final Document message) throws NetconfDocumentedException {
XmlElement requestElement = null;
requestElement = getRequestElementWithCheck(message);
operationElement = requestElement.getOnlyChildElement();
}
}
- protected static XmlElement getRequestElementWithCheck(Document message) throws NetconfDocumentedException {
+ protected static XmlElement getRequestElementWithCheck(final Document message) throws NetconfDocumentedException {
return XmlElement.fromDomElementWithExpected(message.getDocumentElement(), XmlNetconfConstants.RPC_KEY,
XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
}
- protected HandlingPriority canHandle(String operationName, String operationNamespace) {
+ protected HandlingPriority canHandle(final String operationName, final String operationNamespace) {
return operationName.equals(getOperationName()) && operationNamespace.equals(getOperationNamespace())
? getHandlingPriority()
: HandlingPriority.CANNOT_HANDLE;
protected abstract String getOperationName();
@Override
- public Document handle(Document requestMessage,
- NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+ public Document handle(final Document requestMessage,
+ final NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
XmlElement requestElement = getRequestElementWithCheck(requestMessage);
public final class XmlElement {
+ public static final String DEFAULT_NAMESPACE_PREFIX = "";
+
private final Element element;
private static final Logger logger = LoggerFactory.getLogger(XmlElement.class);
return xmlElement;
}
- private static Map<String, String> extractNamespaces(Element typeElement) throws NetconfDocumentedException {
+ private Map<String, String> extractNamespaces() throws NetconfDocumentedException {
Map<String, String> namespaces = new HashMap<>();
- NamedNodeMap attributes = typeElement.getAttributes();
+ NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
Node attribute = attributes.item(i);
String attribKey = attribute.getNodeName();
if (attribKey.startsWith(XmlUtil.XMLNS_ATTRIBUTE_KEY)) {
String prefix;
if (attribKey.equals(XmlUtil.XMLNS_ATTRIBUTE_KEY)) {
- prefix = "";
+ prefix = DEFAULT_NAMESPACE_PREFIX;
} else {
if (!attribKey.startsWith(XmlUtil.XMLNS_ATTRIBUTE_KEY + ":")){
throw new NetconfDocumentedException("Attribute doesn't start with :",
namespaces.put(prefix, attribute.getNodeValue());
}
}
+
+ // the default namespace need not be declared on this element; it may be inherited from an ancestor
+ if(!namespaces.containsKey(DEFAULT_NAMESPACE_PREFIX)) {
+ Optional<String> namespaceOptionally = getNamespaceOptionally();
+ if(namespaceOptionally.isPresent()) {
+ namespaces.put(DEFAULT_NAMESPACE_PREFIX, namespaceOptionally.get());
+ }
+ }
+
return namespaces;
}
}
public String getName() {
- if (element.getLocalName()!=null && !element.getLocalName().equals("")){
+ if (element.getLocalName()!=null && !element.getLocalName().equals(DEFAULT_NAMESPACE_PREFIX)){
return element.getLocalName();
}
return element.getTagName();
public String getTextContent() throws NetconfDocumentedException {
NodeList childNodes = element.getChildNodes();
if (childNodes.getLength() == 0) {
- return "";
+ return DEFAULT_NAMESPACE_PREFIX;
}
for(int i = 0; i < childNodes.getLength(); i++) {
Node textChild = childNodes.item(i);
public String getNamespaceAttribute() throws MissingNameSpaceException {
String attribute = element.getAttribute(XmlUtil.XMLNS_ATTRIBUTE_KEY);
- if (attribute == null || attribute.equals("")){
+ if (attribute == null || attribute.equals(DEFAULT_NAMESPACE_PREFIX)){
throw new MissingNameSpaceException(String.format("Element %s must specify namespace",
toString()),
NetconfDocumentedException.ErrorType.application,
* is found value will be null.
*/
public Map.Entry<String/* prefix */, String/* namespace */> findNamespaceOfTextContent() throws NetconfDocumentedException {
- Map<String, String> namespaces = extractNamespaces(element);
+ Map<String, String> namespaces = extractNamespaces();
String textContent = getTextContent();
int indexOfColon = textContent.indexOf(':');
String prefix;
if (indexOfColon > -1) {
prefix = textContent.substring(0, indexOfColon);
} else {
- prefix = "";
+ prefix = DEFAULT_NAMESPACE_PREFIX;
}
if (!namespaces.containsKey(prefix)) {
throw new IllegalArgumentException("Cannot find namespace for " + XmlUtil.toString(element) + ". Prefix from content is "
*/
package org.opendaylight.controller.netconf.util;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
import org.junit.Test;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.w3c.dom.Document;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
-
public class NetconfUtilTest {
@Test
package org.opendaylight.controller.netconf.util.messages;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import com.google.common.base.Optional;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.collections.Sets;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
public class NetconfHelloMessageTest {
Set<String> caps;
@Before
- public void setUp() throws Exception {
+ public void setUp() {
caps = Sets.newSet("cap1");
}
@Test
- public void testConstructor() throws Exception {
+ public void testConstructor() throws NetconfDocumentedException {
NetconfHelloMessageAdditionalHeader additionalHeader = new NetconfHelloMessageAdditionalHeader("name","host","1","transp","id");
NetconfHelloMessage message = NetconfHelloMessage.createClientHello(caps, Optional.of(additionalHeader));
- assertTrue(message.isHelloMessage(message));
+ assertTrue(NetconfHelloMessage.isHelloMessage(message));
assertEquals(Optional.of(additionalHeader), message.getAdditionalHeader());
NetconfHelloMessage serverMessage = NetconfHelloMessage.createServerHello(caps, 100L);
- assertTrue(serverMessage.isHelloMessage(serverMessage));
+ assertTrue(NetconfHelloMessage.isHelloMessage(serverMessage));
}
}
package org.opendaylight.controller.netconf.util.messages;
-import com.google.common.base.Charsets;
-import java.util.Arrays;
-import org.junit.Test;
-
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+import com.google.common.base.Charsets;
+import org.junit.Test;
+@Deprecated
public class NetconfMessageHeaderTest {
@Test
public void testGet() throws Exception {
package org.opendaylight.controller.netconf.util.osgi;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import com.google.common.base.Optional;
import io.netty.channel.local.LocalAddress;
import java.net.InetSocketAddress;
import org.junit.Before;
import org.junit.Test;
-import org.opendaylight.controller.netconf.util.NetconfUtil;
import org.osgi.framework.BundleContext;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
public class NetconfConfigUtilTest {
private BundleContext bundleContext;
-<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" a="64" message-id="a">\r
-<start-exi xmlns="urn:ietf:params:xml:ns:netconf:exi:1.0">\r
-<alignment>pre-compression</alignment>\r
-<fidelity>\r
-<dtd/>\r
-<lexical-values/>\r
-</fidelity>\r
-</start-exi>\r
-</rpc>
\ No newline at end of file
+<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" a="64" message-id="a">
+<start-exi xmlns="urn:ietf:params:xml:ns:netconf:exi:1.0">
+<alignment>pre-compression</alignment>
+<fidelity>
+<dtd/>
+<lexical-values/>
+</fidelity>
+</start-exi>
+</rpc>
-<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">\r
- <stop-exi xmlns="urn:ietf:params:xml:ns:netconf:exi:1.0"/>\r
-</rpc>
\ No newline at end of file
+<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <stop-exi xmlns="urn:ietf:params:xml:ns:netconf:exi:1.0"/>
+</rpc>
<module>netconf-auth</module>
<module>netconf-usermanager</module>
<module>netconf-testtool</module>
+
+ <module>netconf-artifacts</module>
</modules>
<dependencies>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
- <version>2.12</version>
<configuration>
<failsOnError>false</failsOnError>
<failOnViolation>false</failOnViolation>
@QueryParam("name") String queryFirewallPolicyName,
@QueryParam("description") String querySecurityPolicyDescription,
@QueryParam("shared") String querySecurityPolicyIsShared,
- @QueryParam("firewall_rules") List querySecurityPolicyFirewallRules,
+ @QueryParam("firewall_rules") List<String> querySecurityPolicyFirewallRules,
@QueryParam("audited") Boolean querySecurityPolicyIsAudited,
// pagination
@QueryParam("limit") String limit,
package org.opendaylight.controller.networkconfig.neutron.northbound;
import java.util.List;
-
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
-
import org.opendaylight.controller.networkconfig.neutron.NeutronNetwork;
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
-public class NeutronNetworkRequest implements INeutronRequest {
+public class NeutronNetworkRequest implements INeutronRequest<NeutronNetwork> {
// See OpenStack Network API v2.0 Reference for description of
// annotated attributes
singletonNetwork = net;
}
+ @Override
public NeutronNetwork getSingleton() {
return singletonNetwork;
}
+ @Override
public boolean isSingleton() {
return (singletonNetwork != null);
}
+ @Override
public List<NeutronNetwork> getBulk() {
return bulkRequest;
}
package org.opendaylight.controller.networkconfig.neutron.northbound;
import java.util.List;
-
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
-
import org.opendaylight.controller.networkconfig.neutron.NeutronPort;
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
-public class NeutronPortRequest implements INeutronRequest {
+public class NeutronPortRequest implements INeutronRequest<NeutronPort> {
// See OpenStack Network API v2.0 Reference for description of
// annotated attributes
singletonPort = port;
}
+ @Override
public NeutronPort getSingleton() {
return singletonPort;
}
+ @Override
public boolean isSingleton() {
return (singletonPort != null);
}
+ @Override
public List<NeutronPort> getBulk() {
return bulkRequest;
}
package org.opendaylight.controller.networkconfig.neutron.northbound;
import java.util.List;
-
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
-
import org.opendaylight.controller.networkconfig.neutron.NeutronSubnet;
@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE)
-
-public class NeutronSubnetRequest implements INeutronRequest {
+public class NeutronSubnetRequest implements INeutronRequest<NeutronSubnet> {
// See OpenStack Network API v2.0 Reference for description of
// annotated attributes
links = null;
}
+ @Override
public NeutronSubnet getSingleton() {
return singletonSubnet;
}
+ @Override
public List<NeutronSubnet> getBulk() {
return bulkRequest;
}
+ @Override
public boolean isSingleton() {
return (singletonSubnet != null);
}
package org.opendaylight.controller.networkconfig.neutron.northbound;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import javax.ws.rs.core.UriInfo;
import org.opendaylight.controller.networkconfig.neutron.INeutronObject;
import org.opendaylight.controller.networkconfig.neutron.NeutronNetwork;
import org.opendaylight.controller.networkconfig.neutron.NeutronPort;
import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
-import javax.ws.rs.core.UriInfo;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-
public class PaginatedRequestFactory {
+ private static final Comparator<INeutronObject> NEUTRON_OBJECT_COMPARATOR = new Comparator<INeutronObject>() {
+ @Override
+ public int compare(INeutronObject o1, INeutronObject o2) {
+ return o1.getID().compareTo(o2.getID());
+ }
+ };
public static class PaginationResults<T extends INeutronObject> {
List<T> collection;
}
}
- public static <T extends INeutronObject> INeutronRequest createRequest(Integer limit, String marker,
+ private static final class MarkerObject implements INeutronObject {
+ private final String id;
+
+ MarkerObject(String id) {
+ this.id = id;
+ }
+
+ @Override
+ public String getID() {
+ return id;
+ }
+
+ @Override
+ public void setID(String id) {
+ throw new UnsupportedOperationException("Marker has constant ID");
+ }
+ }
+
+ /*
+ * SuppressWarnings is needed because the compiler does not understand that we
+ * are actually safe here.
+ *
+ * FIXME: the only caller performs a cast back, so this is not actually necessary.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T extends INeutronObject> INeutronRequest<T> createRequest(Integer limit, String marker,
Boolean pageReverse,
UriInfo uriInfo,
List<T> collection,
Class<T> clazz) {
- PaginationResults results = _paginate(limit, marker, pageReverse, uriInfo, collection);
+ PaginationResults<T> results = _paginate(limit, marker, pageReverse, uriInfo, collection);
if (clazz.equals(NeutronNetwork.class)){
- return new NeutronNetworkRequest(results.collection, results.links);
+ return (INeutronRequest<T>) new NeutronNetworkRequest((List<NeutronNetwork>) results.collection, results.links);
}
if (clazz.equals(NeutronSubnet.class)){
- return new NeutronSubnetRequest(results.collection, results.links);
+ return (INeutronRequest<T>) new NeutronSubnetRequest((List<NeutronSubnet>) results.collection, results.links);
}
if (clazz.equals(NeutronPort.class)){
- return new NeutronPortRequest(results.collection, results.links);
+ return (INeutronRequest<T>) new NeutronPortRequest((List<NeutronPort>) results.collection, results.links);
}
return null;
}
- private static <T extends INeutronObject> PaginationResults _paginate(Integer limit, String marker, Boolean pageReverse, UriInfo uriInfo, List<T> collection) {
+ private static <T extends INeutronObject> PaginationResults<T> _paginate(Integer limit, String marker, Boolean pageReverse, UriInfo uriInfo, List<T> collection) {
List<NeutronPageLink> links = new ArrayList<>();
- Integer startPos = null;
+ final int startPos;
String startMarker;
String endMarker;
Boolean firstPage = false;
Boolean lastPage = false;
- Comparator<INeutronObject> neutronObjectComparator = new Comparator<INeutronObject>() {
- @Override
- public int compare(INeutronObject o1, INeutronObject o2) {
- return o1.getID().compareTo(o2.getID());
- }
- };
-
- Collections.sort(collection, neutronObjectComparator);
-
- if (marker == null) {
- startPos = 0;
- }
-
- else {
-
- class MarkerObject implements INeutronObject {
- private String id;
-
- public String getID() {
- return id;
- }
+ Collections.sort(collection, NEUTRON_OBJECT_COMPARATOR);
- public void setID(String id) {
- this.id = id;
- }
+ if (marker != null) {
+ int offset = Collections.binarySearch(collection, new MarkerObject(marker), NEUTRON_OBJECT_COMPARATOR);
+ if (offset < 0) {
+ throw new ResourceNotFoundException("UUID for marker: " + marker + " could not be found");
}
- INeutronObject markerObject = new MarkerObject();
-
- markerObject.setID(marker);
-
- startPos = Collections.binarySearch(collection, markerObject, neutronObjectComparator);
-
- if (!pageReverse){
- startPos = startPos + 1;
+ if (!pageReverse) {
+ startPos = offset + 1;
}
else {
- startPos = startPos - limit;
+ startPos = offset - limit;
}
-
}
-
- if (startPos == null) {
- throw new ResourceNotFoundException("UUID for marker:" + marker + " could not be found");
+ else {
+ startPos = 0;
}
if (startPos == 0){
links.add(previous);
}
- return new PaginationResults(collection, links);
+ return new PaginationResults<T>(collection, links);
}
}
package org.opendaylight.controller.networkconfig.neutron;
import java.io.Serializable;
+import java.net.InetAddress;
+import java.net.Inet6Address;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
* a new subnet)
*/
public boolean isValidCIDR() {
- try {
- SubnetUtils util = new SubnetUtils(cidr);
- SubnetInfo info = util.getInfo();
- if (!info.getNetworkAddress().equals(info.getAddress())) {
+ // fix for Bug 2290 - apply the existing SubnetUtils-based check only
+ // to IPv4 addresses, because SubnetUtils does not support IPv6
+ if (ipVersion == 4) {
+ try {
+ SubnetUtils util = new SubnetUtils(cidr);
+ SubnetInfo info = util.getInfo();
+ if (!info.getNetworkAddress().equals(info.getAddress())) {
+ return false;
+ }
+ } catch (Exception e) {
return false;
}
- } catch (Exception e) {
- return false;
+ return true;
}
- return true;
+ if (ipVersion == 6) {
+ // fix for Bug 2290 - this is custom code because no classes
+ // with ODL-friendly licenses have been found
+ // extract the address (before the "/") and the prefix length (after the "/")
+ String[] parts = cidr.split("/");
+ if (parts.length != 2) {
+ return false;
+ }
+ try {
+ int length = Integer.parseInt(parts[1]);
+ // TODO: validate that length is within the range [0, 128]
+ // convert to byte array
+ byte[] addrBytes = ((Inet6Address) InetAddress.getByName(parts[0])).getAddress();
+ int i;
+ for (i=length; i<128; i++) { // verify every host bit beyond the prefix length is zero
+ if (((((int) addrBytes[i/8]) & 0x000000FF) & (1 << (7-(i%8)))) != 0) {
+ return(false);
+ }
+ }
+ return(true);
+ } catch (Exception e) {
+ return(false);
+ }
+ }
+ return false;
}
/* test to see if the gateway IP specified overlaps with specified
<maven>3.0</maven>
</prerequisites>
<modules>
- <module>opendaylight/distribution/opendaylight</module>
- <module>opendaylight/forwarding/staticrouting</module>
- <module>opendaylight/clustering/services</module>
- <module>opendaylight/clustering/services_implementation</module>
- <module>opendaylight/clustering/stub</module>
- <module>opendaylight/clustering/test</module>
- <module>opendaylight/configuration/api</module>
- <module>opendaylight/configuration/implementation</module>
- <module>opendaylight/routing/dijkstra_implementation</module>
- <module>opendaylight/arphandler</module>
- <module>opendaylight/forwardingrulesmanager/api</module>
- <module>opendaylight/forwardingrulesmanager/implementation</module>
- <module>opendaylight/hosttracker/api</module>
- <module>opendaylight/hosttracker/implementation</module>
- <module>opendaylight/hosttracker/shell</module>
- <module>opendaylight/hosttracker_new/api</module>
- <module>opendaylight/hosttracker_new/implementation</module>
- <module>opendaylight/containermanager/api</module>
- <module>opendaylight/containermanager/implementation</module>
- <module>opendaylight/containermanager/shell</module>
- <module>opendaylight/appauth</module>
- <module>opendaylight/switchmanager/api</module>
- <module>opendaylight/switchmanager/implementation</module>
- <module>opendaylight/statisticsmanager/api</module>
- <module>opendaylight/statisticsmanager/implementation</module>
- <module>opendaylight/topologymanager/implementation</module>
- <module>opendaylight/topologymanager/shell</module>
- <module>opendaylight/usermanager/api</module>
- <module>opendaylight/usermanager/implementation</module>
- <module>opendaylight/connectionmanager/api</module>
- <module>opendaylight/connectionmanager/implementation</module>
- <module>opendaylight/security</module>
- <module>opendaylight/karaf-tomcat-security</module>
-
- <!-- third-parties uncomment them if you need snapshot version of it -->
- <!-- <module>third-party/openflowj</module> -->
- <!-- <module>third-party/net.sf.jung2</module> -->
- <!-- <module>third-party/jersey-servlet</module> -->
- <!-- <module>third-party/org.apache.catalina.filters.CorsFilter</module> -->
- <module>third-party/ganymed</module>
-
- <module>third-party/commons/thirdparty</module>
-
- <!-- SAL bundles -->
- <module>opendaylight/sal/api</module>
- <module>opendaylight/sal/implementation</module>
-
- <!-- SAL Extension bundles -->
- <module>opendaylight/sal/connection/api</module>
- <module>opendaylight/sal/connection/implementation</module>
- <module>opendaylight/sal/networkconfiguration/api</module>
- <module>opendaylight/sal/networkconfiguration/implementation</module>
<!-- md-sal -->
<module>opendaylight/md-sal</module>
<!-- netconf -->
<module>opendaylight/netconf</module>
- <!-- Web bundles -->
- <module>opendaylight/web/root</module>
- <module>opendaylight/web/flows</module>
- <module>opendaylight/web/devices</module>
- <module>opendaylight/web/troubleshoot</module>
- <module>opendaylight/web/topology</module>
- <module>opendaylight/web/osgi-brandfragment</module>
+ <!-- adsal -->
+ <module>opendaylight/adsal</module>
<!-- Neutron -->
<module>opendaylight/networkconfiguration/neutron</module>
<module>opendaylight/networkconfiguration/neutron/implementation</module>
- <module>opendaylight/northbound/networkconfiguration/neutron</module>
-
- <!-- Northbound bundles -->
- <module>opendaylight/northbound/commons</module>
- <module>opendaylight/northbound/bundlescanner/api</module>
- <module>opendaylight/northbound/bundlescanner/implementation</module>
- <module>opendaylight/northbound/topology</module>
- <module>opendaylight/northbound/staticrouting</module>
- <module>opendaylight/northbound/statistics</module>
- <module>opendaylight/northbound/flowprogrammer</module>
- <module>opendaylight/northbound/hosttracker</module>
- <module>opendaylight/northbound/subnets</module>
- <module>opendaylight/northbound/switchmanager</module>
- <module>opendaylight/northbound/containermanager</module>
- <module>opendaylight/northbound/networkconfiguration/bridgedomain</module>
- <module>opendaylight/northbound/httpservice-bridge</module>
- <module>opendaylight/northbound/jolokia</module>
- <module>opendaylight/northbound/connectionmanager</module>
- <module>opendaylight/northbound/usermanager</module>
- <module>opendaylight/northbound/controllermanager</module>
-
- <!-- Debug and logging -->
- <module>opendaylight/logging/bridge</module>
-
- <!-- Southbound bundles -->
- <module>opendaylight/protocol_plugins/openflow</module>
- <module>opendaylight/protocol_plugins/stub</module>
-
- <!-- Samples -->
- <module>opendaylight/samples/simpleforwarding</module>
- <module>opendaylight/samples/loadbalancer</module>
- <module>opendaylight/samples/northbound/loadbalancer</module>
+ <module>opendaylight/networkconfiguration/neutron/northbound</module>
<!-- Parents -->
<module>opendaylight/commons/concepts</module>
<module>opendaylight/commons/protocol-framework</module>
- <module>opendaylight/commons/httpclient</module>
<module>opendaylight/commons/checkstyle</module>
<module>opendaylight/commons/opendaylight</module>
<module>opendaylight/commons/parent</module>
<module>opendaylight/commons/liblldp</module>
<!-- Karaf Distribution -->
- <module>opendaylight/dummy-console</module>
<module>opendaylight/karaf-branding</module>
<module>opendaylight/distribution/opendaylight-karaf-empty</module>
<module>opendaylight/distribution/opendaylight-karaf</module>
<tag>HEAD</tag>
<url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
</scm>
-
- <profiles>
- <profile>
- <id>integrationtests</id>
- <activation>
- <activeByDefault>false</activeByDefault>
- </activation>
- <modules>
- <module>opendaylight/clustering/integrationtest</module>
- <module>opendaylight/configuration/integrationtest</module>
- <module>opendaylight/forwardingrulesmanager/integrationtest</module>
- <module>opendaylight/hosttracker/integrationtest</module>
- <module>opendaylight/switchmanager/integrationtest</module>
- <module>opendaylight/topologymanager/integrationtest</module>
- <!-- Northbound integration tests -->
- <module>opendaylight/northbound/integrationtest</module>
- <module>opendaylight/statisticsmanager/integrationtest</module>
- <module>opendaylight/commons/integrationtest</module>
- <module>opendaylight/containermanager/it.implementation</module>
- <module>opendaylight/distribution/sanitytest/</module>
- </modules>
- </profile>
- <profile>
- <id>docs</id>
- <activation>
- <activeByDefault>false</activeByDefault>
- </activation>
- <modules>
- <module>opendaylight/northbound/java-client</module>
- <module>opendaylight/northbound/swagger-ui</module>
- </modules>
- </profile>
- </profiles>
</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <prerequisites>
- <maven>3.0</maven>
- </prerequisites>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <packaging>pom</packaging>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
-
- <properties>
- <sonar.host.url>https://sonar.opendaylight.org/</sonar.host.url>
- <nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
- <nexus.repository.release>opendaylight.release</nexus.repository.release>
- <nexus.repository.snapshot>opendaylight.snapshot</nexus.repository.snapshot>
- <sitedeploy>dav:http://nexus.opendaylight.org/content/sites/site</sitedeploy>
- <siteplugin>3.2</siteplugin>
- <projectinfo>2.6</projectinfo>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <compiler.version>2.3.2</compiler.version>
- <surefire.version>2.13</surefire.version>
- <releaseplugin.version>2.3.2</releaseplugin.version>
- <enforcer.version>1.3.1</enforcer.version>
- <bundle.plugin.version>2.3.7</bundle.plugin.version>
- </properties>
-
- <pluginRepositories>
- <pluginRepository>
- <id>central2</id>
- <name>central2</name>
- <url>http://repo2.maven.org/maven2</url>
- </pluginRepository>
- </pluginRepositories>
-
- <profiles>
- <profile>
- <id>fastreassembly</id>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-dependency-plugin</artifactId>
- <version>2.4</version>
- <executions>
- <execution>
- <id>copyfastreassembly</id>
- <phase>install</phase>
- <goals>
- <goal>copy</goal>
- </goals>
- <configuration>
- <artifactItems>
- <artifactItem>
- <groupId>${project.groupId}</groupId>
- <artifactId>${project.artifactId}</artifactId>
- <version>${project.version}</version>
- <destFileName>${project.groupId}.${project.artifactId}-${project.version}.jar</destFileName>
- </artifactItem>
- </artifactItems>
- <outputDirectory>${fastreassembly.directory}</outputDirectory>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- </profile>
- </profiles>
-
- <build>
- <plugins>
- <plugin>
- <groupId>com.googlecode.maven-java-formatter-plugin</groupId>
- <artifactId>maven-java-formatter-plugin</artifactId>
- <version>0.3.1</version>
- <configuration>
- <excludes>
- <exclude>**/*</exclude>
- </excludes>
- </configuration>
- </plugin>
- </plugins>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-release-plugin</artifactId>
- <version>${releaseplugin.version}</version>
- </plugin>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
- <extensions>true</extensions>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-site-plugin</artifactId>
- <version>${siteplugin}</version>
- <configuration>
- <reportPlugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-project-info-reports-plugin</artifactId>
- <version>${projectinfo}</version>
- <configuration>
- <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
- <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
- </configuration>
- <reports>
- <report>index</report>
- <report>project-team</report>
- <report>license</report>
- <report>mailing-list</report>
- <report>plugin-management</report>
- <report>cim</report>
- <report>issue-tracking</report>
- <report>scm</report>
- <report>summary</report>
- </reports>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- <version>2.10</version>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-javadoc-plugin</artifactId>
- <version>2.8.1</version>
- <configuration>
- <doclet>org.jboss.apiviz.APIviz</doclet>
- <docletArtifact>
- <groupId>org.jboss.apiviz</groupId>
- <artifactId>apiviz</artifactId>
- <version>1.3.2.GA</version>
- </docletArtifact>
- <finalName>${project.artifactId}-${build.suffix}</finalName>
- <useStandardDocletOptions>true</useStandardDocletOptions>
- <charset>UTF-8</charset>
- <encoding>UTF-8</encoding>
- <docencoding>UTF-8</docencoding>
- <breakiterator>true</breakiterator>
- <version>true</version>
- <author>true</author>
- <keywords>true</keywords>
- <excludePackageNames>net.sf.jnetlib.*:cern.*:corejava</excludePackageNames>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-jxr-plugin</artifactId>
- <version>2.3</version>
- <configuration>
- <aggregate>true</aggregate>
- <linkJavadoc>true</linkJavadoc>
- </configuration>
- </plugin>
- </reportPlugins>
- </configuration>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
-
- <repositories>
- <repository>
- <id>central2</id>
- <name>central2</name>
- <url>http://repo2.maven.org/maven2</url>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- <releases>
- <updatePolicy>never</updatePolicy>
- <enabled>true</enabled>
- </releases>
- </repository>
- <repository>
- <id>central</id>
- <name>central</name>
- <url>http://repo1.maven.org/maven2</url>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- <releases>
- <updatePolicy>never</updatePolicy>
- <enabled>true</enabled>
- </releases>
- </repository>
- <!-- Third Packages hosted in local maven because not available in
- other places -->
- <repository>
- <id>thirdparty</id>
- <name>thirdparty</name>
- <url>${nexusproxy}/repositories/thirdparty</url>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- <releases>
- <updatePolicy>never</updatePolicy>
- <enabled>true</enabled>
- </releases>
- </repository>
- </repositories>
- <distributionManagement>
- <!-- OpenDayLight Released artifact -->
- <repository>
- <id>opendaylight-release</id>
- <url>${nexusproxy}/repositories/${nexus.repository.release}/</url>
- </repository>
- <!-- OpenDayLight Snapshot artifact -->
- <snapshotRepository>
- <id>opendaylight-snapshot</id>
- <url>${nexusproxy}/repositories/${nexus.repository.snapshot}/</url>
- </snapshotRepository>
- <!-- Site deployment -->
- <site>
- <id>website</id>
- <url>${sitedeploy}</url>
- </site>
- </distributionManagement>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../commons/thirdparty</relativePath>
- </parent>
-
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- <version>5.0.0</version>
- </dependency>
- <dependency>
- <groupId>ch.ethz.ganymed</groupId>
- <artifactId>ganymed-ssh2</artifactId>
- <version>261</version>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>ch.ethz.ssh2.*</Export-Package>
- <Embed-Dependency>ganymed-ssh2;scope=compile</Embed-Dependency>
- <Embed-Transitive>true</Embed-Transitive>
- </instructions>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-enforcer-plugin</artifactId>
- <version>${enforcer.version}</version>
- <executions>
- <execution>
- <id>enforce-no-snapshots</id>
- <goals>
- <goal>enforce</goal>
- </goals>
- <configuration>
- <rules>
- <bannedDependencies>
- <excludes>
- <exclude>ch.ethz.ganymed:ganymed-ssh2:*</exclude>
- </excludes>
- <includes>
- <include>ch.ethz.ganymed:ganymed-ssh2:[261]</include>
- </includes>
- </bannedDependencies>
- </rules>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
-
-
+++ /dev/null
-/*
- * Copyright (c) 2006-2011 Christian Plattner. All rights reserved.
- * Please refer to the LICENSE.txt for licensing details.
- */
-
-package ch.ethz.ssh2;
-
-import java.io.CharArrayWriter;
-import java.io.File;
-import java.net.Socket;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.security.SecureRandom;
-import java.util.List;
-import java.util.Vector;
-
-import ch.ethz.ssh2.auth.AuthenticationManager;
-import ch.ethz.ssh2.channel.ChannelManager;
-import ch.ethz.ssh2.crypto.CryptoWishList;
-import ch.ethz.ssh2.crypto.cipher.BlockCipherFactory;
-import ch.ethz.ssh2.crypto.digest.MAC;
-import ch.ethz.ssh2.packets.PacketIgnore;
-import ch.ethz.ssh2.transport.KexManager;
-import ch.ethz.ssh2.transport.TransportManager;
-import ch.ethz.ssh2.util.TimeoutService;
-import ch.ethz.ssh2.util.TimeoutService.TimeoutToken;
-
-/**
- * A <code>Connection</code> is used to establish an encrypted TCP/IP
- * connection to a SSH-2 server.
- * <p>
- * Typically, one
- * <ol>
- * <li>creates a {@link #Connection(String) Connection} object.</li>
- * <li>calls the {@link #connect() connect()} method.</li>
- * <li>calls some of the authentication methods (e.g., {@link #authenticateWithPublicKey(String, File, String) authenticateWithPublicKey()}).</li>
- * <li>calls one or several times the {@link #openSession() openSession()} method.</li>
- * <li>finally, one must close the connection and release resources with the {@link #close() close()} method.</li>
- * </ol>
- *
- * @author Christian Plattner
- * @version $Id: Connection.java 69 2013-08-09 06:39:56Z dkocher@sudo.ch $
- */
-
-public class Connection
-{
- /**
- * The identifier presented to the SSH-2 server. This is the same
- * as the "softwareversion" defined in RFC 4253.
- * <p/>
- * <b>NOTE: As per the RFC, the "softwareversion" string MUST consist of printable
- * US-ASCII characters, with the exception of whitespace characters and the minus sign (-).</b>
- */
- private String softwareversion = String.format("Ganymed_%s", Version.getSpecification());
-
- /* Will be used to generate all random data needed for the current connection.
- * Note: SecureRandom.nextBytes() is thread safe.
- */
-
- private SecureRandom generator;
-
- private Socket precreatedSocket;
-
- public Connection(Socket socket) {
- this.precreatedSocket = socket;
- this.hostname = socket.getInetAddress().getHostName();
- this.port = socket.getPort();
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @return The list of supported cipher algorithms by this implementation.
- */
- public static synchronized String[] getAvailableCiphers()
- {
- return BlockCipherFactory.getDefaultCipherList();
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @return The list of supported MAC algorthims by this implementation.
- */
- public static synchronized String[] getAvailableMACs()
- {
- return MAC.getMacList();
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @return The list of supported server host key algorthims by this implementation.
- */
- public static synchronized String[] getAvailableServerHostKeyAlgorithms()
- {
- return KexManager.getDefaultServerHostkeyAlgorithmList();
- }
-
- private AuthenticationManager am;
-
- private boolean authenticated = false;
- private ChannelManager cm;
-
- private CryptoWishList cryptoWishList = new CryptoWishList();
-
- private DHGexParameters dhgexpara = new DHGexParameters();
-
- private final String hostname;
-
- private final int port;
-
- private TransportManager tm;
-
- private boolean tcpNoDelay = false;
-
- private ProxyData proxyData = null;
-
- private List<ConnectionMonitor> connectionMonitors = new Vector<ConnectionMonitor>();
-
- /**
- * Prepares a fresh <code>Connection</code> object which can then be used
- * to establish a connection to the specified SSH-2 server.
- * <p>
- * Same as {@link #Connection(String, int) Connection(hostname, 22)}.
- *
- * @param hostname the hostname of the SSH-2 server.
- */
- public Connection(String hostname)
- {
- this(hostname, 22);
- }
-
- /**
- * Prepares a fresh <code>Connection</code> object which can then be used
- * to establish a connection to the specified SSH-2 server.
- *
- * @param hostname
- * the host where we later want to connect to.
- * @param port
- * port on the server, normally 22.
- */
- public Connection(String hostname, int port)
- {
- this.hostname = hostname;
- this.port = port;
- }
-
- /**
- * Prepares a fresh <code>Connection</code> object which can then be used
- * to establish a connection to the specified SSH-2 server.
- *
- * @param hostname
- * the host where we later want to connect to.
- * @param port
- * port on the server, normally 22.
- * @param softwareversion
- * Allows you to set a custom "softwareversion" string as defined in RFC 4253.
- * <b>NOTE: As per the RFC, the "softwareversion" string MUST consist of printable
- * US-ASCII characters, with the exception of whitespace characters and the minus sign (-).</b>
- */
- public Connection(String hostname, int port, String softwareversion)
- {
- this.hostname = hostname;
- this.port = port;
- this.softwareversion = softwareversion;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * is based on DSA (it uses DSA to sign a challenge sent by the server).
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param pem
- * A <code>String</code> containing the DSA private key of the
- * user in OpenSSH key format (PEM, you can't miss the
- * "-----BEGIN DSA PRIVATE KEY-----" tag). The string may contain
- * linefeeds.
- * @param password
- * If the PEM string is 3DES encrypted ("DES-EDE3-CBC"), then you
- * must specify the password. Otherwise, this argument will be
- * ignored and can be set to <code>null</code>.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- *
- * @deprecated You should use one of the {@link #authenticateWithPublicKey(String, File, String) authenticateWithPublicKey()}
- * methods, this method is just a wrapper for it and will
- * disappear in future builds.
- *
- */
- public synchronized boolean authenticateWithDSA(String user, String pem, String password) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- if (pem == null)
- throw new IllegalArgumentException("pem argument is null");
-
- authenticated = am.authenticatePublicKey(user, pem.toCharArray(), password, getOrCreateSecureRND());
-
- return authenticated;
- }
-
- /**
- * A wrapper that calls {@link #authenticateWithKeyboardInteractive(String, String[], InteractiveCallback)
- * authenticateWithKeyboardInteractivewith} a <code>null</code> submethod list.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param cb
- * An <code>InteractiveCallback</code> which will be used to
- * determine the responses to the questions asked by the server.
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithKeyboardInteractive(String user, InteractiveCallback cb)
- throws IOException
- {
- return authenticateWithKeyboardInteractive(user, null, cb);
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * is based on "keyboard-interactive", specified in
- * draft-ietf-secsh-auth-kbdinteract-XX. Basically, you have to define a
- * callback object which will be feeded with challenges generated by the
- * server. Answers are then sent back to the server. It is possible that the
- * callback will be called several times during the invocation of this
- * method (e.g., if the server replies to the callback's answer(s) with
- * another challenge...)
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- * <p>
- * Note: some SSH servers advertise "keyboard-interactive", however, any
- * interactive request will be denied (without having sent any challenge to
- * the client).
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param submethods
- * An array of submethod names, see
- * draft-ietf-secsh-auth-kbdinteract-XX. May be <code>null</code>
- * to indicate an empty list.
- * @param cb
- * An <code>InteractiveCallback</code> which will be used to
- * determine the responses to the questions asked by the server.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithKeyboardInteractive(String user, String[] submethods,
- InteractiveCallback cb) throws IOException
- {
- if (cb == null)
- throw new IllegalArgumentException("Callback may not ne NULL!");
-
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- authenticated = am.authenticateInteractive(user, submethods, cb);
-
- return authenticated;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * sends username and password to the server.
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- * <p>
- * Note: if this method fails, then please double-check that it is actually
- * offered by the server (use {@link #getRemainingAuthMethods(String) getRemainingAuthMethods()}.
- * <p>
- * Often, password authentication is disabled, but users are not aware of it.
- * Many servers only offer "publickey" and "keyboard-interactive". However,
- * even though "keyboard-interactive" *feels* like password authentication
- * (e.g., when using the putty or openssh clients) it is *not* the same mechanism.
- *
- * @param user
- * @param password
- * @return if the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithPassword(String user, String password) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- if (password == null)
- throw new IllegalArgumentException("password argument is null");
-
- authenticated = am.authenticatePassword(user, password);
-
- return authenticated;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself.
- * This method can be used to explicitly use the special "none"
- * authentication method (where only a username has to be specified).
- * <p>
- * Note 1: The "none" method may always be tried by clients, however as by
- * the specs, the server will not explicitly announce it. In other words,
- * the "none" token will never show up in the list returned by
- * {@link #getRemainingAuthMethods(String)}.
- * <p>
- * Note 2: no matter which one of the authenticateWithXXX() methods
- * you call, the library will always issue exactly one initial "none"
- * authentication request to retrieve the initially allowed list of
- * authentication methods by the server. Please read RFC 4252 for the
- * details.
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If further authentication steps are needed, <code>false</code>
- * is returned and one can retry by any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- *
- * @param user
- * @return if the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithNone(String user) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- /* Trigger the sending of the PacketUserauthRequestNone packet */
- /* (if not already done) */
-
- authenticated = am.authenticateNone(user);
-
- return authenticated;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself.
- * The authentication method "publickey" works by signing a challenge
- * sent by the server. The signature is either DSA or RSA based - it
- * just depends on the type of private key you specify, either a DSA
- * or RSA private key in PEM format. And yes, this is may seem to be a
- * little confusing, the method is called "publickey" in the SSH-2 protocol
- * specification, however since we need to generate a signature, you
- * actually have to supply a private key =).
- * <p>
- * The private key contained in the PEM file may also be encrypted ("Proc-Type: 4,ENCRYPTED").
- * The library supports DES-CBC and DES-EDE3-CBC encryption, as well
- * as the more exotic PEM encrpytions AES-128-CBC, AES-192-CBC and AES-256-CBC.
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- * <p>
- * NOTE PUTTY USERS: Event though your key file may start with "-----BEGIN..."
- * it is not in the expected format. You have to convert it to the OpenSSH
- * key format by using the "puttygen" tool (can be downloaded from the Putty
- * website). Simply load your key and then use the "Conversions/Export OpenSSH key"
- * functionality to get a proper PEM file.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param pemPrivateKey
- * A <code>char[]</code> containing a DSA or RSA private key of the
- * user in OpenSSH key format (PEM, you can't miss the
- * "-----BEGIN DSA PRIVATE KEY-----" or "-----BEGIN RSA PRIVATE KEY-----"
- * tag). The char array may contain linebreaks/linefeeds.
- * @param password
- * If the PEM structure is encrypted ("Proc-Type: 4,ENCRYPTED") then
- * you must specify a password. Otherwise, this argument will be ignored
- * and can be set to <code>null</code>.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithPublicKey(String user, char[] pemPrivateKey, String password)
- throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- if (pemPrivateKey == null)
- throw new IllegalArgumentException("pemPrivateKey argument is null");
-
- authenticated = am.authenticatePublicKey(user, pemPrivateKey, password, getOrCreateSecureRND());
-
- return authenticated;
- }
-
- /**
- * A convenience wrapper function which reads in a private key (PEM format, either DSA or RSA)
- * and then calls <code>authenticateWithPublicKey(String, char[], String)</code>.
- * <p>
- * NOTE PUTTY USERS: Event though your key file may start with "-----BEGIN..."
- * it is not in the expected format. You have to convert it to the OpenSSH
- * key format by using the "puttygen" tool (can be downloaded from the Putty
- * website). Simply load your key and then use the "Conversions/Export OpenSSH key"
- * functionality to get a proper PEM file.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param pemFile
- * A <code>File</code> object pointing to a file containing a DSA or RSA
- * private key of the user in OpenSSH key format (PEM, you can't miss the
- * "-----BEGIN DSA PRIVATE KEY-----" or "-----BEGIN RSA PRIVATE KEY-----"
- * tag).
- * @param password
- * If the PEM file is encrypted then you must specify the password.
- * Otherwise, this argument will be ignored and can be set to <code>null</code>.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithPublicKey(String user, File pemFile, String password)
- throws IOException
- {
- if (pemFile == null)
- throw new IllegalArgumentException("pemFile argument is null");
-
- char[] buff = new char[256];
-
- CharArrayWriter cw = new CharArrayWriter();
-
- FileReader fr = new FileReader(pemFile);
-
- while (true)
- {
- int len = fr.read(buff);
- if (len < 0)
- break;
- cw.write(buff, 0, len);
- }
-
- fr.close();
-
- return authenticateWithPublicKey(user, cw.toCharArray(), password);
- }
-
- /**
- * Add a {@link ConnectionMonitor} to this connection. Can be invoked at any time,
- * but it is best to add connection monitors before invoking
- * <code>connect()</code> to avoid glitches (e.g., you add a connection monitor after
- * a successful connect(), but the connection has died in the mean time. Then,
- * your connection monitor won't be notified.)
- * <p>
- * You can add as many monitors as you like. If a monitor has already been added, then
- * this method does nothing.
- *
- * @see ConnectionMonitor
- *
- * @param cmon An object implementing the {@link ConnectionMonitor} interface.
- */
- public synchronized void addConnectionMonitor(ConnectionMonitor cmon)
- {
- if (cmon == null)
- throw new IllegalArgumentException("cmon argument is null");
-
- if (!connectionMonitors.contains(cmon))
- {
- connectionMonitors.add(cmon);
-
- if (tm != null)
- tm.setConnectionMonitors(connectionMonitors);
- }
- }
-
- /**
- * Remove a {@link ConnectionMonitor} from this connection.
- *
- * @param cmon
- * @return whether the monitor could be removed
- */
- public synchronized boolean removeConnectionMonitor(ConnectionMonitor cmon)
- {
- if (cmon == null)
- throw new IllegalArgumentException("cmon argument is null");
-
- boolean existed = connectionMonitors.remove(cmon);
-
- if (tm != null)
- tm.setConnectionMonitors(connectionMonitors);
-
- return existed;
- }
-
- /**
- * Close the connection to the SSH-2 server. All assigned sessions will be
- * closed, too. Can be called at any time. Don't forget to call this once
- * you don't need a connection anymore - otherwise the receiver thread may
- * run forever.
- */
- public synchronized void close()
- {
- Throwable t = new Throwable("Closed due to user request.");
- close(t, false);
- }
-
- public synchronized void close(Throwable t, boolean hard)
- {
- if (cm != null)
- cm.closeAllChannels();
-
- if (tm != null)
- {
- tm.close(t, hard == false);
- tm = null;
- }
- am = null;
- cm = null;
- authenticated = false;
- }
-
- /**
- * Same as {@link #connect(ServerHostKeyVerifier, int, int) connect(null, 0, 0)}.
- *
- * @return see comments for the {@link #connect(ServerHostKeyVerifier, int, int) connect(ServerHostKeyVerifier, int, int)} method.
- * @throws IOException
- */
- public synchronized ConnectionInfo connect() throws IOException
- {
- return connect(null, 0, 0);
- }
-
- /**
- * Same as {@link #connect(ServerHostKeyVerifier, int, int) connect(verifier, 0, 0)}.
- *
- * @return see comments for the {@link #connect(ServerHostKeyVerifier, int, int) connect(ServerHostKeyVerifier, int, int)} method.
- * @throws IOException
- */
- public synchronized ConnectionInfo connect(ServerHostKeyVerifier verifier) throws IOException
- {
- return connect(verifier, 0, 0);
- }
-
- /**
- * Connect to the SSH-2 server and, as soon as the server has presented its
- * host key, use the {@link ServerHostKeyVerifier#verifyServerHostKey(String,
- * int, String, byte[]) ServerHostKeyVerifier.verifyServerHostKey()}
- * method of the <code>verifier</code> to ask for permission to proceed.
- * If <code>verifier</code> is <code>null</code>, then any host key will be
- * accepted - this is NOT recommended, since it makes man-in-the-middle attackes
- * VERY easy (somebody could put a proxy SSH server between you and the real server).
- * <p>
- * Note: The verifier will be called before doing any crypto calculations
- * (i.e., diffie-hellman). Therefore, if you don't like the presented host key then
- * no CPU cycles are wasted (and the evil server has less information about us).
- * <p>
- * However, it is still possible that the server presented a fake host key: the server
- * cheated (typically a sign for a man-in-the-middle attack) and is not able to generate
- * a signature that matches its host key. Don't worry, the library will detect such
- * a scenario later when checking the signature (the signature cannot be checked before
- * having completed the diffie-hellman exchange).
- * <p>
- * Note 2: The {@link ServerHostKeyVerifier#verifyServerHostKey(String,
- * int, String, byte[]) ServerHostKeyVerifier.verifyServerHostKey()} method
- * will *NOT* be called from the current thread, the call is being made from a
- * background thread (there is a background dispatcher thread for every
- * established connection).
- * <p>
- * Note 3: This method will block as long as the key exchange of the underlying connection
- * has not been completed (and you have not specified any timeouts).
- * <p>
- * Note 4: If you want to re-use a connection object that was successfully connected,
- * then you must call the {@link #close()} method before invoking <code>connect()</code> again.
- *
- * @param verifier
- * An object that implements the
- * {@link ServerHostKeyVerifier} interface. Pass <code>null</code>
- * to accept any server host key - NOT recommended.
- *
- * @param connectTimeout
- * Connect the underlying TCP socket to the server with the given timeout
- * value (non-negative, in milliseconds). Zero means no timeout. If a proxy is being
- * used (see {@link #setProxyData(ProxyData)}), then this timeout is used for the
- * connection establishment to the proxy.
- *
- * @param kexTimeout
- * Timeout for complete connection establishment (non-negative,
- * in milliseconds). Zero means no timeout. The timeout counts from the
- * moment you invoke the connect() method and is cancelled as soon as the
- * first key-exchange round has finished. It is possible that
- * the timeout event will be fired during the invocation of the
- * <code>verifier</code> callback, but it will only have an effect after
- * the <code>verifier</code> returns.
- *
- * @return A {@link ConnectionInfo} object containing the details of
- * the established connection.
- *
- * @throws IOException
- * If any problem occurs, e.g., the server's host key is not
- * accepted by the <code>verifier</code> or there is problem during
- * the initial crypto setup (e.g., the signature sent by the server is wrong).
- * <p>
- * In case of a timeout (either connectTimeout or kexTimeout)
- * a SocketTimeoutException is thrown.
- * <p>
- * An exception may also be thrown if the connection was already successfully
- * connected (no matter if the connection broke in the mean time) and you invoke
- * <code>connect()</code> again without having called {@link #close()} first.
- * <p>
- * If a HTTP proxy is being used and the proxy refuses the connection,
- * then a {@link HTTPProxyException} may be thrown, which
- * contains the details returned by the proxy. If the proxy is buggy and does
- * not return a proper HTTP response, then a normal IOException is thrown instead.
- */
- public synchronized ConnectionInfo connect(ServerHostKeyVerifier verifier, int connectTimeout, int kexTimeout)
- throws IOException
- {
- final class TimeoutState
- {
- boolean isCancelled = false;
- boolean timeoutSocketClosed = false;
- }
-
- if (tm != null)
- throw new IOException("Connection to " + hostname + " is already in connected state!");
-
- if (connectTimeout < 0)
- throw new IllegalArgumentException("connectTimeout must be non-negative!");
-
- if (kexTimeout < 0)
- throw new IllegalArgumentException("kexTimeout must be non-negative!");
-
- final TimeoutState state = new TimeoutState();
-
- tm = new TransportManager();
- tm.setSoTimeout(connectTimeout);
- tm.setConnectionMonitors(connectionMonitors);
-
- /* Make sure that the runnable below will observe the new value of "tm"
- * and "state" (the runnable will be executed in a different thread, which
- * may be already running, that is why we need a memory barrier here).
- * See also the comment in Channel.java if you
- * are interested in the details.
- *
- * OKOK, this is paranoid since adding the runnable to the todo list
- * of the TimeoutService will ensure that all writes have been flushed
- * before the Runnable reads anything
- * (there is a synchronized block in TimeoutService.addTimeoutHandler).
- */
-
- synchronized (tm)
- {
- /* We could actually synchronize on anything. */
- }
-
- try
- {
- TimeoutToken token = null;
-
- if (kexTimeout > 0)
- {
- final Runnable timeoutHandler = new Runnable()
- {
- public void run()
- {
- synchronized (state)
- {
- if (state.isCancelled)
- return;
- state.timeoutSocketClosed = true;
- tm.close(new SocketTimeoutException("The connect timeout expired"), false);
- }
- }
- };
-
- long timeoutHorizont = System.currentTimeMillis() + kexTimeout;
-
- token = TimeoutService.addTimeoutHandler(timeoutHorizont, timeoutHandler);
- }
-
- try
- {
-
- if (precreatedSocket != null) {
- tm.clientInit(precreatedSocket, softwareversion, cryptoWishList, verifier, dhgexpara,
- getOrCreateSecureRND());
- } else {
- tm.clientInit(hostname, port, softwareversion, cryptoWishList, verifier, dhgexpara, connectTimeout,
- getOrCreateSecureRND(), proxyData);
- }
- }
- catch (SocketTimeoutException se)
- {
- throw (SocketTimeoutException) new SocketTimeoutException(
- "The connect() operation on the socket timed out.").initCause(se);
- }
-
- tm.setTcpNoDelay(tcpNoDelay);
-
- /* Wait until first KEX has finished */
-
- ConnectionInfo ci = tm.getConnectionInfo(1);
-
- /* Now try to cancel the timeout, if needed */
-
- if (token != null)
- {
- TimeoutService.cancelTimeoutHandler(token);
-
- /* Were we too late? */
-
- synchronized (state)
- {
- if (state.timeoutSocketClosed)
- throw new IOException("This exception will be replaced by the one below =)");
- /* Just in case the "cancelTimeoutHandler" invocation came just a little bit
- * too late but the handler did not enter the semaphore yet - we can
- * still stop it.
- */
- state.isCancelled = true;
- }
- }
-
- return ci;
- }
- catch (SocketTimeoutException ste)
- {
- throw ste;
- }
- catch (IOException e1)
- {
- /* This will also invoke any registered connection monitors */
- close(new Throwable("There was a problem during connect."), false);
-
- synchronized (state)
- {
- /* Show a clean exception, not something like "the socket is closed!?!" */
- if (state.timeoutSocketClosed)
- throw new SocketTimeoutException("The kexTimeout (" + kexTimeout + " ms) expired.");
- }
-
- /* Do not wrap a HTTPProxyException */
- if (e1 instanceof HTTPProxyException)
- throw e1;
-
- throw (IOException) new IOException("There was a problem while connecting to " + hostname + ":" + port)
- .initCause(e1);
- }
- }
-
- /**
- * Creates a new {@link LocalPortForwarder}.
- * A <code>LocalPortForwarder</code> forwards TCP/IP connections that arrive at a local
- * port via the secure tunnel to another host (which may or may not be
- * identical to the remote SSH-2 server).
- * <p>
- * This method must only be called after one has passed successfully the authentication step.
- * There is no limit on the number of concurrent forwardings.
- *
- * @param local_port the local port the LocalPortForwarder shall bind to.
- * @param host_to_connect target address (IP or hostname)
- * @param port_to_connect target port
- * @return A {@link LocalPortForwarder} object.
- * @throws IOException
- */
- public synchronized LocalPortForwarder createLocalPortForwarder(int local_port, String host_to_connect,
- int port_to_connect) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot forward ports, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot forward ports, connection is not authenticated.");
-
- return new LocalPortForwarder(cm, local_port, host_to_connect, port_to_connect);
- }
-
- /**
- * Creates a new {@link LocalPortForwarder}.
- * A <code>LocalPortForwarder</code> forwards TCP/IP connections that arrive at a local
- * port via the secure tunnel to another host (which may or may not be
- * identical to the remote SSH-2 server).
- * <p>
- * This method must only be called after one has passed successfully the authentication step.
- * There is no limit on the number of concurrent forwardings.
- *
- * @param addr specifies the InetSocketAddress where the local socket shall be bound to.
- * @param host_to_connect target address (IP or hostname)
- * @param port_to_connect target port
- * @return A {@link LocalPortForwarder} object.
- * @throws IOException
- */
- public synchronized LocalPortForwarder createLocalPortForwarder(InetSocketAddress addr, String host_to_connect,
- int port_to_connect) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot forward ports, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot forward ports, connection is not authenticated.");
-
- return new LocalPortForwarder(cm, addr, host_to_connect, port_to_connect);
- }
-
- /**
- * Creates a new {@link LocalStreamForwarder}.
- * A <code>LocalStreamForwarder</code> manages an Input/Outputstream pair
- * that is being forwarded via the secure tunnel into a TCP/IP connection to another host
- * (which may or may not be identical to the remote SSH-2 server).
- *
- * @param host_to_connect
- * @param port_to_connect
- * @return A {@link LocalStreamForwarder} object.
- * @throws IOException
- */
- public synchronized LocalStreamForwarder createLocalStreamForwarder(String host_to_connect, int port_to_connect)
- throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot forward, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot forward, connection is not authenticated.");
-
- return new LocalStreamForwarder(cm, host_to_connect, port_to_connect);
- }
-
- /**
- * Create a very basic {@link SCPClient} that can be used to copy
- * files from/to the SSH-2 server.
- * <p>
- * Works only after one has passed successfully the authentication step.
- * There is no limit on the number of concurrent SCP clients.
- * <p>
- * Note: This factory method will probably disappear in the future.
- *
- * @return A {@link SCPClient} object.
- * @throws IOException
- */
- public synchronized SCPClient createSCPClient() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot create SCP client, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot create SCP client, connection is not authenticated.");
-
- return new SCPClient(this);
- }
-
- /**
- * Force an asynchronous key re-exchange (the call does not block). The
- * latest values set for MAC, Cipher and DH group exchange parameters will
- * be used. If a key exchange is currently in progress, then this method has
- * the only effect that the so far specified parameters will be used for the
- * next (server driven) key exchange.
- * <p>
- * Note: This implementation will never start a key exchange (other than the initial one)
- * unless you or the SSH-2 server ask for it.
- *
- * @throws IOException
- * In case of any failure behind the scenes.
- */
- public synchronized void forceKeyExchange() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("You need to establish a connection first.");
-
- tm.forceKeyExchange(cryptoWishList, dhgexpara, null, null);
- }
-
- /**
- * Returns the hostname that was passed to the constructor.
- *
- * @return the hostname
- */
- public synchronized String getHostname()
- {
- return hostname;
- }
-
- /**
- * Returns the port that was passed to the constructor.
- *
- * @return the TCP port
- */
- public synchronized int getPort()
- {
- return port;
- }
-
- /**
- * Returns a {@link ConnectionInfo} object containing the details of
- * the connection. Can be called as soon as the connection has been
- * established (successfully connected).
- *
- * @return A {@link ConnectionInfo} object.
- * @throws IOException
- * In case of any failure behind the scenes.
- */
- public synchronized ConnectionInfo getConnectionInfo() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException(
- "Cannot get details of connection, you need to establish a connection first.");
- return tm.getConnectionInfo(1);
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * can be used to tell which authentication methods are supported by the
- * server at a certain stage of the authentication process (for the given
- * username).
- * <p>
- * Note 1: the username will only be used if no authentication step was done
- * so far (it will be used to ask the server for a list of possible
- * authentication methods by sending the initial "none" request). Otherwise,
- * this method ignores the user name and returns a cached method list
- * (which is based on the information contained in the last negative server response).
- * <p>
- * Note 2: the server may return method names that are not supported by this
- * implementation.
- * <p>
- * After a successful authentication, this method must not be called
- * anymore.
- *
- * @param user
- * A <code>String</code> holding the username.
- *
- * @return a (possibly emtpy) array holding authentication method names.
- * @throws IOException
- */
- public synchronized String[] getRemainingAuthMethods(String user) throws IOException
- {
- if (user == null)
- throw new IllegalArgumentException("user argument may not be NULL!");
-
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- return am.getRemainingMethods(user);
- }
-
- /**
- * Determines if the authentication phase is complete. Can be called at any
- * time.
- *
- * @return <code>true</code> if no further authentication steps are
- * needed.
- */
- public synchronized boolean isAuthenticationComplete()
- {
- return authenticated;
- }
-
- /**
- * Returns true if there was at least one failed authentication request and
- * the last failed authentication request was marked with "partial success"
- * by the server. This is only needed in the rare case of SSH-2 server setups
- * that cannot be satisfied with a single successful authentication request
- * (i.e., multiple authentication steps are needed.)
- * <p>
- * If you are interested in the details, then have a look at RFC4252.
- *
- * @return if the there was a failed authentication step and the last one
- * was marked as a "partial success".
- */
- public synchronized boolean isAuthenticationPartialSuccess()
- {
- if (am == null)
- return false;
-
- return am.getPartialSuccess();
- }
-
- /**
- * Checks if a specified authentication method is available. This method is
- * actually just a wrapper for {@link #getRemainingAuthMethods(String)
- * getRemainingAuthMethods()}.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param method
- * An authentication method name (e.g., "publickey", "password",
- * "keyboard-interactive") as specified by the SSH-2 standard.
- * @return if the specified authentication method is currently available.
- * @throws IOException
- */
- public synchronized boolean isAuthMethodAvailable(String user, String method) throws IOException
- {
- if (method == null)
- throw new IllegalArgumentException("method argument may not be NULL!");
-
- String methods[] = getRemainingAuthMethods(user);
-
- for (int i = 0; i < methods.length; i++)
- {
- if (methods[i].compareTo(method) == 0)
- return true;
- }
-
- return false;
- }
-
- private SecureRandom getOrCreateSecureRND()
- {
- if (generator == null)
- generator = new SecureRandom();
-
- return generator;
- }
-
- /**
- * Open a new {@link Session} on this connection. Works only after one has passed
- * successfully the authentication step. There is no limit on the number of
- * concurrent sessions.
- *
- * @return A {@link Session} object.
- * @throws IOException
- */
- public synchronized Session openSession() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot open session, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot open session, connection is not authenticated.");
-
- return new Session(cm, getOrCreateSecureRND());
- }
-
- /**
- * Send an SSH_MSG_IGNORE packet. This method will generate a random data attribute
- * (length between 0 (invlusive) and 16 (exclusive) bytes, contents are random bytes).
- * <p>
- * This method must only be called once the connection is established.
- *
- * @throws IOException
- */
- public synchronized void sendIgnorePacket() throws IOException
- {
- SecureRandom rnd = getOrCreateSecureRND();
-
- byte[] data = new byte[rnd.nextInt(16)];
- rnd.nextBytes(data);
-
- sendIgnorePacket(data);
- }
-
- /**
- * Send an SSH_MSG_IGNORE packet with the given data attribute.
- * <p>
- * This method must only be called once the connection is established.
- *
- * @throws IOException
- */
- public synchronized void sendIgnorePacket(byte[] data) throws IOException
- {
- if (data == null)
- throw new IllegalArgumentException("data argument must not be null.");
-
- if (tm == null)
- throw new IllegalStateException(
- "Cannot send SSH_MSG_IGNORE packet, you need to establish a connection first.");
-
- PacketIgnore pi = new PacketIgnore();
- pi.setData(data);
-
- tm.sendMessage(pi.getPayload());
- }
-
- /**
- * Removes duplicates from a String array, keeps only first occurence
- * of each element. Does not destroy order of elements; can handle nulls.
- * Uses a very efficient O(N^2) algorithm =)
- *
- * @param list a String array.
- * @return a cleaned String array.
- */
- private String[] removeDuplicates(String[] list)
- {
- if ((list == null) || (list.length < 2))
- return list;
-
- String[] list2 = new String[list.length];
-
- int count = 0;
-
- for (int i = 0; i < list.length; i++)
- {
- boolean duplicate = false;
-
- String element = list[i];
-
- for (int j = 0; j < count; j++)
- {
- if (((element == null) && (list2[j] == null)) || ((element != null) && (element.equals(list2[j]))))
- {
- duplicate = true;
- break;
- }
- }
-
- if (duplicate)
- continue;
-
- list2[count++] = list[i];
- }
-
- if (count == list2.length)
- return list2;
-
- String[] tmp = new String[count];
- System.arraycopy(list2, 0, tmp, 0, count);
-
- return tmp;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param ciphers
- */
- public synchronized void setClient2ServerCiphers(String[] ciphers)
- {
- if ((ciphers == null) || (ciphers.length == 0))
- throw new IllegalArgumentException();
- ciphers = removeDuplicates(ciphers);
- BlockCipherFactory.checkCipherList(ciphers);
- cryptoWishList.c2s_enc_algos = ciphers;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param macs
- */
- public synchronized void setClient2ServerMACs(String[] macs)
- {
- if ((macs == null) || (macs.length == 0))
- throw new IllegalArgumentException();
- macs = removeDuplicates(macs);
- MAC.checkMacList(macs);
- cryptoWishList.c2s_mac_algos = macs;
- }
-
- /**
- * Sets the parameters for the diffie-hellman group exchange. Unless you
- * know what you are doing, you will never need this. Default values are
- * defined in the {@link DHGexParameters} class.
- *
- * @param dgp {@link DHGexParameters}, non null.
- *
- */
- public synchronized void setDHGexParameters(DHGexParameters dgp)
- {
- if (dgp == null)
- throw new IllegalArgumentException();
-
- dhgexpara = dgp;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param ciphers
- */
- public synchronized void setServer2ClientCiphers(String[] ciphers)
- {
- if ((ciphers == null) || (ciphers.length == 0))
- throw new IllegalArgumentException();
- ciphers = removeDuplicates(ciphers);
- BlockCipherFactory.checkCipherList(ciphers);
- cryptoWishList.s2c_enc_algos = ciphers;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param macs
- */
- public synchronized void setServer2ClientMACs(String[] macs)
- {
- if ((macs == null) || (macs.length == 0))
- throw new IllegalArgumentException();
-
- macs = removeDuplicates(macs);
- MAC.checkMacList(macs);
- cryptoWishList.s2c_mac_algos = macs;
- }
-
- /**
- * Define the set of allowed server host key algorithms to be used for
- * the following key exchange operations.
- * <p>
- * Unless you know what you are doing, you will never need this.
- *
- * @param algos An array of allowed server host key algorithms.
- * SSH-2 defines <code>ssh-dss</code> and <code>ssh-rsa</code>.
- * The entries of the array must be ordered after preference, i.e.,
- * the entry at index 0 is the most preferred one. You must specify
- * at least one entry.
- */
- public synchronized void setServerHostKeyAlgorithms(String[] algos)
- {
- if ((algos == null) || (algos.length == 0))
- throw new IllegalArgumentException();
-
- algos = removeDuplicates(algos);
- KexManager.checkServerHostkeyAlgorithmsList(algos);
- cryptoWishList.serverHostKeyAlgorithms = algos;
- }
-
- /**
- * Enable/disable TCP_NODELAY (disable/enable Nagle's algorithm) on the underlying socket.
- * <p>
- * Can be called at any time. If the connection has not yet been established
- * then the passed value will be stored and set after the socket has been set up.
- * The default value that will be used is <code>false</code>.
- *
- * @param enable the argument passed to the <code>Socket.setTCPNoDelay()</code> method.
- * @throws IOException
- */
- public synchronized void setTCPNoDelay(boolean enable) throws IOException
- {
- tcpNoDelay = enable;
-
- if (tm != null)
- tm.setTcpNoDelay(enable);
- }
-
- /**
- * Used to tell the library that the connection shall be established through a proxy server.
- * It only makes sense to call this method before calling the {@link #connect() connect()}
- * method.
- * <p>
- * At the moment, only HTTP proxies are supported.
- * <p>
- * Note: This method can be called any number of times. The {@link #connect() connect()}
- * method will use the value set in the last preceding invocation of this method.
- *
- * @see HTTPProxyData
- *
- * @param proxyData Connection information about the proxy. If <code>null</code>, then
- * no proxy will be used (non surprisingly, this is also the default).
- */
- public synchronized void setProxyData(ProxyData proxyData)
- {
- this.proxyData = proxyData;
- }
-
- /**
- * Request a remote port forwarding.
- * If successful, then forwarded connections will be redirected to the given target address.
- * You can cancle a requested remote port forwarding by calling
- * {@link #cancelRemotePortForwarding(int) cancelRemotePortForwarding()}.
- * <p>
- * A call of this method will block until the peer either agreed or disagreed to your request-
- * <p>
- * Note 1: this method typically fails if you
- * <ul>
- * <li>pass a port number for which the used remote user has not enough permissions (i.e., port
- * < 1024)</li>
- * <li>or pass a port number that is already in use on the remote server</li>
- * <li>or if remote port forwarding is disabled on the server.</li>
- * </ul>
- * <p>
- * Note 2: (from the openssh man page): By default, the listening socket on the server will be
- * bound to the loopback interface only. This may be overriden by specifying a bind address.
- * Specifying a remote bind address will only succeed if the server's <b>GatewayPorts</b> option
- * is enabled (see sshd_config(5)).
- *
- * @param bindAddress address to bind to on the server:
- * <ul>
- * <li>"" means that connections are to be accepted on all protocol families
- * supported by the SSH implementation</li>
- * <li>"0.0.0.0" means to listen on all IPv4 addresses</li>
- * <li>"::" means to listen on all IPv6 addresses</li>
- * <li>"localhost" means to listen on all protocol families supported by the SSH
- * implementation on loopback addresses only, [RFC3330] and RFC3513]</li>
- * <li>"127.0.0.1" and "::1" indicate listening on the loopback interfaces for
- * IPv4 and IPv6 respectively</li>
- * </ul>
- * @param bindPort port number to bind on the server (must be > 0)
- * @param targetAddress the target address (IP or hostname)
- * @param targetPort the target port
- * @throws IOException
- */
- public synchronized void requestRemotePortForwarding(String bindAddress, int bindPort, String targetAddress,
- int targetPort) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("You need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("The connection is not authenticated.");
-
- if ((bindAddress == null) || (targetAddress == null) || (bindPort <= 0) || (targetPort <= 0))
- throw new IllegalArgumentException();
-
- cm.requestGlobalForward(bindAddress, bindPort, targetAddress, targetPort);
- }
-
- /**
- * Cancel an earlier requested remote port forwarding.
- * Currently active forwardings will not be affected (e.g., disrupted).
- * Note that further connection forwarding requests may be received until
- * this method has returned.
- *
- * @param bindPort the allocated port number on the server
- * @throws IOException if the remote side refuses the cancel request or another low
- * level error occurs (e.g., the underlying connection is closed)
- */
- public synchronized void cancelRemotePortForwarding(int bindPort) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("You need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("The connection is not authenticated.");
-
- cm.requestCancelGlobalForward(bindPort);
- }
-
- /**
- * Provide your own instance of SecureRandom. Can be used, e.g., if you
- * want to seed the used SecureRandom generator manually.
- * <p>
- * The SecureRandom instance is used during key exchanges, public key authentication,
- * x11 cookie generation and the like.
- *
- * @param rnd a SecureRandom instance
- */
- public synchronized void setSecureRandom(SecureRandom rnd)
- {
- if (rnd == null)
- throw new IllegalArgumentException();
-
- this.generator = rnd;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2006-2013 Christian Plattner. All rights reserved.
- * Please refer to the LICENSE.txt for licensing details.
- */
-
-package ch.ethz.ssh2.channel;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Vector;
-
-import ch.ethz.ssh2.ChannelCondition;
-import ch.ethz.ssh2.PtySettings;
-import ch.ethz.ssh2.ServerConnectionCallback;
-import ch.ethz.ssh2.ServerSessionCallback;
-import ch.ethz.ssh2.log.Logger;
-import ch.ethz.ssh2.packets.PacketChannelFailure;
-import ch.ethz.ssh2.packets.PacketChannelOpenConfirmation;
-import ch.ethz.ssh2.packets.PacketChannelOpenFailure;
-import ch.ethz.ssh2.packets.PacketChannelSuccess;
-import ch.ethz.ssh2.packets.PacketGlobalCancelForwardRequest;
-import ch.ethz.ssh2.packets.PacketGlobalForwardRequest;
-import ch.ethz.ssh2.packets.PacketOpenDirectTCPIPChannel;
-import ch.ethz.ssh2.packets.PacketOpenSessionChannel;
-import ch.ethz.ssh2.packets.PacketSessionExecCommand;
-import ch.ethz.ssh2.packets.PacketSessionPtyRequest;
-import ch.ethz.ssh2.packets.PacketSessionStartShell;
-import ch.ethz.ssh2.packets.PacketSessionSubsystemRequest;
-import ch.ethz.ssh2.packets.PacketSessionX11Request;
-import ch.ethz.ssh2.packets.Packets;
-import ch.ethz.ssh2.packets.TypesReader;
-import ch.ethz.ssh2.server.ServerConnectionState;
-import ch.ethz.ssh2.transport.MessageHandler;
-import ch.ethz.ssh2.transport.TransportManager;
-
-/**
- * ChannelManager. Please read the comments in Channel.java.
- * <p/>
- * Besides the crypto part, this is the core of the library.
- *
- * @author Christian Plattner
- * @version $Id: ChannelManager.java 48 2013-08-01 12:22:33Z cleondris@gmail.com $
- */
-public class ChannelManager implements MessageHandler
-{
- private static final Logger log = Logger.getLogger(ChannelManager.class);
-
- private final ServerConnectionState server_state;
- private final TransportManager tm;
-
- private final HashMap<String, X11ServerData> x11_magic_cookies = new HashMap<String, X11ServerData>();
-
- private final List<Channel> channels = new Vector<Channel>();
- private int nextLocalChannel = 100;
- private boolean shutdown = false;
- private int globalSuccessCounter = 0;
- private int globalFailedCounter = 0;
-
- private final HashMap<Integer, RemoteForwardingData> remoteForwardings = new HashMap<Integer, RemoteForwardingData>();
-
- private final List<IChannelWorkerThread> listenerThreads = new Vector<IChannelWorkerThread>();
-
- private boolean listenerThreadsAllowed = true;
-
- /**
- * Constructor for client-mode.
- * @param tm
- */
- public ChannelManager(TransportManager tm)
- {
- this.server_state = null;
- this.tm = tm;
- tm.registerMessageHandler(this, 80, 100);
- }
-
- /**
- * Constructor for server-mode.
- * @param state
- */
- public ChannelManager(ServerConnectionState state)
- {
- this.server_state = state;
- this.tm = state.tm;
- tm.registerMessageHandler(this, 80, 100);
- }
-
- private Channel getChannel(int id)
- {
- synchronized (channels)
- {
- for (Channel c : channels)
- {
- if (c.localID == id)
- return c;
- }
- }
- return null;
- }
-
- private void removeChannel(int id)
- {
- synchronized (channels)
- {
- for (Channel c : channels)
- {
- if (c.localID == id)
- {
- channels.remove(c);
- break;
- }
- }
- }
- }
-
- private int addChannel(Channel c)
- {
- synchronized (channels)
- {
- channels.add(c);
- return nextLocalChannel++;
- }
- }
-
- private void waitUntilChannelOpen(Channel c) throws IOException
- {
- boolean wasInterrupted = false;
-
- synchronized (c)
- {
- while (c.state == Channel.STATE_OPENING)
- {
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (c.state != Channel.STATE_OPEN)
- {
- removeChannel(c.localID);
-
- String detail = c.getReasonClosed();
-
- if (detail == null)
- detail = "state: " + c.state;
-
- throw new IOException("Could not open channel (" + detail + ")");
- }
- }
-
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
-
- private void waitForGlobalSuccessOrFailure() throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (channels)
- {
- while ((globalSuccessCounter == 0) && (globalFailedCounter == 0))
- {
- if (shutdown)
- {
- throw new IOException("The connection is being shutdown");
- }
-
- try
- {
- channels.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (globalFailedCounter != 0)
- {
- throw new IOException("The server denied the request (did you enable port forwarding?)");
- }
-
- if (globalSuccessCounter == 0)
- {
- throw new IOException("Illegal state.");
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- private void waitForChannelSuccessOrFailure(Channel c) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (c)
- {
- while ((c.successCounter == 0) && (c.failedCounter == 0))
- {
- if (c.state != Channel.STATE_OPEN)
- {
- String detail = c.getReasonClosed();
-
- if (detail == null)
- detail = "state: " + c.state;
-
- throw new IOException("This SSH2 channel is not open (" + detail + ")");
- }
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (c.failedCounter != 0)
- {
- throw new IOException("The server denied the request.");
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public void registerX11Cookie(String hexFakeCookie, X11ServerData data)
- {
- synchronized (x11_magic_cookies)
- {
- x11_magic_cookies.put(hexFakeCookie, data);
- }
- }
-
- public void unRegisterX11Cookie(String hexFakeCookie, boolean killChannels)
- {
- if (hexFakeCookie == null)
- throw new IllegalStateException("hexFakeCookie may not be null");
-
- synchronized (x11_magic_cookies)
- {
- x11_magic_cookies.remove(hexFakeCookie);
- }
-
- if (killChannels == false)
- return;
-
- log.debug("Closing all X11 channels for the given fake cookie");
-
- List<Channel> channel_copy = new Vector<Channel>();
-
- synchronized (channels)
- {
- channel_copy.addAll(channels);
- }
-
- for (Channel c : channel_copy)
- {
- synchronized (c)
- {
- if (hexFakeCookie.equals(c.hexX11FakeCookie) == false)
- continue;
- }
-
- try
- {
- closeChannel(c, "Closing X11 channel since the corresponding session is closing", true);
- }
- catch (IOException ignored)
- {
- }
- }
- }
-
- public X11ServerData checkX11Cookie(String hexFakeCookie)
- {
- synchronized (x11_magic_cookies)
- {
- if (hexFakeCookie != null)
- return x11_magic_cookies.get(hexFakeCookie);
- }
- return null;
- }
-
- public void closeAllChannels()
- {
- log.debug("Closing all channels");
-
- List<Channel> channel_copy = new Vector<Channel>();
-
- synchronized (channels)
- {
- channel_copy.addAll(channels);
- }
-
- for (Channel c : channel_copy)
- {
- try
- {
- closeChannel(c, "Closing all channels", true);
- }
- catch (IOException ignored)
- {
- }
- }
- }
-
- public void closeChannel(Channel c, String reason, boolean force) throws IOException
- {
- byte msg[] = new byte[5];
-
- synchronized (c)
- {
- if (force)
- {
- c.state = Channel.STATE_CLOSED;
- c.EOF = true;
- }
-
- c.setReasonClosed(reason);
-
- msg[0] = Packets.SSH_MSG_CHANNEL_CLOSE;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
-
- c.notifyAll();
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(msg);
- c.closeMessageSent = true;
- }
-
- log.debug("Sent SSH_MSG_CHANNEL_CLOSE (channel " + c.localID + ")");
- }
-
- public void sendEOF(Channel c) throws IOException
- {
- byte[] msg = new byte[5];
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- return;
-
- msg[0] = Packets.SSH_MSG_CHANNEL_EOF;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(msg);
- }
-
-
- log.debug("Sent EOF (Channel " + c.localID + "/" + c.remoteID + ")");
- }
-
- public void sendOpenConfirmation(Channel c) throws IOException
- {
- PacketChannelOpenConfirmation pcoc = null;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPENING)
- return;
-
- c.state = Channel.STATE_OPEN;
-
- pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID, c.localWindow, c.localMaxPacketSize);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(pcoc.getPayload());
- }
- }
-
- public void sendData(Channel c, byte[] buffer, int pos, int len) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- while (len > 0)
- {
- int thislen = 0;
- byte[] msg;
-
- synchronized (c)
- {
- while (true)
- {
- if (c.state == Channel.STATE_CLOSED)
- throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
-
- if (c.state != Channel.STATE_OPEN)
- throw new ChannelClosedException("SSH channel in strange state. (" + c.state + ")");
-
- if (c.remoteWindow != 0)
- break;
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- /* len > 0, no sign extension can happen when comparing */
-
- thislen = (c.remoteWindow >= len) ? len : (int) c.remoteWindow;
-
- int estimatedMaxDataLen = c.remoteMaxPacketSize - (tm.getPacketOverheadEstimate() + 9);
-
- /* The worst case scenario =) a true bottleneck */
-
- if (estimatedMaxDataLen <= 0)
- {
- estimatedMaxDataLen = 1;
- }
-
- if (thislen > estimatedMaxDataLen)
- thislen = estimatedMaxDataLen;
-
- c.remoteWindow -= thislen;
-
- msg = new byte[1 + 8 + thislen];
-
- msg[0] = Packets.SSH_MSG_CHANNEL_DATA;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
- msg[5] = (byte) (thislen >> 24);
- msg[6] = (byte) (thislen >> 16);
- msg[7] = (byte) (thislen >> 8);
- msg[8] = (byte) (thislen);
-
- System.arraycopy(buffer, pos, msg, 9, thislen);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
-
- tm.sendMessage(msg);
- }
-
- pos += thislen;
- len -= thislen;
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public int requestGlobalForward(String bindAddress, int bindPort, String targetAddress, int targetPort)
- throws IOException
- {
- RemoteForwardingData rfd = new RemoteForwardingData();
-
- rfd.bindAddress = bindAddress;
- rfd.bindPort = bindPort;
- rfd.targetAddress = targetAddress;
- rfd.targetPort = targetPort;
-
- synchronized (remoteForwardings)
- {
- Integer key = new Integer(bindPort);
-
- if (remoteForwardings.get(key) != null)
- {
- throw new IOException("There is already a forwarding for remote port " + bindPort);
- }
-
- remoteForwardings.put(key, rfd);
- }
-
- synchronized (channels)
- {
- globalSuccessCounter = globalFailedCounter = 0;
- }
-
- PacketGlobalForwardRequest pgf = new PacketGlobalForwardRequest(true, bindAddress, bindPort);
- tm.sendMessage(pgf.getPayload());
-
- log.debug("Requesting a remote forwarding ('" + bindAddress + "', " + bindPort + ")");
-
- try
- {
- waitForGlobalSuccessOrFailure();
- }
- catch (IOException e)
- {
- synchronized (remoteForwardings)
- {
- remoteForwardings.remove(rfd);
- }
- throw e;
- }
-
- return bindPort;
- }
-
- public void requestCancelGlobalForward(int bindPort) throws IOException
- {
- RemoteForwardingData rfd = null;
-
- synchronized (remoteForwardings)
- {
- rfd = remoteForwardings.get(new Integer(bindPort));
-
- if (rfd == null)
- throw new IOException("Sorry, there is no known remote forwarding for remote port " + bindPort);
- }
-
- synchronized (channels)
- {
- globalSuccessCounter = globalFailedCounter = 0;
- }
-
- PacketGlobalCancelForwardRequest pgcf = new PacketGlobalCancelForwardRequest(true, rfd.bindAddress,
- rfd.bindPort);
- tm.sendMessage(pgcf.getPayload());
-
- log.debug("Requesting cancelation of remote forward ('" + rfd.bindAddress + "', " + rfd.bindPort + ")");
-
- waitForGlobalSuccessOrFailure();
-
- /* Only now we are sure that no more forwarded connections will arrive */
-
- synchronized (remoteForwardings)
- {
- remoteForwardings.remove(rfd);
- }
- }
-
- public void registerThread(IChannelWorkerThread thr) throws IOException
- {
- synchronized (listenerThreads)
- {
- if (listenerThreadsAllowed == false)
- throw new IOException("Too late, this connection is closed.");
- listenerThreads.add(thr);
- }
- }
-
- public Channel openDirectTCPIPChannel(String host_to_connect, int port_to_connect, String originator_IP_address,
- int originator_port) throws IOException
- {
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.localID = addChannel(c);
- // end of synchronized block forces writing out to main memory
- }
-
- PacketOpenDirectTCPIPChannel dtc = new PacketOpenDirectTCPIPChannel(c.localID, c.localWindow,
- c.localMaxPacketSize, host_to_connect, port_to_connect, originator_IP_address, originator_port);
-
- tm.sendMessage(dtc.getPayload());
-
- waitUntilChannelOpen(c);
-
- return c;
- }
-
- public Channel openSessionChannel() throws IOException
- {
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.localID = addChannel(c);
- // end of synchronized block forces the writing out to main memory
- }
-
- log.debug("Sending SSH_MSG_CHANNEL_OPEN (Channel " + c.localID + ")");
-
- PacketOpenSessionChannel smo = new PacketOpenSessionChannel(c.localID, c.localWindow, c.localMaxPacketSize);
- tm.sendMessage(smo.getPayload());
-
- waitUntilChannelOpen(c);
-
- return c;
- }
-
- public void requestPTY(Channel c, String term, int term_width_characters, int term_height_characters,
- int term_width_pixels, int term_height_pixels, byte[] terminal_modes) throws IOException
- {
- PacketSessionPtyRequest spr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
-
- spr = new PacketSessionPtyRequest(c.remoteID, true, term, term_width_characters, term_height_characters,
- term_width_pixels, term_height_pixels, terminal_modes);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(spr.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("PTY request failed").initCause(e);
- }
- }
-
- public void requestX11(Channel c, boolean singleConnection, String x11AuthenticationProtocol,
- String x11AuthenticationCookie, int x11ScreenNumber) throws IOException
- {
- PacketSessionX11Request psr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
-
- psr = new PacketSessionX11Request(c.remoteID, true, singleConnection, x11AuthenticationProtocol,
- x11AuthenticationCookie, x11ScreenNumber);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(psr.getPayload());
- }
-
- log.debug("Requesting X11 forwarding (Channel " + c.localID + "/" + c.remoteID + ")");
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The X11 request failed.").initCause(e);
- }
- }
-
- public void requestSubSystem(Channel c, String subSystemName) throws IOException
- {
- PacketSessionSubsystemRequest ssr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
-
- ssr = new PacketSessionSubsystemRequest(c.remoteID, true, subSystemName);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(ssr.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The subsystem request failed.").initCause(e);
- }
- }
-
- public void requestExecCommand(Channel c, String cmd) throws IOException
- {
- this.requestExecCommand(c, cmd, null);
- }
-
- /**
- * @param charsetName The charset used to convert between Java Unicode Strings and byte encodings
- */
- public void requestExecCommand(Channel c, String cmd, String charsetName) throws IOException
- {
- PacketSessionExecCommand sm;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
-
- sm = new PacketSessionExecCommand(c.remoteID, true, cmd);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(sm.getPayload(charsetName));
- }
-
- log.debug("Executing command (channel " + c.localID + ", '" + cmd + "')");
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The execute request failed.").initCause(e);
- }
- }
-
- public void requestShell(Channel c) throws IOException
- {
- PacketSessionStartShell sm;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
-
- sm = new PacketSessionStartShell(c.remoteID, true);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(sm.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The shell request failed.").initCause(e);
- }
- }
-
- public void msgChannelExtendedData(byte[] msg, int msglen) throws IOException
- {
- if (msglen <= 13)
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int dataType = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
- int len = ((msg[9] & 0xff) << 24) | ((msg[10] & 0xff) << 16) | ((msg[11] & 0xff) << 8) | (msg[12] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_EXTENDED_DATA message for non-existent channel " + id);
-
- if (dataType != Packets.SSH_EXTENDED_DATA_STDERR)
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has unknown type (" + dataType + ")");
-
- if (len != (msglen - 13))
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong len (calculated " + (msglen - 13)
- + ", got " + len + ")");
-
- log.debug("Got SSH_MSG_CHANNEL_EXTENDED_DATA (channel " + id + ", " + len + ")");
-
- synchronized (c)
- {
- if (c.state == Channel.STATE_CLOSED)
- return; // ignore
-
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Got SSH_MSG_CHANNEL_EXTENDED_DATA, but channel is not in correct state ("
- + c.state + ")");
-
- if (c.localWindow < len)
- throw new IOException("Remote sent too much data, does not fit into window.");
-
- c.localWindow -= len;
-
- System.arraycopy(msg, 13, c.stderrBuffer, c.stderrWritepos, len);
- c.stderrWritepos += len;
-
- c.notifyAll();
- }
- }
-
- /**
- * Wait until for a condition.
- *
- * @param c Channel
- * @param timeout in ms, 0 means no timeout.
- * @param condition_mask minimum event mask (at least one of the conditions must be fulfilled)
- * @return all current events
- */
- public int waitForCondition(Channel c, long timeout, int condition_mask)
- {
- boolean wasInterrupted = false;
-
- try
- {
- long end_time = 0;
- boolean end_time_set = false;
-
- synchronized (c)
- {
- while (true)
- {
- int current_cond = 0;
-
- int stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
- int stderrAvail = c.stderrWritepos - c.stderrReadpos;
-
- if (stdoutAvail > 0)
- current_cond = current_cond | ChannelCondition.STDOUT_DATA;
-
- if (stderrAvail > 0)
- current_cond = current_cond | ChannelCondition.STDERR_DATA;
-
- if (c.EOF)
- current_cond = current_cond | ChannelCondition.EOF;
-
- if (c.getExitStatus() != null)
- current_cond = current_cond | ChannelCondition.EXIT_STATUS;
-
- if (c.getExitSignal() != null)
- current_cond = current_cond | ChannelCondition.EXIT_SIGNAL;
-
- if (c.state == Channel.STATE_CLOSED)
- return current_cond | ChannelCondition.CLOSED | ChannelCondition.EOF;
-
- if ((current_cond & condition_mask) != 0)
- return current_cond;
-
- if (timeout > 0)
- {
- if (!end_time_set)
- {
- end_time = System.currentTimeMillis() + timeout;
- end_time_set = true;
- }
- else
- {
- timeout = end_time - System.currentTimeMillis();
-
- if (timeout <= 0)
- return current_cond | ChannelCondition.TIMEOUT;
- }
- }
-
- try
- {
- if (timeout > 0)
- c.wait(timeout);
- else
- c.wait();
- }
- catch (InterruptedException e)
- {
- wasInterrupted = true;
- }
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public int getAvailable(Channel c, boolean extended) throws IOException
- {
- synchronized (c)
- {
- int avail;
-
- if (extended)
- avail = c.stderrWritepos - c.stderrReadpos;
- else
- avail = c.stdoutWritepos - c.stdoutReadpos;
-
- return ((avail > 0) ? avail : (c.EOF ? -1 : 0));
- }
- }
-
- public int getChannelData(Channel c, boolean extended, byte[] target, int off, int len) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- int copylen = 0;
- int increment = 0;
- int remoteID = 0;
- int localID = 0;
-
- synchronized (c)
- {
- int stdoutAvail = 0;
- int stderrAvail = 0;
-
- while (true)
- {
- /*
- * Data available? We have to return remaining data even if the
- * channel is already closed.
- */
-
- stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
- stderrAvail = c.stderrWritepos - c.stderrReadpos;
-
- if ((!extended) && (stdoutAvail != 0))
- break;
-
- if ((extended) && (stderrAvail != 0))
- break;
-
- /* Do not wait if more data will never arrive (EOF or CLOSED) */
-
- if ((c.EOF) || (c.state != Channel.STATE_OPEN))
- return -1;
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- /* OK, there is some data. Return it. */
-
- if (!extended)
- {
- copylen = (stdoutAvail > len) ? len : stdoutAvail;
- System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, target, off, copylen);
- c.stdoutReadpos += copylen;
-
- if (c.stdoutReadpos != c.stdoutWritepos)
-
- System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, c.stdoutBuffer, 0, c.stdoutWritepos
- - c.stdoutReadpos);
-
- c.stdoutWritepos -= c.stdoutReadpos;
- c.stdoutReadpos = 0;
- }
- else
- {
- copylen = (stderrAvail > len) ? len : stderrAvail;
- System.arraycopy(c.stderrBuffer, c.stderrReadpos, target, off, copylen);
- c.stderrReadpos += copylen;
-
- if (c.stderrReadpos != c.stderrWritepos)
-
- System.arraycopy(c.stderrBuffer, c.stderrReadpos, c.stderrBuffer, 0, c.stderrWritepos
- - c.stderrReadpos);
-
- c.stderrWritepos -= c.stderrReadpos;
- c.stderrReadpos = 0;
- }
-
- if (c.state != Channel.STATE_OPEN)
- return copylen;
-
- if (c.localWindow < ((Channel.CHANNEL_BUFFER_SIZE + 1) / 2))
- {
- int minFreeSpace = Math.min(Channel.CHANNEL_BUFFER_SIZE - c.stdoutWritepos,
- Channel.CHANNEL_BUFFER_SIZE - c.stderrWritepos);
-
- increment = minFreeSpace - c.localWindow;
- c.localWindow = minFreeSpace;
- }
-
- remoteID = c.remoteID; /* read while holding the lock */
- localID = c.localID; /* read while holding the lock */
- }
-
- /*
- * If a consumer reads stdout and stdin in parallel, we may end up with
- * sending two msgWindowAdjust messages. Luckily, it
- * does not matter in which order they arrive at the server.
- */
-
- if (increment > 0)
- {
- log.debug("Sending SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + localID + ", " + increment + ")");
-
- synchronized (c.channelSendLock)
- {
- byte[] msg = c.msgWindowAdjust;
-
- msg[0] = Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST;
- msg[1] = (byte) (remoteID >> 24);
- msg[2] = (byte) (remoteID >> 16);
- msg[3] = (byte) (remoteID >> 8);
- msg[4] = (byte) (remoteID);
- msg[5] = (byte) (increment >> 24);
- msg[6] = (byte) (increment >> 16);
- msg[7] = (byte) (increment >> 8);
- msg[8] = (byte) (increment);
-
- if (c.closeMessageSent == false)
- tm.sendMessage(msg);
- }
- }
-
- return copylen;
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
-
- }
-
- public void msgChannelData(byte[] msg, int msglen) throws IOException
- {
- if (msglen <= 9)
- throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int len = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_DATA message for non-existent channel " + id);
-
- if (len != (msglen - 9))
- throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong len (calculated " + (msglen - 9) + ", got "
- + len + ")");
-
- log.debug("Got SSH_MSG_CHANNEL_DATA (channel " + id + ", " + len + ")");
-
- synchronized (c)
- {
- if (c.state == Channel.STATE_CLOSED)
- return; // ignore
-
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Got SSH_MSG_CHANNEL_DATA, but channel is not in correct state (" + c.state + ")");
-
- if (c.localWindow < len)
- throw new IOException("Remote sent too much data, does not fit into window.");
-
- c.localWindow -= len;
-
- System.arraycopy(msg, 9, c.stdoutBuffer, c.stdoutWritepos, len);
- c.stdoutWritepos += len;
-
- c.notifyAll();
- }
- }
-
- public void msgChannelWindowAdjust(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 9)
- throw new IOException("SSH_MSG_CHANNEL_WINDOW_ADJUST message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int windowChange = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_WINDOW_ADJUST message for non-existent channel " + id);
-
- synchronized (c)
- {
- final long huge = 0xFFFFffffL; /* 2^32 - 1 */
-
- c.remoteWindow += (windowChange & huge); /* avoid sign extension */
-
- /* TODO - is this a good heuristic? */
-
- if ((c.remoteWindow > huge))
- c.remoteWindow = huge;
-
- c.notifyAll();
- }
-
-
- log.debug("Got SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + id + ", " + windowChange + ")");
- }
-
- public void msgChannelOpen(byte[] msg, int msglen) throws IOException
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- String channelType = tr.readString();
- int remoteID = tr.readUINT32(); /* sender channel */
- int remoteWindow = tr.readUINT32(); /* initial window size */
- int remoteMaxPacketSize = tr.readUINT32(); /* maximum packet size */
-
- if ("x11".equals(channelType))
- {
- synchronized (x11_magic_cookies)
- {
- /* If we did not request X11 forwarding, then simply ignore this bogus request. */
-
- if (x11_magic_cookies.size() == 0)
- {
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
- Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED, "X11 forwarding not activated", "");
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
- log.warning("Unexpected X11 request, denying it!");
-
- return;
- }
- }
-
- String remoteOriginatorAddress = tr.readString();
- int remoteOriginatorPort = tr.readUINT32();
-
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* properly convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- }
-
- /*
- * The open confirmation message will be sent from another thread
- */
-
- RemoteX11AcceptThread rxat = new RemoteX11AcceptThread(c, remoteOriginatorAddress, remoteOriginatorPort);
- rxat.setDaemon(true);
- rxat.start();
-
- return;
- }
-
- if ("forwarded-tcpip".equals(channelType))
- {
- String remoteConnectedAddress = tr.readString(); /* address that was connected */
- int remoteConnectedPort = tr.readUINT32(); /* port that was connected */
- String remoteOriginatorAddress = tr.readString(); /* originator IP address */
- int remoteOriginatorPort = tr.readUINT32(); /* originator port */
-
- RemoteForwardingData rfd = null;
-
- synchronized (remoteForwardings)
- {
- rfd = remoteForwardings.get(new Integer(remoteConnectedPort));
- }
-
- if (rfd == null)
- {
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
- Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
- "No thanks, unknown port in forwarded-tcpip request", "");
-
- /* Always try to be polite. */
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
- log.debug("Unexpected forwarded-tcpip request, denying it!");
-
- return;
- }
-
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- }
-
- /*
- * The open confirmation message will be sent from another thread.
- */
-
- RemoteAcceptThread rat = new RemoteAcceptThread(c, remoteConnectedAddress, remoteConnectedPort,
- remoteOriginatorAddress, remoteOriginatorPort, rfd.targetAddress, rfd.targetPort);
-
- rat.setDaemon(true);
- rat.start();
-
- return;
- }
-
- if ((server_state != null) && ("session".equals(channelType)))
- {
- ServerConnectionCallback cb = null;
-
- synchronized (server_state)
- {
- cb = server_state.cb_conn;
- }
-
- if (cb == null)
- {
- tm.sendAsynchronousMessage(new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
- "Sessions are currently not enabled", "en").getPayload());
-
- return;
- }
-
- final Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- c.state = Channel.STATE_OPEN;
- c.ss = new ServerSessionImpl(c);
- }
-
- PacketChannelOpenConfirmation pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID,
- c.localWindow, c.localMaxPacketSize);
-
- tm.sendAsynchronousMessage(pcoc.getPayload());
-
- c.ss.sscb = cb.acceptSession(c.ss);
-
- return;
- }
-
- /* Tell the server that we have no idea what it is talking about */
-
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_UNKNOWN_CHANNEL_TYPE,
- "Unknown channel type", "");
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
-
- log.warning("The peer tried to open an unsupported channel type (" + channelType + ")");
- }
-
- /* Starts the given runnable in a foreground (non-daemon) thread */
- private void runAsync(Runnable r)
- {
- Thread t = new Thread(r);
- t.start();
- }
-
- public void msgChannelRequest(byte[] msg, int msglen) throws IOException
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- int id = tr.readUINT32();
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_REQUEST message for non-existent channel " + id);
-
- ServerSessionImpl server_session = null;
-
- if (server_state != null)
- {
- synchronized (c)
- {
- server_session = c.ss;
- }
- }
-
- String type = tr.readString("US-ASCII");
- boolean wantReply = tr.readBoolean();
-
- log.debug("Got SSH_MSG_CHANNEL_REQUEST (channel " + id + ", '" + type + "')");
-
- if (type.equals("exit-status"))
- {
- if (wantReply != false)
- throw new IOException(
- "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-status message, 'want reply' is true");
-
- int exit_status = tr.readUINT32();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- synchronized (c)
- {
- c.exit_status = new Integer(exit_status);
- c.notifyAll();
- }
-
- log.debug("Got EXIT STATUS (channel " + id + ", status " + exit_status + ")");
-
- return;
- }
-
- if ((server_state == null) && (type.equals("exit-signal")))
- {
- if (wantReply != false)
- throw new IOException(
- "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-signal message, 'want reply' is true");
-
- String signame = tr.readString("US-ASCII");
- tr.readBoolean();
- tr.readString();
- tr.readString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- synchronized (c)
- {
- c.exit_signal = signame;
- c.notifyAll();
- }
-
- log.debug("Got EXIT SIGNAL (channel " + id + ", signal " + signame + ")");
-
- return;
- }
-
- if ((server_session != null) && (type.equals("pty-req")))
- {
- PtySettings pty = new PtySettings();
-
- pty.term = tr.readString();
- pty.term_width_characters = tr.readUINT32();
- pty.term_height_characters = tr.readUINT32();
- pty.term_width_pixels = tr.readUINT32();
- pty.term_height_pixels = tr.readUINT32();
- pty.terminal_modes = tr.readByteString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
-
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestPtyReq(server_session, pty);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("subsystem")))
- {
- String command = tr.readString();
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestSubsystem(server_session, command);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("shell")))
- {
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestShell(server_session);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("exec")))
- {
- String command = tr.readString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestExec(server_session, command);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- /* We simply ignore unknown channel requests, however, if the server wants a reply,
- * then we signal that we have no idea what it is about.
- */
-
- if (wantReply)
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
-
- log.debug("Channel request '" + type + "' is not known, ignoring it");
- }
-
- public void msgChannelEOF(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_EOF message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_EOF message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.EOF = true;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_EOF (channel " + id + ")");
- }
-
- public void msgChannelClose(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_CLOSE message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_CLOSE message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("Close requested by remote");
- c.closeMessageRecv = true;
-
- removeChannel(c.localID);
-
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_CLOSE (channel " + id + ")");
- }
-
- public void msgChannelSuccess(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_SUCCESS message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_SUCCESS message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.successCounter++;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_SUCCESS (channel " + id + ")");
- }
-
- public void msgChannelFailure(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_FAILURE message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_FAILURE message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.failedCounter++;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_FAILURE (channel " + id + ")");
- }
-
- public void msgChannelOpenConfirmation(byte[] msg, int msglen) throws IOException
- {
- PacketChannelOpenConfirmation sm = new PacketChannelOpenConfirmation(msg, 0, msglen);
-
- Channel c = getChannel(sm.recipientChannelID);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for non-existent channel "
- + sm.recipientChannelID);
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPENING)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for channel "
- + sm.recipientChannelID);
-
- c.remoteID = sm.senderChannelID;
- c.remoteWindow = sm.initialWindowSize & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = sm.maxPacketSize;
- c.state = Channel.STATE_OPEN;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_OPEN_CONFIRMATION (channel " + sm.recipientChannelID + " / remote: "
- + sm.senderChannelID + ")");
- }
-
- public void msgChannelOpenFailure(byte[] msg, int msglen) throws IOException
- {
- if (msglen < 5)
- throw new IOException("SSH_MSG_CHANNEL_OPEN_FAILURE message has wrong size (" + msglen + ")");
-
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- int id = tr.readUINT32(); /* sender channel */
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_FAILURE message for non-existent channel " + id);
-
- int reasonCode = tr.readUINT32();
- String description = tr.readString("UTF-8");
-
- String reasonCodeSymbolicName = null;
-
- switch (reasonCode)
- {
- case 1:
- reasonCodeSymbolicName = "SSH_OPEN_ADMINISTRATIVELY_PROHIBITED";
- break;
- case 2:
- reasonCodeSymbolicName = "SSH_OPEN_CONNECT_FAILED";
- break;
- case 3:
- reasonCodeSymbolicName = "SSH_OPEN_UNKNOWN_CHANNEL_TYPE";
- break;
- case 4:
- reasonCodeSymbolicName = "SSH_OPEN_RESOURCE_SHORTAGE";
- break;
- default:
- reasonCodeSymbolicName = "UNKNOWN REASON CODE (" + reasonCode + ")";
- }
-
- StringBuilder descriptionBuffer = new StringBuilder();
- descriptionBuffer.append(description);
-
- for (int i = 0; i < descriptionBuffer.length(); i++)
- {
- char cc = descriptionBuffer.charAt(i);
-
- if ((cc >= 32) && (cc <= 126))
- continue;
- descriptionBuffer.setCharAt(i, '\uFFFD');
- }
-
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("The server refused to open the channel (" + reasonCodeSymbolicName + ", '"
- + descriptionBuffer.toString() + "')");
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_OPEN_FAILURE (channel " + id + ")");
- }
-
- public void msgGlobalRequest(byte[] msg, int msglen) throws IOException
- {
- /* Currently we do not support any kind of global request */
-
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- String requestName = tr.readString();
- boolean wantReply = tr.readBoolean();
-
- if (wantReply)
- {
- byte[] reply_failure = new byte[1];
- reply_failure[0] = Packets.SSH_MSG_REQUEST_FAILURE;
-
- tm.sendAsynchronousMessage(reply_failure);
- }
-
- /* We do not clean up the requestName String - that is OK for debug */
-
- log.debug("Got SSH_MSG_GLOBAL_REQUEST (" + requestName + ")");
- }
-
- public void msgGlobalSuccess() throws IOException
- {
- synchronized (channels)
- {
- globalSuccessCounter++;
- channels.notifyAll();
- }
-
- log.debug("Got SSH_MSG_REQUEST_SUCCESS");
- }
-
- public void msgGlobalFailure() throws IOException
- {
- synchronized (channels)
- {
- globalFailedCounter++;
- channels.notifyAll();
- }
-
- log.debug("Got SSH_MSG_REQUEST_FAILURE");
- }
-
- public void handleMessage(byte[] msg, int msglen) throws IOException
- {
- if (msg == null)
- {
-
- log.debug("HandleMessage: got shutdown");
-
- synchronized (listenerThreads)
- {
- for (IChannelWorkerThread lat : listenerThreads)
- {
- lat.stopWorking();
- }
- listenerThreadsAllowed = false;
- }
-
- synchronized (channels)
- {
- shutdown = true;
-
- for (Channel c : channels)
- {
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("The connection is being shutdown");
- c.closeMessageRecv = true; /*
- * You never know, perhaps
- * we are waiting for a
- * pending close message
- * from the server...
- */
- c.notifyAll();
- }
- }
-
- channels.clear();
- channels.notifyAll(); /* Notify global response waiters */
- return;
- }
- }
-
- switch (msg[0])
- {
- case Packets.SSH_MSG_CHANNEL_OPEN_CONFIRMATION:
- msgChannelOpenConfirmation(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST:
- msgChannelWindowAdjust(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_DATA:
- msgChannelData(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_EXTENDED_DATA:
- msgChannelExtendedData(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_REQUEST:
- msgChannelRequest(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_EOF:
- msgChannelEOF(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_OPEN:
- msgChannelOpen(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_CLOSE:
- msgChannelClose(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_SUCCESS:
- msgChannelSuccess(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_FAILURE:
- msgChannelFailure(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_OPEN_FAILURE:
- msgChannelOpenFailure(msg, msglen);
- break;
- case Packets.SSH_MSG_GLOBAL_REQUEST:
- msgGlobalRequest(msg, msglen);
- break;
- case Packets.SSH_MSG_REQUEST_SUCCESS:
- msgGlobalSuccess();
- break;
- case Packets.SSH_MSG_REQUEST_FAILURE:
- msgGlobalFailure();
- break;
- default:
- throw new IOException("Cannot handle unknown channel message " + (msg[0] & 0xff));
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2006-2013 Christian Plattner. All rights reserved.
- * Please refer to the LICENSE.txt for licensing details.
- */
-
-package ch.ethz.ssh2.transport;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.security.SecureRandom;
-import java.util.List;
-import java.util.Vector;
-
-import ch.ethz.ssh2.ConnectionInfo;
-import ch.ethz.ssh2.ConnectionMonitor;
-import ch.ethz.ssh2.DHGexParameters;
-import ch.ethz.ssh2.HTTPProxyData;
-import ch.ethz.ssh2.HTTPProxyException;
-import ch.ethz.ssh2.ProxyData;
-import ch.ethz.ssh2.ServerHostKeyVerifier;
-import ch.ethz.ssh2.crypto.Base64;
-import ch.ethz.ssh2.crypto.CryptoWishList;
-import ch.ethz.ssh2.crypto.cipher.BlockCipher;
-import ch.ethz.ssh2.crypto.digest.MAC;
-import ch.ethz.ssh2.log.Logger;
-import ch.ethz.ssh2.packets.PacketDisconnect;
-import ch.ethz.ssh2.packets.Packets;
-import ch.ethz.ssh2.packets.TypesReader;
-import ch.ethz.ssh2.server.ServerConnectionState;
-import ch.ethz.ssh2.signature.DSAPrivateKey;
-import ch.ethz.ssh2.signature.RSAPrivateKey;
-import ch.ethz.ssh2.util.StringEncoder;
-import ch.ethz.ssh2.util.Tokenizer;
-
-/*
- * Yes, the "standard" is a big mess. On one side, the say that arbitary channel
- * packets are allowed during kex exchange, on the other side we need to blindly
- * ignore the next _packet_ if the KEX guess was wrong. Where do we know from that
- * the next packet is not a channel data packet? Yes, we could check if it is in
- * the KEX range. But the standard says nothing about this. The OpenSSH guys
- * block local "normal" traffic during KEX. That's fine - however, they assume
- * that the other side is doing the same. During re-key, if they receive traffic
- * other than KEX, they become horribly irritated and kill the connection. Since
- * we are very likely going to communicate with OpenSSH servers, we have to play
- * the same game - even though we could do better.
- *
- * btw: having stdout and stderr on the same channel, with a shared window, is
- * also a VERY good idea... =(
- */
-
-/**
- * TransportManager.
- *
- * @author Christian Plattner
- * @version $Id: TransportManager.java 47 2013-07-31 23:59:52Z cleondris@gmail.com $
- */
-public class TransportManager
-{
- private static final Logger log = Logger.getLogger(TransportManager.class);
-
- private static class HandlerEntry
- {
- MessageHandler mh;
- int low;
- int high;
- }
-
- private final List<AsynchronousEntry> asynchronousQueue = new Vector<AsynchronousEntry>();
- private Thread asynchronousThread = null;
- private boolean asynchronousPending = false;
-
- class AsynchronousEntry
- {
- public byte[] msg;
- public Runnable run;
-
- public AsynchronousEntry(byte[] msg, Runnable run)
- {
- this.msg = msg;
- this.run = run;
- }
- }
-
- class AsynchronousWorker extends Thread
- {
- @Override
- public void run()
- {
- while (true)
- {
- AsynchronousEntry item = null;
-
- synchronized (asynchronousQueue)
- {
- if (asynchronousQueue.size() == 0)
- {
- /* Only now we may reset the flag, since we are sure that all queued items
- * have been sent (there is a slight delay between de-queuing and sending,
- * this is why we need this flag! See code below. Sending takes place outside
- * of this lock, this is why a test for size()==0 (from another thread) does not ensure
- * that all messages have been sent.
- */
-
- asynchronousPending = false;
-
- /* Notify any senders that they can proceed, all async messages have been delivered */
-
- asynchronousQueue.notifyAll();
-
- /* After the queue is empty for about 2 seconds, stop this thread */
-
- try
- {
- asynchronousQueue.wait(2000);
- }
- catch (InterruptedException ignore)
- {
- }
-
- if (asynchronousQueue.size() == 0)
- {
- asynchronousThread = null;
- return;
- }
- }
-
- item = asynchronousQueue.remove(0);
- }
-
- /* The following invocation may throw an IOException.
- * There is no point in handling it - it simply means
- * that the connection has a problem and we should stop
- * sending asynchronously messages. We do not need to signal that
- * we have exited (asynchronousThread = null): further
- * messages in the queue cannot be sent by this or any
- * other thread.
- * Other threads will sooner or later (when receiving or
- * sending the next message) get the same IOException and
- * get to the same conclusion.
- */
-
- try
- {
- sendMessageImmediate(item.msg);
- }
- catch (IOException e)
- {
- return;
- }
-
- if (item.run != null)
- {
- try
- {
- item.run.run();
- }
- catch (Exception ignore)
- {
- }
-
- }
- }
- }
- }
-
- private Socket sock = new Socket();
-
- private final Object connectionSemaphore = new Object();
-
- private boolean flagKexOngoing = false;
- private boolean connectionClosed = false;
-
- private Throwable reasonClosedCause = null;
-
- private TransportConnection tc;
- private KexManager km;
-
- private final List<HandlerEntry> messageHandlers = new Vector<HandlerEntry>();
-
- private Thread receiveThread;
-
- private List<ConnectionMonitor> connectionMonitors = new Vector<ConnectionMonitor>();
- private boolean monitorsWereInformed = false;
-
- /**
- * There were reports that there are JDKs which use
- * the resolver even though one supplies a dotted IP
- * address in the Socket constructor. That is why we
- * try to generate the InetAdress "by hand".
- *
- * @param host
- * @return the InetAddress
- * @throws UnknownHostException
- */
- private static InetAddress createInetAddress(String host) throws UnknownHostException
- {
- /* Check if it is a dotted IP4 address */
-
- InetAddress addr = parseIPv4Address(host);
-
- if (addr != null)
- {
- return addr;
- }
-
- return InetAddress.getByName(host);
- }
-
- private static InetAddress parseIPv4Address(String host) throws UnknownHostException
- {
- if (host == null)
- {
- return null;
- }
-
- String[] quad = Tokenizer.parseTokens(host, '.');
-
- if ((quad == null) || (quad.length != 4))
- {
- return null;
- }
-
- byte[] addr = new byte[4];
-
- for (int i = 0; i < 4; i++)
- {
- int part = 0;
-
- if ((quad[i].length() == 0) || (quad[i].length() > 3))
- {
- return null;
- }
-
- for (int k = 0; k < quad[i].length(); k++)
- {
- char c = quad[i].charAt(k);
-
- /* No, Character.isDigit is not the same */
- if ((c < '0') || (c > '9'))
- {
- return null;
- }
-
- part = part * 10 + (c - '0');
- }
-
- if (part > 255) /* 300.1.2.3 is invalid =) */
- {
- return null;
- }
-
- addr[i] = (byte) part;
- }
-
- return InetAddress.getByAddress(host, addr);
- }
-
- public int getPacketOverheadEstimate()
- {
- return tc.getPacketOverheadEstimate();
- }
-
- public void setTcpNoDelay(boolean state) throws IOException
- {
- sock.setTcpNoDelay(state);
- }
-
- public void setSoTimeout(int timeout) throws IOException
- {
- sock.setSoTimeout(timeout);
- }
-
- public ConnectionInfo getConnectionInfo(int kexNumber) throws IOException
- {
- return km.getOrWaitForConnectionInfo(kexNumber);
- }
-
- public Throwable getReasonClosedCause()
- {
- synchronized (connectionSemaphore)
- {
- return reasonClosedCause;
- }
- }
-
- public byte[] getSessionIdentifier()
- {
- return km.sessionId;
- }
-
- public void close(Throwable cause, boolean useDisconnectPacket)
- {
- if (useDisconnectPacket == false)
- {
- /* OK, hard shutdown - do not aquire the semaphore,
- * perhaps somebody is inside (and waits until the remote
- * side is ready to accept new data). */
-
- try
- {
- sock.close();
- }
- catch (IOException ignore)
- {
- }
-
- /* OK, whoever tried to send data, should now agree that
- * there is no point in further waiting =)
- * It is safe now to aquire the semaphore.
- */
- }
-
- synchronized (connectionSemaphore)
- {
- if (connectionClosed == false)
- {
- if (useDisconnectPacket == true)
- {
- try
- {
- byte[] msg = new PacketDisconnect(Packets.SSH_DISCONNECT_BY_APPLICATION, cause.getMessage(), "")
- .getPayload();
- if (tc != null)
- {
- tc.sendMessage(msg);
- }
- }
- catch (IOException ignore)
- {
- }
-
- try
- {
- sock.close();
- }
- catch (IOException ignore)
- {
- }
- }
-
- connectionClosed = true;
- reasonClosedCause = cause; /* may be null */
- }
- connectionSemaphore.notifyAll();
- }
-
- /* No check if we need to inform the monitors */
-
- List<ConnectionMonitor> monitors = new Vector<ConnectionMonitor>();
-
- synchronized (this)
- {
- /* Short term lock to protect "connectionMonitors"
- * and "monitorsWereInformed"
- * (they may be modified concurrently)
- */
-
- if (monitorsWereInformed == false)
- {
- monitorsWereInformed = true;
- monitors.addAll(connectionMonitors);
- }
- }
-
- for (ConnectionMonitor cmon : monitors)
- {
- try
- {
- cmon.connectionLost(reasonClosedCause);
- }
- catch (Exception ignore)
- {
- }
- }
- }
-
- private static Socket establishConnection(String hostname, int port, ProxyData proxyData, int connectTimeout)
- throws IOException
- {
- /* See the comment for createInetAddress() */
-
- if (proxyData == null)
- {
- InetAddress addr = createInetAddress(hostname);
- Socket s = new Socket();
- s.connect(new InetSocketAddress(addr, port), connectTimeout);
- return s;
- }
-
- if (proxyData instanceof HTTPProxyData)
- {
- HTTPProxyData pd = (HTTPProxyData) proxyData;
-
- /* At the moment, we only support HTTP proxies */
-
- InetAddress addr = createInetAddress(pd.proxyHost);
- Socket s = new Socket();
- s.connect(new InetSocketAddress(addr, pd.proxyPort), connectTimeout);
-
- /* OK, now tell the proxy where we actually want to connect to */
-
- StringBuilder sb = new StringBuilder();
-
- sb.append("CONNECT ");
- sb.append(hostname);
- sb.append(':');
- sb.append(port);
- sb.append(" HTTP/1.0\r\n");
-
- if ((pd.proxyUser != null) && (pd.proxyPass != null))
- {
- String credentials = pd.proxyUser + ":" + pd.proxyPass;
- char[] encoded = Base64.encode(StringEncoder.GetBytes(credentials));
- sb.append("Proxy-Authorization: Basic ");
- sb.append(encoded);
- sb.append("\r\n");
- }
-
- if (pd.requestHeaderLines != null)
- {
- for (int i = 0; i < pd.requestHeaderLines.length; i++)
- {
- if (pd.requestHeaderLines[i] != null)
- {
- sb.append(pd.requestHeaderLines[i]);
- sb.append("\r\n");
- }
- }
- }
-
- sb.append("\r\n");
-
- OutputStream out = s.getOutputStream();
-
- out.write(StringEncoder.GetBytes(sb.toString()));
- out.flush();
-
- /* Now parse the HTTP response */
-
- byte[] buffer = new byte[1024];
- InputStream in = s.getInputStream();
-
- int len = ClientServerHello.readLineRN(in, buffer);
-
- String httpReponse = StringEncoder.GetString(buffer, 0, len);
-
- if (httpReponse.startsWith("HTTP/") == false)
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- /* "HTTP/1.X XYZ X" => 14 characters minimum */
-
- if ((httpReponse.length() < 14) || (httpReponse.charAt(8) != ' ') || (httpReponse.charAt(12) != ' '))
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- int errorCode = 0;
-
- try
- {
- errorCode = Integer.parseInt(httpReponse.substring(9, 12));
- }
- catch (NumberFormatException ignore)
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- if ((errorCode < 0) || (errorCode > 999))
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- if (errorCode != 200)
- {
- throw new HTTPProxyException(httpReponse.substring(13), errorCode);
- }
-
- /* OK, read until empty line */
-
- while (true)
- {
- len = ClientServerHello.readLineRN(in, buffer);
- if (len == 0)
- {
- break;
- }
- }
- return s;
- }
-
- throw new IOException("Unsupported ProxyData");
- }
-
- private void startReceiver() throws IOException
- {
- receiveThread = new Thread(new Runnable()
- {
- public void run()
- {
- try
- {
- receiveLoop();
- }
- catch (Exception e)
- {
- close(e, false);
-
- log.warning("Receive thread: error in receiveLoop: " + e.getMessage());
- }
-
- if (log.isDebugEnabled())
- {
- log.debug("Receive thread: back from receiveLoop");
- }
-
- /* Tell all handlers that it is time to say goodbye */
-
- if (km != null)
- {
- try
- {
- km.handleMessage(null, 0);
- }
- catch (IOException ignored)
- {
- }
- }
-
- for (HandlerEntry he : messageHandlers)
- {
- try
- {
- he.mh.handleMessage(null, 0);
- }
- catch (Exception ignore)
- {
- }
- }
- }
- });
-
- receiveThread.setDaemon(true);
- receiveThread.start();
- }
-
- public void clientInit(Socket socket, String softwareversion, CryptoWishList cwl,
- ServerHostKeyVerifier verifier, DHGexParameters dhgex, SecureRandom rnd) throws IOException
- {
- /* First, establish the TCP connection to the SSH-2 server */
-
- sock = socket;
-
- /* Parse the server line and say hello - important: this information is later needed for the
- * key exchange (to stop man-in-the-middle attacks) - that is why we wrap it into an object
- * for later use.
- */
-
- ClientServerHello csh = ClientServerHello.clientHello(softwareversion, sock.getInputStream(),
- sock.getOutputStream());
-
- tc = new TransportConnection(sock.getInputStream(), sock.getOutputStream(), rnd);
- String hostname = sock.getInetAddress().getHostName();
- int port = sock.getPort();
-
- km = new ClientKexManager(this, csh, cwl, hostname, port, verifier, rnd);
- km.initiateKEX(cwl, dhgex, null, null);
-
- startReceiver();
- }
-
- public void clientInit(String hostname, int port, String softwareversion, CryptoWishList cwl,
- ServerHostKeyVerifier verifier, DHGexParameters dhgex, int connectTimeout, SecureRandom rnd,
- ProxyData proxyData) throws IOException
- {
- /* First, establish the TCP connection to the SSH-2 server */
-
- sock = establishConnection(hostname, port, proxyData, connectTimeout);
-
- /* Parse the server line and say hello - important: this information is later needed for the
- * key exchange (to stop man-in-the-middle attacks) - that is why we wrap it into an object
- * for later use.
- */
-
- ClientServerHello csh = ClientServerHello.clientHello(softwareversion, sock.getInputStream(),
- sock.getOutputStream());
-
- tc = new TransportConnection(sock.getInputStream(), sock.getOutputStream(), rnd);
-
- km = new ClientKexManager(this, csh, cwl, hostname, port, verifier, rnd);
- km.initiateKEX(cwl, dhgex, null, null);
-
- startReceiver();
- }
-
- public void serverInit(ServerConnectionState state) throws IOException
- {
- /* TCP connection is already established */
-
- this.sock = state.s;
-
- /* Parse the client line and say hello - important: this information is later needed for the
- * key exchange (to stop man-in-the-middle attacks) - that is why we wrap it into an object
- * for later use.
- */
-
- state.csh = ClientServerHello.serverHello(state.softwareversion, sock.getInputStream(), sock.getOutputStream());
-
- tc = new TransportConnection(sock.getInputStream(), sock.getOutputStream(), state.generator);
-
- km = new ServerKexManager(state);
- km.initiateKEX(state.next_cryptoWishList, null, state.next_dsa_key, state.next_rsa_key);
-
- startReceiver();
- }
-
- public void registerMessageHandler(MessageHandler mh, int low, int high)
- {
- HandlerEntry he = new HandlerEntry();
- he.mh = mh;
- he.low = low;
- he.high = high;
-
- synchronized (messageHandlers)
- {
- messageHandlers.add(he);
- }
- }
-
- public void removeMessageHandler(MessageHandler mh, int low, int high)
- {
- synchronized (messageHandlers)
- {
- for (int i = 0; i < messageHandlers.size(); i++)
- {
- HandlerEntry he = messageHandlers.get(i);
- if ((he.mh == mh) && (he.low == low) && (he.high == high))
- {
- messageHandlers.remove(i);
- break;
- }
- }
- }
- }
-
- public void sendKexMessage(byte[] msg) throws IOException
- {
- synchronized (connectionSemaphore)
- {
- if (connectionClosed)
- {
- throw (IOException) new IOException("Sorry, this connection is closed.").initCause(reasonClosedCause);
- }
-
- flagKexOngoing = true;
-
- try
- {
- tc.sendMessage(msg);
- }
- catch (IOException e)
- {
- close(e, false);
- throw e;
- }
- }
- }
-
- public void kexFinished() throws IOException
- {
- synchronized (connectionSemaphore)
- {
- flagKexOngoing = false;
- connectionSemaphore.notifyAll();
- }
- }
-
- /**
- *
- * @param cwl
- * @param dhgex
- * @param dsa may be null if this is a client connection
- * @param rsa may be null if this is a client connection
- * @throws IOException
- */
- public void forceKeyExchange(CryptoWishList cwl, DHGexParameters dhgex, DSAPrivateKey dsa, RSAPrivateKey rsa)
- throws IOException
- {
- synchronized (connectionSemaphore)
- {
- if (connectionClosed)
- /* Inform the caller that there is no point in triggering a new kex */
- throw (IOException) new IOException("Sorry, this connection is closed.").initCause(reasonClosedCause);
- }
-
- km.initiateKEX(cwl, dhgex, dsa, rsa);
- }
-
- public void changeRecvCipher(BlockCipher bc, MAC mac)
- {
- tc.changeRecvCipher(bc, mac);
- }
-
- public void changeSendCipher(BlockCipher bc, MAC mac)
- {
- tc.changeSendCipher(bc, mac);
- }
-
- public void sendAsynchronousMessage(byte[] msg) throws IOException
- {
- sendAsynchronousMessage(msg, null);
- }
-
- public void sendAsynchronousMessage(byte[] msg, Runnable run) throws IOException
- {
- synchronized (asynchronousQueue)
- {
- asynchronousQueue.add(new AsynchronousEntry(msg, run));
- asynchronousPending = true;
-
- /* This limit should be flexible enough. We need this, otherwise the peer
- * can flood us with global requests (and other stuff where we have to reply
- * with an asynchronous message) and (if the server just sends data and does not
- * read what we send) this will probably put us in a low memory situation
- * (our send queue would grow and grow and...) */
-
- if (asynchronousQueue.size() > 100)
- {
- throw new IOException("Error: the peer is not consuming our asynchronous replies.");
- }
-
- /* Check if we have an asynchronous sending thread */
-
- if (asynchronousThread == null)
- {
- asynchronousThread = new AsynchronousWorker();
- asynchronousThread.setDaemon(true);
- asynchronousThread.start();
-
- /* The thread will stop after 2 seconds of inactivity (i.e., empty queue) */
- }
-
- asynchronousQueue.notifyAll();
- }
- }
-
- public void setConnectionMonitors(List<ConnectionMonitor> monitors)
- {
- synchronized (this)
- {
- connectionMonitors = new Vector<ConnectionMonitor>();
- connectionMonitors.addAll(monitors);
- }
- }
-
- /**
- * True if no response message expected.
- */
- private boolean idle;
-
- /**
- * Send a message but ensure that all queued messages are being sent first.
- *
- * @param msg
- * @throws IOException
- */
- public void sendMessage(byte[] msg) throws IOException
- {
- synchronized (asynchronousQueue)
- {
- while (asynchronousPending)
- {
- try
- {
- asynchronousQueue.wait(1000);
- }
- catch (InterruptedException e)
- {
- }
- }
- }
-
- sendMessageImmediate(msg);
- }
-
- /**
- * Send message, ignore queued async messages that have not been delivered yet.
- * Will be called directly from the asynchronousThread thread.
- *
- * @param msg
- * @throws IOException
- */
- public void sendMessageImmediate(byte[] msg) throws IOException
- {
- if (Thread.currentThread() == receiveThread)
- {
- throw new IOException("Assertion error: sendMessage may never be invoked by the receiver thread!");
- }
-
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (connectionSemaphore)
- {
- while (true)
- {
- if (connectionClosed)
- {
- throw (IOException) new IOException("Sorry, this connection is closed.")
- .initCause(reasonClosedCause);
- }
-
- if (flagKexOngoing == false)
- {
- break;
- }
-
- try
- {
- connectionSemaphore.wait();
- }
- catch (InterruptedException e)
- {
- wasInterrupted = true;
- }
- }
-
- try
- {
- tc.sendMessage(msg);
- idle = false;
- }
- catch (IOException e)
- {
- close(e, false);
- throw e;
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public void receiveLoop() throws IOException
- {
- byte[] msg = new byte[35000];
-
- while (true)
- {
- int msglen;
- try
- {
- msglen = tc.receiveMessage(msg, 0, msg.length);
- }
- catch (SocketTimeoutException e)
- {
- // Timeout in read
- if (idle)
- {
- log.debug("Ignoring socket timeout");
- continue;
- }
- throw e;
- }
- idle = true;
-
- int type = msg[0] & 0xff;
-
- if (type == Packets.SSH_MSG_IGNORE)
- {
- continue;
- }
-
- if (type == Packets.SSH_MSG_DEBUG)
- {
- if (log.isDebugEnabled())
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
- tr.readByte();
- tr.readBoolean();
- StringBuilder debugMessageBuffer = new StringBuilder();
- debugMessageBuffer.append(tr.readString("UTF-8"));
-
- for (int i = 0; i < debugMessageBuffer.length(); i++)
- {
- char c = debugMessageBuffer.charAt(i);
-
- if ((c >= 32) && (c <= 126))
- {
- continue;
- }
- debugMessageBuffer.setCharAt(i, '\uFFFD');
- }
-
- log.debug("DEBUG Message from remote: '" + debugMessageBuffer.toString() + "'");
- }
- continue;
- }
-
- if (type == Packets.SSH_MSG_UNIMPLEMENTED)
- {
- throw new IOException("Peer sent UNIMPLEMENTED message, that should not happen.");
- }
-
- if (type == Packets.SSH_MSG_DISCONNECT)
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
- tr.readByte();
- int reason_code = tr.readUINT32();
- StringBuilder reasonBuffer = new StringBuilder();
- reasonBuffer.append(tr.readString("UTF-8"));
-
- /*
- * Do not get fooled by servers that send abnormal long error
- * messages
- */
-
- if (reasonBuffer.length() > 255)
- {
- reasonBuffer.setLength(255);
- reasonBuffer.setCharAt(254, '.');
- reasonBuffer.setCharAt(253, '.');
- reasonBuffer.setCharAt(252, '.');
- }
-
- /*
- * Also, check that the server did not send characters that may
- * screw up the receiver -> restrict to reasonable US-ASCII
- * subset -> "printable characters" (ASCII 32 - 126). Replace
- * all others with 0xFFFD (UNICODE replacement character).
- */
-
- for (int i = 0; i < reasonBuffer.length(); i++)
- {
- char c = reasonBuffer.charAt(i);
-
- if ((c >= 32) && (c <= 126))
- {
- continue;
- }
- reasonBuffer.setCharAt(i, '\uFFFD');
- }
-
- throw new IOException("Peer sent DISCONNECT message (reason code " + reason_code + "): "
- + reasonBuffer.toString());
- }
-
- /*
- * Is it a KEX Packet?
- */
-
- if ((type == Packets.SSH_MSG_KEXINIT) || (type == Packets.SSH_MSG_NEWKEYS)
- || ((type >= 30) && (type <= 49)))
- {
- km.handleMessage(msg, msglen);
- continue;
- }
-
- MessageHandler mh = null;
-
- for (int i = 0; i < messageHandlers.size(); i++)
- {
- HandlerEntry he = messageHandlers.get(i);
- if ((he.low <= type) && (type <= he.high))
- {
- mh = he.mh;
- break;
- }
- }
-
- if (mh == null)
- {
- throw new IOException("Unexpected SSH message (type " + type + ")");
- }
-
- mh.handleMessage(msg, msglen);
- }
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <!-- Get some common settings for the project we are using it in -->
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../commons/thirdparty</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
-
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>com.sun.jersey.jersey-servlet</artifactId>
- <version>1.19.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.3.6</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Embed-Dependency>*;scope=!provided;type=!pom;inline=false</Embed-Dependency>
- <Embed-Transitive>false</Embed-Transitive>
- <Export-Package>
- com.sun.jersey.api.core.servlet,
- com.sun.jersey.spi.container.servlet,
- com.sun.jersey.spi.scanning.servlet,
- com.sun.jersey.server.impl.container.servlet
- </Export-Package>
- <Import-Package>
- com.sun.jersey.api.container,
- com.sun.jersey.api.core,
- com.sun.jersey.api.model,
- com.sun.jersey.api.representation,
- com.sun.jersey.api.uri,
- com.sun.jersey.api.view,
- com.sun.jersey.core.header,
- com.sun.jersey.core.reflection,
- com.sun.jersey.core.spi.component,
- com.sun.jersey.core.spi.component.ioc,
- com.sun.jersey.core.spi.scanning,
- com.sun.jersey.core.util,
- com.sun.jersey.server.impl,
- com.sun.jersey.server.impl.application,
- com.sun.jersey.server.impl.inject,
- com.sun.jersey.server.impl.monitoring,
- com.sun.jersey.server.probes,
- com.sun.jersey.server.spi.component,
- com.sun.jersey.spi,
- com.sun.jersey.spi.container,
- com.sun.jersey.spi.dispatch,
- com.sun.jersey.spi.inject,
- com.sun.jersey.spi.service,
- com.sun.jersey.spi.template,
- javax.naming,
- javax.ws.rs,
- javax.ws.rs.core,
- javax.ws.rs.ext,
- *;resolution:=optional
- </Import-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
-
- <dependencies>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-servlet</artifactId>
- <version>1.17</version>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>javax.servlet</artifactId>
- <version>3.0.0.v201112011016</version>
- <scope>provided</scope>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <!-- Get some common settings for the project we are using it in -->
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../commons/thirdparty</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
-
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>net.sf.jung2</artifactId>
- <version>2.1.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.3.6</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Embed-Dependency>*;scope=compile|runtime;type=!pom;inline=false</Embed-Dependency>
- <Embed-Transitive>false</Embed-Transitive>
- <Export-Package>
- org.apache.commons*,
- edu.uci.ics.jung.algorithms.blockmodel,
- edu.uci.ics.jung.algorithms.cluster,
- edu.uci.ics.jung.algorithms.filters,
- edu.uci.ics.jung.algorithms.flows,
- edu.uci.ics.jung.algorithms.generators,
- edu.uci.ics.jung.algorithms.generators.random,
- edu.uci.ics.jung.algorithms.layout,
- edu.uci.ics.jung.algorithms.layout.util,
- edu.uci.ics.jung.algorithms.metrics,
- edu.uci.ics.jung.algorithms.scoring,
- edu.uci.ics.jung.algorithms.scoring.util,
- edu.uci.ics.jung.algorithms.shortestpath,
- edu.uci.ics.jung.algorithms.transformation,
- edu.uci.ics.jung.algorithms.util,
- edu.uci.ics.jung.graph;-split-package:=merge-first,
- edu.uci.ics.jung.graph.event,
- edu.uci.ics.jung.graph.util;-split-package:=merge-first
- </Export-Package>
- <Import-Package>
- !*
- </Import-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
-
- <dependencies>
- <dependency>
- <groupId>net.sf.jung</groupId>
- <artifactId>jung-api</artifactId>
- <version>2.0.1</version>
- </dependency>
- <dependency>
- <groupId>net.sf.jung</groupId>
- <artifactId>jung-graph-impl</artifactId>
- <version>2.0.1</version>
- </dependency>
- <dependency>
- <groupId>net.sourceforge.collections</groupId>
- <artifactId>collections-generic</artifactId>
- <version>4.01</version>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2004, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- * Created on Jan 28, 2004
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.blockmodel;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.CollectionUtils;
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * Identifies sets of structurally equivalent vertices in a graph. Vertices <i>
- * i</i> and <i>j</i> are structurally equivalent iff the set of <i>i</i>'s
- * neighbors is identical to the set of <i>j</i>'s neighbors, with the
- * exception of <i>i</i> and <i>j</i> themselves. This algorithm finds all
- * sets of equivalent vertices in O(V^2) time.
- *
- * <p>You can extend this class to have a different definition of equivalence (by
- * overriding <code>isStructurallyEquivalent</code>), and may give it hints for
- * accelerating the process by overriding <code>canPossiblyCompare</code>.
- * (For example, in a bipartite graph, <code>canPossiblyCompare</code> may
- * return <code>false</code> for vertices in
- * different partitions. This function should be fast.)
- *
- * @author Danyel Fisher
- */
-public class StructurallyEquivalent<V,E> implements Transformer<Graph<V,E>, VertexPartition<V,E>>
-{
- public VertexPartition<V,E> transform(Graph<V,E> g)
- {
- Set<Pair<V>> vertex_pairs = getEquivalentPairs(g);
-
- Set<Set<V>> rv = new HashSet<Set<V>>();
- Map<V, Set<V>> intermediate = new HashMap<V, Set<V>>();
- for (Pair<V> p : vertex_pairs)
- {
- Set<V> res = intermediate.get(p.getFirst());
- if (res == null)
- res = intermediate.get(p.getSecond());
- if (res == null) // we haven't seen this one before
- res = new HashSet<V>();
- res.add(p.getFirst());
- res.add(p.getSecond());
- intermediate.put(p.getFirst(), res);
- intermediate.put(p.getSecond(), res);
- }
- rv.addAll(intermediate.values());
-
- // pick up the vertices which don't appear in intermediate; they are
- // singletons (equivalence classes of size 1)
- Collection<V> singletons = CollectionUtils.subtract(g.getVertices(),
- intermediate.keySet());
- for (V v : singletons)
- {
- Set<V> v_set = Collections.singleton(v);
- intermediate.put(v, v_set);
- rv.add(v_set);
- }
-
- return new VertexPartition<V, E>(g, intermediate, rv);
- }
-
- /**
- * For each vertex pair v, v1 in G, checks whether v and v1 are fully
- * equivalent: meaning that they connect to the exact same vertices. (Is
- * this regular equivalence, or whathaveyou?)
- *
- * Returns a Set of Pairs of vertices, where all the vertices in the inner
- * Pairs are equivalent.
- *
- * @param g
- */
- protected Set<Pair<V>> getEquivalentPairs(Graph<V,?> g) {
-
- Set<Pair<V>> rv = new HashSet<Pair<V>>();
- Set<V> alreadyEquivalent = new HashSet<V>();
-
- List<V> l = new ArrayList<V>(g.getVertices());
-
- for (V v1 : l)
- {
- if (alreadyEquivalent.contains(v1))
- continue;
-
- for (Iterator<V> iterator = l.listIterator(l.indexOf(v1) + 1); iterator.hasNext();) {
- V v2 = iterator.next();
-
- if (alreadyEquivalent.contains(v2))
- continue;
-
- if (!canPossiblyCompare(v1, v2))
- continue;
-
- if (isStructurallyEquivalent(g, v1, v2)) {
- Pair<V> p = new Pair<V>(v1, v2);
- alreadyEquivalent.add(v2);
- rv.add(p);
- }
- }
- }
-
- return rv;
- }
-
- /**
- * Checks whether a pair of vertices are structurally equivalent.
- * Specifically, whether v1's predecessors are equal to v2's predecessors,
- * and same for successors.
- *
- * @param g the graph in which the structural equivalence comparison is to take place
- * @param v1 the vertex to check for structural equivalence to v2
- * @param v2 the vertex to check for structural equivalence to v1
- */
- protected boolean isStructurallyEquivalent(Graph<V,?> g, V v1, V v2) {
-
- if( g.degree(v1) != g.degree(v2)) {
- return false;
- }
-
- Set<V> n1 = new HashSet<V>(g.getPredecessors(v1));
- n1.remove(v2);
- n1.remove(v1);
- Set<V> n2 = new HashSet<V>(g.getPredecessors(v2));
- n2.remove(v1);
- n2.remove(v2);
-
- Set<V> o1 = new HashSet<V>(g.getSuccessors(v1));
- Set<V> o2 = new HashSet<V>(g.getSuccessors(v2));
- o1.remove(v1);
- o1.remove(v2);
- o2.remove(v1);
- o2.remove(v2);
-
- // this neglects self-loops and directed edges from 1 to other
- boolean b = (n1.equals(n2) && o1.equals(o2));
- if (!b)
- return b;
-
- // if there's a directed edge v1->v2 then there's a directed edge v2->v1
- b &= ( g.isSuccessor(v1, v2) == g.isSuccessor(v2, v1));
-
- // self-loop check
- b &= ( g.isSuccessor(v1, v1) == g.isSuccessor(v2, v2));
-
- return b;
-
- }
-
- /**
- * This is a space for optimizations. For example, for a bipartite graph,
- * vertices from different partitions cannot possibly be compared.
- *
- * @param v1
- * @param v2
- */
- protected boolean canPossiblyCompare(V v1, V v2) {
- return true;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Feb 3, 2004
- */
-package edu.uci.ics.jung.algorithms.blockmodel;
-
-import java.util.*;
-
-import edu.uci.ics.jung.graph.Graph;
-
-
-/**
- * Maintains information about a vertex partition of a graph.
- * This can be built from a map from vertices to vertex sets
- * or from a collection of (disjoint) vertex sets,
- * such as those created by various clustering methods.
- */
-public class VertexPartition<V,E>
-{
- private Map<V,Set<V>> vertex_partition_map;
- private Collection<Set<V>> vertex_sets;
- private Graph<V,E> graph;
-
- /**
- * Creates an instance based on the specified graph and mapping from vertices
- * to vertex sets, and generates a set of partitions based on this mapping.
- * @param g the graph over which the vertex partition is defined
- * @param partition_map the mapping from vertices to vertex sets (partitions)
- */
- public VertexPartition(Graph<V,E> g, Map<V, Set<V>> partition_map)
- {
- this.vertex_partition_map = Collections.unmodifiableMap(partition_map);
- this.graph = g;
- }
-
- /**
- * Creates an instance based on the specified graph, vertex-set mapping,
- * and set of disjoint vertex sets. The vertex-set mapping and vertex
- * partitions must be consistent; that is, the mapping must reflect the
- * division of vertices into partitions, and each vertex must appear in
- * exactly one partition.
- * @param g the graph over which the vertex partition is defined
- * @param partition_map the mapping from vertices to vertex sets (partitions)
- * @param vertex_sets the set of disjoint vertex sets
- */
- public VertexPartition(Graph<V,E> g, Map<V, Set<V>> partition_map,
- Collection<Set<V>> vertex_sets)
- {
- this.vertex_partition_map = Collections.unmodifiableMap(partition_map);
- this.vertex_sets = vertex_sets;
- this.graph = g;
- }
-
- /**
- * Creates an instance based on the specified graph and set of disjoint vertex sets,
- * and generates a vertex-to-partition map based on these sets.
- * @param g the graph over which the vertex partition is defined
- * @param vertex_sets the set of disjoint vertex sets
- */
- public VertexPartition(Graph<V,E> g, Collection<Set<V>> vertex_sets)
- {
- this.vertex_sets = vertex_sets;
- this.graph = g;
- }
-
- /**
- * Returns the graph on which the partition is defined.
- * @return the graph on which the partition is defined
- */
- public Graph<V,E> getGraph()
- {
- return graph;
- }
-
- /**
- * Returns a map from each vertex in the input graph to its partition.
- * This map is generated if it does not already exist.
- * @return a map from each vertex in the input graph to a vertex set
- */
- public Map<V,Set<V>> getVertexToPartitionMap()
- {
- if (vertex_partition_map == null)
- {
- this.vertex_partition_map = new HashMap<V, Set<V>>();
- for (Set<V> set : this.vertex_sets)
- for (V v : set)
- this.vertex_partition_map.put(v, set);
- }
- return vertex_partition_map;
- }
-
- /**
- * Returns a collection of vertex sets, where each vertex in the
- * input graph is in exactly one set.
- * This collection is generated based on the vertex-to-partition map
- * if it does not already exist.
- * @return a collection of vertex sets such that each vertex in the
- * instance's graph is in exactly one set
- */
- public Collection<Set<V>> getVertexPartitions()
- {
- if (vertex_sets == null)
- {
- this.vertex_sets = new HashSet<Set<V>>();
- this.vertex_sets.addAll(vertex_partition_map.values());
- }
- return vertex_sets;
- }
-
- /**
- * Returns the number of partitions.
- */
- public int numPartitions()
- {
- return vertex_sets.size();
- }
-
- @Override
- public String toString()
- {
- return "Partitions: " + vertex_partition_map;
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Support for establishing and maintaining graph element equivalence (such as in blockmodeling).
-<P/>
-In blockmodeling, groups of vertices are clustered together by similarity
-(as if members of a "block" appearing on the diagonal of the graph's adjacency
-matrix).
-<p/>
-This support currently includes:
-<ul>
-<li/><code>VertexPartition</code>: A class that maintains information on a
-division of the vertices of a graph into disjoint sets.
-<li/><code>StructurallyEquivalent</code>: An algorithm that finds sets of vertices that are
-structurally equivalent.
-</ul>
-
-<p/>
-</body>
-</html>
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.cluster;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.UndirectedGraph;
-
-/**
- * Finds all biconnected components (bicomponents) of an undirected graph.
- * A graph is a biconnected component if
- * at least 2 vertices must be removed in order to disconnect the graph. (Graphs
- * consisting of one vertex, or of two connected vertices, are also biconnected.) Biconnected
- * components of three or more vertices have the property that every pair of vertices in the component
- * are connected by two or more vertex-disjoint paths.
- * <p>
- * Running time: O(|V| + |E|) where |V| is the number of vertices and |E| is the number of edges
- * @see "Depth first search and linear graph algorithms by R. E. Tarjan (1972), SIAM J. Comp."
- *
- * @author Joshua O'Madadhain
- */
-public class BicomponentClusterer<V,E> implements Transformer<UndirectedGraph<V,E>, Set<Set<V>>>
-{
- protected Map<V,Number> dfs_num;
- protected Map<V,Number> high;
- protected Map<V,V> parents;
- protected Stack<E> stack;
- protected int converse_depth;
-
- /**
- * Constructs a new bicomponent finder
- */
- public BicomponentClusterer() {
- }
-
- /**
- * Extracts the bicomponents from the graph.
- * @param theGraph the graph whose bicomponents are to be extracted
- * @return the <code>ClusterSet</code> of bicomponents
- */
- public Set<Set<V>> transform(UndirectedGraph<V,E> theGraph)
- {
- Set<Set<V>> bicomponents = new LinkedHashSet<Set<V>>();
-
- if (theGraph.getVertices().isEmpty())
- return bicomponents;
-
- // initialize DFS number for each vertex to 0
- dfs_num = new HashMap<V,Number>();
- for (V v : theGraph.getVertices())
- {
- dfs_num.put(v, 0);
- }
-
- for (V v : theGraph.getVertices())
- {
- if (dfs_num.get(v).intValue() == 0) // if we haven't hit this vertex yet...
- {
- high = new HashMap<V,Number>();
- stack = new Stack<E>();
- parents = new HashMap<V,V>();
- converse_depth = theGraph.getVertexCount();
- // find the biconnected components for this subgraph, starting from v
- findBiconnectedComponents(theGraph, v, bicomponents);
-
- // if we only visited one vertex, this method won't have
- // ID'd it as a biconnected component, so mark it as one
- if (theGraph.getVertexCount() - converse_depth == 1)
- {
- Set<V> s = new HashSet<V>();
- s.add(v);
- bicomponents.add(s);
- }
- }
- }
-
- return bicomponents;
- }
-
- /**
- * <p>Stores, in <code>bicomponents</code>, all the biconnected
- * components that are reachable from <code>v</code>.</p>
- *
- * <p>The algorithm basically proceeds as follows: do a depth-first
- * traversal starting from <code>v</code>, marking each vertex with
- * a value that indicates the order in which it was encountered (dfs_num),
- * and with
- * a value that indicates the highest point in the DFS tree that is known
- * to be reachable from this vertex using non-DFS edges (high). (Since it
- * is measured on non-DFS edges, "high" tells you how far back in the DFS
- * tree you can reach by two distinct paths, hence biconnectivity.)
- * Each time a new vertex w is encountered, push the edge just traversed
- * on a stack, and call this method recursively. If w.high is no greater than
- * v.dfs_num, then the contents of the stack down to (v,w) is a
- * biconnected component (and v is an articulation point, that is, a
- * component boundary). In either case, set v.high to max(v.high, w.high),
- * and continue. If w has already been encountered but is
- * not v's parent, set v.high max(v.high, w.dfs_num) and continue.
- *
- * <p>(In case anyone cares, the version of this algorithm on p. 224 of
- * Udi Manber's "Introduction to Algorithms: A Creative Approach" seems to be
- * wrong: the stack should be initialized outside this method,
- * (v,w) should only be put on the stack if w hasn't been seen already,
- * and there's no real benefit to putting v on the stack separately: just
- * check for (v,w) on the stack rather than v. Had I known this, I could
- * have saved myself a few days. JRTOM)</p>
- *
- */
- protected void findBiconnectedComponents(UndirectedGraph<V,E> g, V v, Set<Set<V>> bicomponents)
- {
- int v_dfs_num = converse_depth;
- dfs_num.put(v, v_dfs_num);
- converse_depth--;
- high.put(v, v_dfs_num);
-
- for (V w : g.getNeighbors(v))
- {
- int w_dfs_num = dfs_num.get(w).intValue();//get(w, dfs_num);
- E vw = g.findEdge(v,w);
- if (w_dfs_num == 0) // w hasn't yet been visited
- {
- parents.put(w, v); // v is w's parent in the DFS tree
- stack.push(vw);
- findBiconnectedComponents(g, w, bicomponents);
- int w_high = high.get(w).intValue();//get(w, high);
- if (w_high <= v_dfs_num)
- {
- // v disconnects w from the rest of the graph,
- // i.e., v is an articulation point
- // thus, everything between the top of the stack and
- // v is part of a single biconnected component
- Set<V> bicomponent = new HashSet<V>();
- E e;
- do
- {
- e = stack.pop();
- bicomponent.addAll(g.getIncidentVertices(e));
- }
- while (e != vw);
- bicomponents.add(bicomponent);
- }
- high.put(v, Math.max(w_high, high.get(v).intValue()));
- }
- else if (w != parents.get(v)) // (v,w) is a back or a forward edge
- high.put(v, Math.max(w_dfs_num, high.get(v).intValue()));
- }
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.cluster;
-
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.BetweennessCentrality;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-
-/**
- * An algorithm for computing clusters (community structure) in graphs based on edge betweenness.
- * The betweenness of an edge is defined as the extent to which that edge lies along
- * shortest paths between all pairs of nodes.
- *
- * This algorithm works by iteratively following the 2 step process:
- * <ul>
- * <li> Compute edge betweenness for all edges in current graph
- * <li> Remove edge with highest betweenness
- * </ul>
- * <p>
- * Running time is: O(kmn) where k is the number of edges to remove, m is the total number of edges, and
- * n is the total number of vertices. For very sparse graphs the running time is closer to O(kn^2) and for
- * graphs with strong community structure, the complexity is even lower.
- * <p>
- * This algorithm is a slight modification of the algorithm discussed below in that the number of edges
- * to be removed is parameterized.
- * @author Scott White
- * @author Tom Nelson (converted to jung2)
- * @see "Community structure in social and biological networks by Michelle Girvan and Mark Newman"
- */
-public class EdgeBetweennessClusterer<V,E> implements Transformer<Graph<V,E>,Set<Set<V>>> {
- private int mNumEdgesToRemove;
- private Map<E, Pair<V>> edges_removed;
-
- /**
- * Constructs a new clusterer for the specified graph.
- * @param numEdgesToRemove the number of edges to be progressively removed from the graph
- */
- public EdgeBetweennessClusterer(int numEdgesToRemove) {
- mNumEdgesToRemove = numEdgesToRemove;
- edges_removed = new LinkedHashMap<E, Pair<V>>();
- }
-
- /**
- * Finds the set of clusters which have the strongest "community structure".
- * The more edges removed the smaller and more cohesive the clusters.
- * @param graph the graph
- */
- public Set<Set<V>> transform(Graph<V,E> graph) {
-
- if (mNumEdgesToRemove < 0 || mNumEdgesToRemove > graph.getEdgeCount()) {
- throw new IllegalArgumentException("Invalid number of edges passed in.");
- }
-
- edges_removed.clear();
-
- for (int k=0;k<mNumEdgesToRemove;k++) {
- BetweennessCentrality<V,E> bc = new BetweennessCentrality<V,E>(graph);
- E to_remove = null;
- double score = 0;
- for (E e : graph.getEdges())
- if (bc.getEdgeScore(e) > score)
- {
- to_remove = e;
- score = bc.getEdgeScore(e);
- }
- edges_removed.put(to_remove, graph.getEndpoints(to_remove));
- graph.removeEdge(to_remove);
- }
-
- WeakComponentClusterer<V,E> wcSearch = new WeakComponentClusterer<V,E>();
- Set<Set<V>> clusterSet = wcSearch.transform(graph);
-
- for (Map.Entry<E, Pair<V>> entry : edges_removed.entrySet())
- {
- Pair<V> endpoints = entry.getValue();
- graph.addEdge(entry.getKey(), endpoints.getFirst(), endpoints.getSecond());
- }
- return clusterSet;
- }
-
- /**
- * Retrieves the list of all edges that were removed
- * (assuming extract(...) was previously called).
- * The edges returned
- * are stored in order in which they were removed.
- *
- * @return the edges in the original graph
- */
- public List<E> getEdgesRemoved()
- {
- return new ArrayList<E>(edges_removed.keySet());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2004, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Aug 12, 2004
- */
-package edu.uci.ics.jung.algorithms.cluster;
-
-import edu.uci.ics.jung.algorithms.scoring.VoltageScorer;
-import edu.uci.ics.jung.algorithms.util.DiscreteDistribution;
-import edu.uci.ics.jung.algorithms.util.KMeansClusterer;
-import edu.uci.ics.jung.algorithms.util.KMeansClusterer.NotEnoughClustersException;
-import edu.uci.ics.jung.graph.Graph;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-/**
- * <p>Clusters vertices of a <code>Graph</code> based on their ranks as
- * calculated by <code>VoltageScorer</code>. This algorithm is based on,
- * but not identical with, the method described in the paper below.
- * The primary difference is that Wu and Huberman assume a priori that the clusters
- * are of approximately the same size, and therefore use a more complex
- * method than k-means (which is used here) for determining cluster
- * membership based on co-occurrence data.</p>
- *
- * <p>The algorithm proceeds as follows:
- * <ul>
- * <li/>first, generate a set of candidate clusters as follows:
- * <ul>
- * <li/>pick (widely separated) vertex pair, run VoltageScorer
- * <li/>group the vertices in two clusters according to their voltages
- * <li/>store resulting candidate clusters
- * </ul>
- * <li/>second, generate k-1 clusters as follows:
- * <ul>
- * <li/>pick a vertex v as a cluster 'seed'
- * <br>(Wu/Huberman: most frequent vertex in candidate clusters)
- * <li/>calculate co-occurrence over all candidate clusters of v with each other
- * vertex
- * <li/>separate co-occurrence counts into high/low;
- * high vertices constitute a cluster
- * <li/>remove v's vertices from candidate clusters; continue
- * </ul>
- * <li/>finally, remaining unassigned vertices are assigned to the kth ("garbage")
- * cluster.
- * </ul></p>
- *
- * <p><b>NOTE</b>: Depending on how the co-occurrence data splits the data into
- * clusters, the number of clusters returned by this algorithm may be less than the
- * number of clusters requested. The number of clusters will never be more than
- * the number requested, however.</p>
- *
- * @author Joshua O'Madadhain
- * @see "'Finding communities in linear time: a physics approach', Fang Wu and Bernardo Huberman, http://www.hpl.hp.com/research/idl/papers/linear/"
- * @see VoltageScorer
- * @see KMeansClusterer
- */
-public class VoltageClusterer<V,E>
-{
- protected int num_candidates;
- protected KMeansClusterer<V> kmc;
- protected Random rand;
- protected Graph<V,E> g;
-
- /**
- * Creates an instance of a VoltageCluster with the specified parameters.
- * These are mostly parameters that are passed directly to VoltageScorer
- * and KMeansClusterer.
- *
- * @param num_candidates the number of candidate clusters to create
- */
- public VoltageClusterer(Graph<V,E> g, int num_candidates)
- {
- if (num_candidates < 1)
- throw new IllegalArgumentException("must generate >=1 candidates");
-
- this.num_candidates = num_candidates;
- this.kmc = new KMeansClusterer<V>();
- rand = new Random();
- this.g = g;
- }
-
- protected void setRandomSeed(int random_seed)
- {
- rand = new Random(random_seed);
- }
-
- /**
- * Returns a community (cluster) centered around <code>v</code>.
- * @param v the vertex whose community we wish to discover
- */
- public Collection<Set<V>> getCommunity(V v)
- {
- return cluster_internal(v, 2);
- }
-
- /**
- * Clusters the vertices of <code>g</code> into
- * <code>num_clusters</code> clusters, based on their connectivity.
- * @param num_clusters the number of clusters to identify
- */
- public Collection<Set<V>> cluster(int num_clusters)
- {
- return cluster_internal(null, num_clusters);
- }
-
- /**
- * Does the work of <code>getCommunity</code> and <code>cluster</code>.
- * @param origin the vertex around which clustering is to be done
- * @param num_clusters the (maximum) number of clusters to find
- */
- protected Collection<Set<V>> cluster_internal(V origin, int num_clusters)
- {
- // generate candidate clusters
- // repeat the following 'samples' times:
- // * pick (widely separated) vertex pair, run VoltageScorer
- // * use k-means to identify 2 communities in ranked graph
- // * store resulting candidate communities
- ArrayList<V> v_array = new ArrayList<V>(g.getVertices());
-
- LinkedList<Set<V>> candidates = new LinkedList<Set<V>>();
-
- for (int j = 0; j < num_candidates; j++)
- {
- V source;
- if (origin == null)
- source = v_array.get((int)(rand.nextDouble() * v_array.size()));
- else
- source = origin;
- V target = null;
- do
- {
- target = v_array.get((int)(rand.nextDouble() * v_array.size()));
- }
- while (source == target);
- VoltageScorer<V,E> vs = new VoltageScorer<V,E>(g, source, target);
- vs.evaluate();
-
- Map<V, double[]> voltage_ranks = new HashMap<V, double[]>();
- for (V v : g.getVertices())
- voltage_ranks.put(v, new double[] {vs.getVertexScore(v)});
-
-// addOneCandidateCluster(candidates, voltage_ranks);
- addTwoCandidateClusters(candidates, voltage_ranks);
- }
-
- // repeat the following k-1 times:
- // * pick a vertex v as a cluster seed
- // (Wu/Huberman: most frequent vertex in candidates)
- // * calculate co-occurrence (in candidate clusters)
- // of this vertex with all others
- // * use k-means to separate co-occurrence counts into high/low;
- // high vertices are a cluster
- // * remove v's vertices from candidate clusters
-
- Collection<Set<V>> clusters = new LinkedList<Set<V>>();
- Set<V> remaining = new HashSet<V>(g.getVertices());
-
- List<V> seed_candidates = getSeedCandidates(candidates);
- int seed_index = 0;
-
- for (int j = 0; j < (num_clusters - 1); j++)
- {
- if (remaining.isEmpty())
- break;
-
- V seed;
- if (seed_index == 0 && origin != null)
- seed = origin;
- else
- {
- do { seed = seed_candidates.get(seed_index++); }
- while (!remaining.contains(seed));
- }
-
- Map<V, double[]> occur_counts = getObjectCounts(candidates, seed);
- if (occur_counts.size() < 2)
- break;
-
- // now that we have the counts, cluster them...
- try
- {
- Collection<Map<V, double[]>> high_low = kmc.cluster(occur_counts, 2);
- // ...get the cluster with the highest-valued centroid...
- Iterator<Map<V, double[]>> h_iter = high_low.iterator();
- Map<V, double[]> cluster1 = h_iter.next();
- Map<V, double[]> cluster2 = h_iter.next();
- double[] centroid1 = DiscreteDistribution.mean(cluster1.values());
- double[] centroid2 = DiscreteDistribution.mean(cluster2.values());
- Set<V> new_cluster;
- if (centroid1[0] >= centroid2[0])
- new_cluster = cluster1.keySet();
- else
- new_cluster = cluster2.keySet();
-
- // ...remove the elements of new_cluster from each candidate...
- for (Set<V> cluster : candidates)
- cluster.removeAll(new_cluster);
- clusters.add(new_cluster);
- remaining.removeAll(new_cluster);
- }
- catch (NotEnoughClustersException nece)
- {
- // all remaining vertices are in the same cluster
- break;
- }
- }
-
- // identify remaining vertices (if any) as a 'garbage' cluster
- if (!remaining.isEmpty())
- clusters.add(remaining);
-
- return clusters;
- }
-
- /**
- * Do k-means with three intervals and pick the
- * smaller two clusters (presumed to be on the ends); this is closer to the Wu-Huberman method.
- * @param candidates
- * @param voltage_ranks
- */
- protected void addTwoCandidateClusters(LinkedList<Set<V>> candidates,
- Map<V, double[]> voltage_ranks)
- {
- try
- {
- List<Map<V, double[]>> clusters = new ArrayList<Map<V, double[]>>(kmc.cluster(voltage_ranks, 3));
- boolean b01 = clusters.get(0).size() > clusters.get(1).size();
- boolean b02 = clusters.get(0).size() > clusters.get(2).size();
- boolean b12 = clusters.get(1).size() > clusters.get(2).size();
- if (b01 && b02)
- {
- candidates.add(clusters.get(1).keySet());
- candidates.add(clusters.get(2).keySet());
- }
- else if (!b01 && b12)
- {
- candidates.add(clusters.get(0).keySet());
- candidates.add(clusters.get(2).keySet());
- }
- else if (!b02 && !b12)
- {
- candidates.add(clusters.get(0).keySet());
- candidates.add(clusters.get(1).keySet());
- }
- }
- catch (NotEnoughClustersException e)
- {
- // no valid candidates, continue
- }
- }
-
- /**
- * alternative to addTwoCandidateClusters(): cluster vertices by voltages into 2 clusters.
- * We only consider the smaller of the two clusters returned
- * by k-means to be a 'true' cluster candidate; the other is a garbage cluster.
- * @param candidates
- * @param voltage_ranks
- */
- protected void addOneCandidateCluster(LinkedList<Set<V>> candidates,
- Map<V, double[]> voltage_ranks)
- {
- try
- {
- List<Map<V, double[]>> clusters;
- clusters = new ArrayList<Map<V, double[]>>(kmc.cluster(voltage_ranks, 2));
- if (clusters.get(0).size() < clusters.get(1).size())
- candidates.add(clusters.get(0).keySet());
- else
- candidates.add(clusters.get(1).keySet());
- }
- catch (NotEnoughClustersException e)
- {
- // no valid candidates, continue
- }
- }
-
- /**
- * Returns an array of cluster seeds, ranked in decreasing order
- * of number of appearances in the specified collection of candidate
- * clusters.
- * @param candidates
- */
- protected List<V> getSeedCandidates(Collection<Set<V>> candidates)
- {
- final Map<V, double[]> occur_counts = getObjectCounts(candidates, null);
-
- ArrayList<V> occurrences = new ArrayList<V>(occur_counts.keySet());
- Collections.sort(occurrences, new MapValueArrayComparator(occur_counts));
-
- System.out.println("occurrences: ");
- for (int i = 0; i < occurrences.size(); i++)
- System.out.println(occur_counts.get(occurrences.get(i))[0]);
-
- return occurrences;
- }
-
- protected Map<V, double[]> getObjectCounts(Collection<Set<V>> candidates, V seed)
- {
- Map<V, double[]> occur_counts = new HashMap<V, double[]>();
- for (V v : g.getVertices())
- occur_counts.put(v, new double[]{0});
-
- for (Set<V> candidate : candidates)
- {
- if (seed == null)
- System.out.println(candidate.size());
- if (seed == null || candidate.contains(seed))
- {
- for (V element : candidate)
- {
- double[] count = occur_counts.get(element);
- count[0]++;
- }
- }
- }
-
- if (seed == null)
- {
- System.out.println("occur_counts size: " + occur_counts.size());
- for (V v : occur_counts.keySet())
- System.out.println(occur_counts.get(v)[0]);
- }
-
- return occur_counts;
- }
-
- protected class MapValueArrayComparator implements Comparator<V>
- {
- private Map<V, double[]> map;
-
- protected MapValueArrayComparator(Map<V, double[]> map)
- {
- this.map = map;
- }
-
- public int compare(V o1, V o2)
- {
- double[] count0 = map.get(o1);
- double[] count1 = map.get(o2);
- if (count0[0] < count1[0])
- return 1;
- else if (count0[0] > count1[0])
- return -1;
- return 0;
- }
-
- }
-
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.cluster;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.collections15.Buffer;
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.buffer.UnboundedFifoBuffer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-
-
-/**
- * Finds all weak components in a graph as sets of vertex sets. A weak component is defined as
- * a maximal subgraph in which all pairs of vertices in the subgraph are reachable from one
- * another in the underlying undirected subgraph.
- * <p>This implementation identifies components as sets of vertex sets.
- * To create the induced graphs from any or all of these vertex sets,
- * see <code>algorithms.filters.FilterUtils</code>.
- * <p>
- * Running time: O(|V| + |E|) where |V| is the number of vertices and |E| is the number of edges.
- * @author Scott White
- */
-public class WeakComponentClusterer<V,E> implements Transformer<Graph<V,E>, Set<Set<V>>>
-{
- /**
- * Extracts the weak components from a graph.
- * @param graph the graph whose weak components are to be extracted
- * @return the list of weak components
- */
- public Set<Set<V>> transform(Graph<V,E> graph) {
-
- Set<Set<V>> clusterSet = new HashSet<Set<V>>();
-
- HashSet<V> unvisitedVertices = new HashSet<V>(graph.getVertices());
-
- while (!unvisitedVertices.isEmpty()) {
- Set<V> cluster = new HashSet<V>();
- V root = unvisitedVertices.iterator().next();
- unvisitedVertices.remove(root);
- cluster.add(root);
-
- Buffer<V> queue = new UnboundedFifoBuffer<V>();
- queue.add(root);
-
- while (!queue.isEmpty()) {
- V currentVertex = queue.remove();
- Collection<V> neighbors = graph.getNeighbors(currentVertex);
-
- for(V neighbor : neighbors) {
- if (unvisitedVertices.contains(neighbor)) {
- queue.add(neighbor);
- unvisitedVertices.remove(neighbor);
- cluster.add(neighbor);
- }
- }
- }
- clusterSet.add(cluster);
- }
- return clusterSet;
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Mechanisms for identifying clusters in graphs. Where these clusters define disjoint sets of vertices,
-they may be used to define a <code>VertexPartition</code> for more convenient manipulation of the vertex/set
-relationships.
-
-Current clustering algorithms include:
-<ul>
-<li/><code>BicomponentClusterer</code>: finds all subsets of vertices for which at least
-2 vertices must be removed in order to disconnect the induced subgraphs.
-<li/><code>EdgeBetweennessClusterer</code>: identifies vertex clusters by removing the edges of the highest
-'betweenness' scores (see the importance/scoring package).
-<li/><code>VoltageClusterer</code>: Clusters vertices based on their ranks as
-calculated by <code>VoltageRanker</code>.
-<li/><code>WeakComponentVertexClusterer</code>: Clusters vertices based on their membership in weakly
-connected components of a graph.
-</ul>
-
-
-</body>
-</html>
+++ /dev/null
-/*
- * Created on May 19, 2008
- *
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.filters;
-
-import org.apache.commons.collections15.Predicate;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Transforms the input graph into one which contains only those edges
- * that pass the specified <code>Predicate</code>. The filtered graph
- * is a copy of the original graph (same type, uses the same vertex and
- * edge objects). All vertices from the original graph
- * are copied into the new graph (even if they are not incident to any
- * edges in the new graph).
- *
- * @author Joshua O'Madadhain
- */
-public class EdgePredicateFilter<V, E> implements Filter<V, E>
-{
- protected Predicate<E> edge_pred;
-
- /**
- * Creates an instance based on the specified edge <code>Predicate</code>.
- * @param edge_pred the predicate that specifies which edges to add to the filtered graph
- */
- public EdgePredicateFilter(Predicate<E> edge_pred)
- {
- this.edge_pred = edge_pred;
- }
-
- @SuppressWarnings("unchecked")
- public Graph<V,E> transform(Graph<V,E> g)
- {
- Graph<V, E> filtered;
- try
- {
- filtered = g.getClass().newInstance();
- }
- catch (InstantiationException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
- catch (IllegalAccessException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
-
- for (V v : g.getVertices())
- filtered.addVertex(v);
-
- for (E e : g.getEdges())
- {
- if (edge_pred.evaluate(e))
- filtered.addEdge(e, g.getIncidentVertices(e));
- }
-
- return filtered;
- }
-
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.filters;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-
-
-/**
- * An interface for classes that return a subset of the input <code>Graph</code>
- * as a <code>Graph</code>. The <code>Graph</code> returned may be either a
- * new graph or a view into an existing graph; the documentation for the filter
- * must specify which.
- *
- * @author danyelf
- */
-public interface Filter<V,E> extends Transformer<Graph<V,E>, Graph<V,E>>{ }
+++ /dev/null
-/**
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- * Created on Jun 7, 2008
- *
- */
-package edu.uci.ics.jung.algorithms.filters;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Utility methods relating to filtering.
- */
-public class FilterUtils
-{
- /**
- * Creates the induced subgraph from <code>graph</code> whose vertex set
- * is equal to <code>vertices</code>. The graph returned has
- * <code>vertices</code> as its vertex set, and includes all edges from
- * <code>graph</code> which are incident only to elements of
- * <code>vertices</code>.
- *
- * @param <V> the vertex type
- * @param <E> the edge type
- * @param vertices the subset of <code>graph</code>'s vertices around
- * which the subgraph is to be constructed
- * @param graph the graph whose subgraph is to be constructed
- * @return the subgraph induced by <code>vertices</code>
- * @throws IllegalArgumentException if any vertex in
- * <code>vertices</code> is not in <code>graph</code>
- */
- @SuppressWarnings("unchecked")
- public static <V,E,G extends Hypergraph<V,E>> G createInducedSubgraph(Collection<V>
- vertices, G graph)
- {
- G subgraph = null;
- try
- {
- subgraph = (G)graph.getClass().newInstance();
-
- for (V v : vertices)
- {
- if (!graph.containsVertex(v))
- throw new IllegalArgumentException("Vertex " + v +
- " is not an element of " + graph);
- subgraph.addVertex(v);
- }
-
- for (E e : graph.getEdges())
- {
- Collection<V> incident = graph.getIncidentVertices(e);
- if (vertices.containsAll(incident))
- subgraph.addEdge(e, incident, graph.getEdgeType(e));
- }
- }
- catch (InstantiationException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
- catch (IllegalAccessException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
- return subgraph;
- }
-
- /**
- * Creates the induced subgraphs of <code>graph</code> associated with each
- * element of <code>vertex_collections</code>.
- * Note that these vertex collections need not be disjoint.
- * @param <V> the vertex type
- * @param <E> the edge type
- * @param vertex_collections the collections of vertex collections to be
- * used to induce the subgraphs
- * @param graph the graph whose subgraphs are to be created
- * @return the induced subgraphs of <code>graph</code> associated with each
- * element of <code>vertex_collections</code>
- */
- public static <V,E,G extends Hypergraph<V,E>> Collection<G>
- createAllInducedSubgraphs(Collection<? extends Collection<V>>
- vertex_collections, G graph)
- {
- Collection<G> subgraphs = new ArrayList<G>();
-
- for (Collection<V> vertex_set : vertex_collections)
- subgraphs.add(createInducedSubgraph(vertex_set, graph));
-
- return subgraphs;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Dec 26, 2001
- *
- */
-package edu.uci.ics.jung.algorithms.filters;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import edu.uci.ics.jung.algorithms.filters.Filter;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * A filter used to extract the k-neighborhood around one or more root node(s).
- * The k-neighborhood is defined as the subgraph induced by the set of
- * vertices that are k or fewer hops (unweighted shortest-path distance)
- * away from the root node.
- *
- * @author Danyel Fisher
- */
-public class KNeighborhoodFilter<V,E> implements Filter<V,E> {
-
- /**
- * The type of edge to follow for defining the neighborhood.
- */
- public static enum EdgeType { IN_OUT, IN, OUT }
- private Set<V> rootNodes;
- private int radiusK;
- private EdgeType edgeType;
-
- /**
- * Constructs a new instance of the filter.
- * @param rootNodes the set of root nodes
- * @param radiusK the neighborhood radius around the root set
- * @param edgeType 0 for in/out edges, 1 for in-edges, 2 for out-edges
- */
- public KNeighborhoodFilter(Set<V> rootNodes, int radiusK, EdgeType edgeType) {
- this.rootNodes = rootNodes;
- this.radiusK = radiusK;
- this.edgeType = edgeType;
- }
-
- /**
- * Constructs a new instance of the filter.
- * @param rootNode the root node
- * @param radiusK the neighborhood radius around the root set
- * @param edgeType 0 for in/out edges, 1 for in-edges, 2 for out-edges
- */
- public KNeighborhoodFilter(V rootNode, int radiusK, EdgeType edgeType) {
- this.rootNodes = new HashSet<V>();
- this.rootNodes.add(rootNode);
- this.radiusK = radiusK;
- this.edgeType = edgeType;
- }
-
- /**
- * Constructs an unassembled graph containing the k-neighborhood around the root node(s).
- */
- @SuppressWarnings("unchecked")
- public Graph<V,E> transform(Graph<V,E> graph) {
- // generate a Set of Vertices we want
- // add all to the UG
- int currentDepth = 0;
- List<V> currentVertices = new ArrayList<V>();
- Set<V> visitedVertices = new HashSet<V>();
- Set<E> visitedEdges = new HashSet<E>();
- Set<V> acceptedVertices = new HashSet<V>();
- //Copy, mark, and add all the root nodes to the new subgraph
- for (V currentRoot : rootNodes) {
-
- visitedVertices.add(currentRoot);
- acceptedVertices.add(currentRoot);
- currentVertices.add(currentRoot);
- }
- ArrayList<V> newVertices = null;
- //Use BFS to locate the neighborhood around the root nodes within distance k
- while (currentDepth < radiusK) {
- newVertices = new ArrayList<V>();
- for (V currentVertex : currentVertices) {
-
- Collection<E> edges = null;
- switch (edgeType) {
- case IN_OUT :
- edges = graph.getIncidentEdges(currentVertex);
- break;
- case IN :
- edges = graph.getInEdges(currentVertex);
- break;
- case OUT :
- edges = graph.getOutEdges(currentVertex);
- break;
- }
- for (E currentEdge : edges) {
-
- V currentNeighbor =
- graph.getOpposite(currentVertex, currentEdge);
- if (!visitedEdges.contains(currentEdge)) {
- visitedEdges.add(currentEdge);
- if (!visitedVertices.contains(currentNeighbor)) {
- visitedVertices.add(currentNeighbor);
- acceptedVertices.add(currentNeighbor);
- newVertices.add(currentNeighbor);
- }
- }
- }
- }
- currentVertices = newVertices;
- currentDepth++;
- }
- Graph<V,E> ug = null;
- try {
- ug = graph.getClass().newInstance();
- for(E edge : graph.getEdges()) {
- Pair<V> endpoints = graph.getEndpoints(edge);
- if(acceptedVertices.containsAll(endpoints)) {
- ug.addEdge(edge, endpoints.getFirst(), endpoints.getSecond());
- }
- }
- }
- catch (InstantiationException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
- catch (IllegalAccessException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
- return ug;
- }
-}
+++ /dev/null
-/*
- * Created on May 19, 2008
- *
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.filters;
-
-import java.util.Collection;
-
-import org.apache.commons.collections15.Predicate;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Transforms the input graph into one which contains only those vertices
- * that pass the specified <code>Predicate</code>. The filtered graph
- * is a copy of the original graph (same type, uses the same vertex and
- * edge objects). Only those edges whose entire incident vertex collection
- * passes the predicate are copied into the new graph.
- *
- * @author Joshua O'Madadhain
- */
-public class VertexPredicateFilter<V,E> implements Filter<V,E>
-{
- protected Predicate<V> vertex_pred;
-
- /**
- * Creates an instance based on the specified vertex <code>Predicate</code>.
- * @param vertex_pred the predicate that specifies which vertices to add to the filtered graph
- */
- public VertexPredicateFilter(Predicate<V> vertex_pred)
- {
- this.vertex_pred = vertex_pred;
- }
-
- @SuppressWarnings("unchecked")
- public Graph<V,E> transform(Graph<V,E> g)
- {
- Graph<V, E> filtered;
- try
- {
- filtered = g.getClass().newInstance();
- }
- catch (InstantiationException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
- catch (IllegalAccessException e)
- {
- throw new RuntimeException("Unable to create copy of existing graph: ", e);
- }
-
- for (V v : g.getVertices())
- if (vertex_pred.evaluate(v))
- filtered.addVertex(v);
-
- Collection<V> filtered_vertices = filtered.getVertices();
-
- for (E e : g.getEdges())
- {
- Collection<V> incident = g.getIncidentVertices(e);
- if (filtered_vertices.containsAll(incident))
- filtered.addEdge(e, incident);
- }
-
- return filtered;
- }
-
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2008 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Filtering mechanisms that produce subgraphs of an original graph.
-Currently includes:
-<ul>
-<li/><code>Filter</code>: an interface for graph filters
-<li/><code>{Edge,Vertex}PredicateFilter</code>: graph filters that return the
-induced subgraph according to the
-specified edge or vertex <code>Predicate</code>, respectively.
-<li/><code>KNeighborhoodFilter</code>: a filter that returns the subgraph
-induced by vertices within (unweighted) distance k of a specified vertex.
-</ul>
-
-
-</body>
-</html>
-
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.flows;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Buffer;
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.buffer.UnboundedFifoBuffer;
-
-import edu.uci.ics.jung.algorithms.util.IterativeProcess;
-import edu.uci.ics.jung.graph.DirectedGraph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-
-
-/**
- * Implements the Edmonds-Karp maximum flow algorithm for solving the maximum flow problem.
- * After the algorithm is executed,
- * the input {@code Map} is populated with a {@code Number} for each edge that indicates
- * the flow along that edge.
- * <p>
- * An example of using this algorithm is as follows:
- * <pre>
- * EdmondsKarpMaxFlow ek = new EdmondsKarpMaxFlow(graph, source, sink, edge_capacities, edge_flows,
- * edge_factory);
- * ek.evaluate(); // This instructs the class to compute the max flow
- * </pre>
- *
- * @see "Introduction to Algorithms by Cormen, Leiserson, Rivest, and Stein."
- * @see "Network Flows by Ahuja, Magnanti, and Orlin."
- * @see "Theoretical improvements in algorithmic efficiency for network flow problems by Edmonds and Karp, 1972."
- * @author Scott White, adapted to jung2 by Tom Nelson
- */
-public class EdmondsKarpMaxFlow<V,E> extends IterativeProcess {
-
- private DirectedGraph<V,E> mFlowGraph;
- private DirectedGraph<V,E> mOriginalGraph;
- private V source;
- private V target;
- private int mMaxFlow;
- private Set<V> mSourcePartitionNodes;
- private Set<V> mSinkPartitionNodes;
- private Set<E> mMinCutEdges;
-
- private Map<E,Number> residualCapacityMap = new HashMap<E,Number>();
- private Map<V,V> parentMap = new HashMap<V,V>();
- private Map<V,Number> parentCapacityMap = new HashMap<V,Number>();
- private Transformer<E,Number> edgeCapacityTransformer;
- private Map<E,Number> edgeFlowMap;
- private Factory<E> edgeFactory;
-
- /**
- * Constructs a new instance of the algorithm solver for a given graph, source, and sink.
- * Source and sink vertices must be elements of the specified graph, and must be
- * distinct.
- * @param directedGraph the flow graph
- * @param source the source vertex
- * @param sink the sink vertex
- * @param edgeCapacityTransformer the transformer that gets the capacity for each edge.
- * @param edgeFlowMap the map where the solver will place the value of the flow for each edge
- * @param edgeFactory used to create new edge instances for backEdges
- */
- @SuppressWarnings("unchecked")
- public EdmondsKarpMaxFlow(DirectedGraph<V,E> directedGraph, V source, V sink,
- Transformer<E,Number> edgeCapacityTransformer, Map<E,Number> edgeFlowMap,
- Factory<E> edgeFactory) {
-
- if(directedGraph.getVertices().contains(source) == false ||
- directedGraph.getVertices().contains(sink) == false) {
- throw new IllegalArgumentException("source and sink vertices must be elements of the specified graph");
- }
- if (source.equals(sink)) {
- throw new IllegalArgumentException("source and sink vertices must be distinct");
- }
- mOriginalGraph = directedGraph;
-
- this.source = source;
- this.target = sink;
- this.edgeFlowMap = edgeFlowMap;
- this.edgeCapacityTransformer = edgeCapacityTransformer;
- this.edgeFactory = edgeFactory;
- try {
- mFlowGraph = directedGraph.getClass().newInstance();
- for(E e : mOriginalGraph.getEdges()) {
- mFlowGraph.addEdge(e, mOriginalGraph.getSource(e),
- mOriginalGraph.getDest(e), EdgeType.DIRECTED);
- }
- for(V v : mOriginalGraph.getVertices()) {
- mFlowGraph.addVertex(v);
- }
-
- } catch (InstantiationException e) {
- e.printStackTrace();
- } catch (IllegalAccessException e) {
- e.printStackTrace();
- }
- mMaxFlow = 0;
- mSinkPartitionNodes = new HashSet<V>();
- mSourcePartitionNodes = new HashSet<V>();
- mMinCutEdges = new HashSet<E>();
- }
-
- private void clearParentValues() {
- parentMap.clear();
- parentCapacityMap.clear();
- parentCapacityMap.put(source, Integer.MAX_VALUE);
- parentMap.put(source, source);
- }
-
- protected boolean hasAugmentingPath() {
-
- mSinkPartitionNodes.clear();
- mSourcePartitionNodes.clear();
- mSinkPartitionNodes.addAll(mFlowGraph.getVertices());
-
- Set<E> visitedEdgesMap = new HashSet<E>();
- Buffer<V> queue = new UnboundedFifoBuffer<V>();
- queue.add(source);
-
- while (!queue.isEmpty()) {
- V currentVertex = queue.remove();
- mSinkPartitionNodes.remove(currentVertex);
- mSourcePartitionNodes.add(currentVertex);
- Number currentCapacity = parentCapacityMap.get(currentVertex);
-
- Collection<E> neighboringEdges = mFlowGraph.getOutEdges(currentVertex);
-
- for (E neighboringEdge : neighboringEdges) {
-
- V neighboringVertex = mFlowGraph.getDest(neighboringEdge);
-
- Number residualCapacity = residualCapacityMap.get(neighboringEdge);
- if (residualCapacity.intValue() <= 0 || visitedEdgesMap.contains(neighboringEdge))
- continue;
-
- V neighborsParent = parentMap.get(neighboringVertex);
- Number neighborCapacity = parentCapacityMap.get(neighboringVertex);
- int newCapacity = Math.min(residualCapacity.intValue(),currentCapacity.intValue());
-
- if ((neighborsParent == null) || newCapacity > neighborCapacity.intValue()) {
- parentMap.put(neighboringVertex, currentVertex);
- parentCapacityMap.put(neighboringVertex, new Integer(newCapacity));
- visitedEdgesMap.add(neighboringEdge);
- if (neighboringVertex != target) {
- queue.add(neighboringVertex);
- }
- }
- }
- }
-
- boolean hasAugmentingPath = false;
- Number targetsParentCapacity = parentCapacityMap.get(target);
- if (targetsParentCapacity != null && targetsParentCapacity.intValue() > 0) {
- updateResidualCapacities();
- hasAugmentingPath = true;
- }
- clearParentValues();
- return hasAugmentingPath;
- }
-
- @Override
- public void step() {
- while (hasAugmentingPath()) {
- }
- computeMinCut();
-// return 0;
- }
-
- private void computeMinCut() {
-
- for (E e : mOriginalGraph.getEdges()) {
-
- V source = mOriginalGraph.getSource(e);
- V destination = mOriginalGraph.getDest(e);
- if (mSinkPartitionNodes.contains(source) && mSinkPartitionNodes.contains(destination)) {
- continue;
- }
- if (mSourcePartitionNodes.contains(source) && mSourcePartitionNodes.contains(destination)) {
- continue;
- }
- if (mSinkPartitionNodes.contains(source) && mSourcePartitionNodes.contains(destination)) {
- continue;
- }
- mMinCutEdges.add(e);
- }
- }
-
- /**
- * Returns the value of the maximum flow from the source to the sink.
- */
- public int getMaxFlow() {
- return mMaxFlow;
- }
-
- /**
- * Returns the nodes which share the same partition (as defined by the min-cut edges)
- * as the sink node.
- */
- public Set<V> getNodesInSinkPartition() {
- return mSinkPartitionNodes;
- }
-
- /**
- * Returns the nodes which share the same partition (as defined by the min-cut edges)
- * as the source node.
- */
- public Set<V> getNodesInSourcePartition() {
- return mSourcePartitionNodes;
- }
-
- /**
- * Returns the edges in the minimum cut.
- */
- public Set<E> getMinCutEdges() {
- return mMinCutEdges;
- }
-
- /**
- * Returns the graph for which the maximum flow is calculated.
- */
- public DirectedGraph<V,E> getFlowGraph() {
- return mFlowGraph;
- }
-
- @Override
- protected void initializeIterations() {
- parentCapacityMap.put(source, Integer.MAX_VALUE);
- parentMap.put(source, source);
-
- List<E> edgeList = new ArrayList<E>(mFlowGraph.getEdges());
-
- for (int eIdx=0;eIdx< edgeList.size();eIdx++) {
- E edge = edgeList.get(eIdx);
- Number capacity = edgeCapacityTransformer.transform(edge);
-
- if (capacity == null) {
- throw new IllegalArgumentException("Edge capacities must be provided in Transformer passed to constructor");
- }
- residualCapacityMap.put(edge, capacity);
-
- V source = mFlowGraph.getSource(edge);
- V destination = mFlowGraph.getDest(edge);
-
- if(mFlowGraph.isPredecessor(source, destination) == false) {
- E backEdge = edgeFactory.create();
- mFlowGraph.addEdge(backEdge, destination, source, EdgeType.DIRECTED);
- residualCapacityMap.put(backEdge, 0);
- }
- }
- }
-
- @Override
- protected void finalizeIterations() {
-
- for (E currentEdge : mFlowGraph.getEdges()) {
- Number capacity = edgeCapacityTransformer.transform(currentEdge);
-
- Number residualCapacity = residualCapacityMap.get(currentEdge);
- if (capacity != null) {
- Integer flowValue = new Integer(capacity.intValue()-residualCapacity.intValue());
- this.edgeFlowMap.put(currentEdge, flowValue);
- }
- }
-
- Set<E> backEdges = new HashSet<E>();
- for (E currentEdge: mFlowGraph.getEdges()) {
-
- if (edgeCapacityTransformer.transform(currentEdge) == null) {
- backEdges.add(currentEdge);
- } else {
- residualCapacityMap.remove(currentEdge);
- }
- }
- for(E e : backEdges) {
- mFlowGraph.removeEdge(e);
- }
- }
-
- private void updateResidualCapacities() {
-
- Number augmentingPathCapacity = parentCapacityMap.get(target);
- mMaxFlow += augmentingPathCapacity.intValue();
- V currentVertex = target;
- V parentVertex = null;
- while ((parentVertex = parentMap.get(currentVertex)) != currentVertex) {
- E currentEdge = mFlowGraph.findEdge(parentVertex, currentVertex);
-
- Number residualCapacity = residualCapacityMap.get(currentEdge);
-
- residualCapacity = residualCapacity.intValue() - augmentingPathCapacity.intValue();
- residualCapacityMap.put(currentEdge, residualCapacity);
-
- E backEdge = mFlowGraph.findEdge(currentVertex, parentVertex);
- residualCapacity = residualCapacityMap.get(backEdge);
- residualCapacity = residualCapacity.intValue() + augmentingPathCapacity.intValue();
- residualCapacityMap.put(backEdge, residualCapacity);
- currentVertex = parentVertex;
- }
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Methods for calculating properties relating to network flows (such as max flow/min cut).
-
-</body>
-</html>
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.generators;
-
-
-
-/**
- * An interface for algorithms that generate graphs that evolve iteratively.
- * @author Scott White
- */
-public interface EvolvingGraphGenerator<V, E> extends GraphGenerator<V,E> {
-
- /**
- * Instructs the algorithm to evolve the graph N steps.
- * @param numSteps number of steps to iterate from the current state
- */
- void evolveGraph(int numSteps);
-
- /**
- * Retrieves the total number of steps elapsed.
- * @return number of elapsed steps
- */
- int numIterations();
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.generators;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * An interface for algorithms that generate graphs.
- * @author Scott White
- */
-public interface GraphGenerator<V, E> extends Factory<Graph<V,E>>{ }
+++ /dev/null
-/*
- * Copyright (c) 2009, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-
-package edu.uci.ics.jung.algorithms.generators;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-
-/**
- * Simple generator of an m x n lattice where each vertex
- * is incident with each of its neighbors (to the left, right, up, and down).
- * May be toroidal, in which case the vertices on the edges are connected to
- * their counterparts on the opposite edges as well.
- *
- * <p>If the graph factory supplied has a default edge type of {@code EdgeType.DIRECTED},
- * then edges will be created in both directions between adjacent vertices.
- *
- * @author Joshua O'Madadhain
- */
-public class Lattice2DGenerator<V,E> implements GraphGenerator<V,E>
-{
- protected int row_count;
- protected int col_count;
- protected boolean is_toroidal;
- protected boolean is_directed;
- protected Factory<? extends Graph<V, E>> graph_factory;
- protected Factory<V> vertex_factory;
- protected Factory<E> edge_factory;
- private List<V> v_array;
-
- /**
- * Constructs a generator of square lattices of size {@code latticeSize}
- * with the specified parameters.
- *
- * @param graph_factory used to create the {@code Graph} for the lattice
- * @param vertex_factory used to create the lattice vertices
- * @param edge_factory used to create the lattice edges
- * @param latticeSize the number of rows and columns of the lattice
- * @param isToroidal if true, the created lattice wraps from top to bottom and left to right
- */
- public Lattice2DGenerator(Factory<? extends Graph<V,E>> graph_factory, Factory<V> vertex_factory,
- Factory<E> edge_factory, int latticeSize, boolean isToroidal)
- {
- this(graph_factory, vertex_factory, edge_factory, latticeSize, latticeSize, isToroidal);
- }
-
- /**
- * Creates a generator of {@code row_count} x {@code col_count} lattices
- * with the specified parameters.
- *
- * @param graph_factory used to create the {@code Graph} for the lattice
- * @param vertex_factory used to create the lattice vertices
- * @param edge_factory used to create the lattice edges
- * @param row_count the number of rows in the lattice
- * @param col_count the number of columns in the lattice
- * @param isToroidal if true, the created lattice wraps from top to bottom and left to right
- */
- public Lattice2DGenerator(Factory<? extends Graph<V,E>> graph_factory, Factory<V> vertex_factory,
- Factory<E> edge_factory, int row_count, int col_count, boolean isToroidal)
- {
- if (row_count < 2 || col_count < 2)
- {
- throw new IllegalArgumentException("Row and column counts must each be at least 2.");
- }
-
- this.row_count = row_count;
- this.col_count = col_count;
- this.is_toroidal = isToroidal;
- this.graph_factory = graph_factory;
- this.vertex_factory = vertex_factory;
- this.edge_factory = edge_factory;
- this.is_directed = (graph_factory.create().getDefaultEdgeType() == EdgeType.DIRECTED);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.generators.GraphGenerator#create()
- */
- @SuppressWarnings("unchecked")
- public Graph<V,E> create()
- {
- int vertex_count = row_count * col_count;
- Graph<V,E> graph = graph_factory.create();
- v_array = new ArrayList<V>(vertex_count);
- for (int i = 0; i < vertex_count; i++)
- {
- V v = vertex_factory.create();
- graph.addVertex(v);
- v_array.add(i, v);
- }
-
- int start = is_toroidal ? 0 : 1;
- int end_row = is_toroidal ? row_count : row_count - 1;
- int end_col = is_toroidal ? col_count : col_count - 1;
-
- // fill in edges
- // down
- for (int i = 0; i < end_row; i++)
- for (int j = 0; j < col_count; j++)
- graph.addEdge(edge_factory.create(), getVertex(i,j), getVertex(i+1, j));
- // right
- for (int i = 0; i < row_count; i++)
- for (int j = 0; j < end_col; j++)
- graph.addEdge(edge_factory.create(), getVertex(i,j), getVertex(i, j+1));
-
- // if the graph is directed, fill in the edges going the other direction...
- if (graph.getDefaultEdgeType() == EdgeType.DIRECTED)
- {
- // up
- for (int i = start; i < row_count; i++)
- for (int j = 0; j < col_count; j++)
- graph.addEdge(edge_factory.create(), getVertex(i,j), getVertex(i-1, j));
- // left
- for (int i = 0; i < row_count; i++)
- for (int j = start; j < col_count; j++)
- graph.addEdge(edge_factory.create(), getVertex(i,j), getVertex(i, j-1));
- }
-
- return graph;
- }
-
- /**
- * Returns the number of edges found in a lattice of this generator's specifications.
- * (This is useful for subclasses that may modify the generated graphs to add more edges.)
- */
- public int getGridEdgeCount()
- {
- int boundary_adjustment = (is_toroidal ? 0 : 1);
- int vertical_edge_count = col_count * (row_count - boundary_adjustment);
- int horizontal_edge_count = row_count * (col_count - boundary_adjustment);
-
- return (vertical_edge_count + horizontal_edge_count) * (is_directed ? 2 : 1);
- }
-
- protected int getIndex(int i, int j)
- {
- return ((mod(i, row_count)) * col_count) + (mod(j, col_count));
- }
-
- protected int mod(int i, int modulus)
- {
- int i_mod = i % modulus;
- return i_mod >= 0 ? i_mod : i_mod + modulus;
- }
-
- /**
- * Returns the vertex at position ({@code i mod row_count, j mod col_count}).
- */
- protected V getVertex(int i, int j)
- {
- return v_array.get(getIndex(i, j));
- }
-
- /**
- * Returns the {@code i}th vertex (counting row-wise).
- */
- protected V getVertex(int i)
- {
- return v_array.get(i);
- }
-
- /**
- * Returns the row in which vertex {@code i} is found.
- */
- protected int getRow(int i)
- {
- return i / row_count;
- }
-
- /**
- * Returns the column in which vertex {@code i} is found.
- */
- protected int getCol(int i)
- {
- return i % col_count;
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Methods for generating new (often random) graphs with various properties.
-
-</body>
-</html>
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.generators.random;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.algorithms.generators.EvolvingGraphGenerator;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.MultiGraph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-import edu.uci.ics.jung.graph.util.Pair;
-
-
-/**
- * <p>Simple evolving scale-free random graph generator. At each time
- * step, a new vertex is created and is connected to existing vertices
- * according to the principle of "preferential attachment", whereby
- * vertices with higher degree have a higher probability of being
- * selected for attachment.</p>
- *
- * <p>At a given timestep, the probability <code>p</code> of creating an edge
- * between an existing vertex <code>v</code> and the newly added vertex is
- * <pre>
- * p = (degree(v) + 1) / (|E| + |V|);
- * </pre>
- *
- * <p>where <code>|E|</code> and <code>|V|</code> are, respectively, the number
- * of edges and vertices currently in the network (counting neither the new
- * vertex nor the other edges that are being attached to it).</p>
- *
- * <p>Note that the formula specified in the original paper
- * (cited below) was
- * <pre>
- * p = degree(v) / |E|
- * </pre>
- * </p>
- *
- * <p>However, this would have meant that the probability of attachment for any existing
- * isolated vertex would be 0. This version uses Lagrangian smoothing to give
- * each existing vertex a positive attachment probability.</p>
- *
- * <p>The graph created may be either directed or undirected (controlled by a constructor
- * parameter); the default is undirected.
- * If the graph is specified to be directed, then the edges added will be directed
- * from the newly added vertex u to the existing vertex v, with probability proportional to the
- * indegree of v (number of edges directed towards v). If the graph is specified to be undirected,
- * then the (undirected) edges added will connect u to v, with probability proportional to the
- * degree of v.</p>
- *
- * <p>The <code>parallel</code> constructor parameter specifies whether parallel edges
- * may be created.</p>
- *
- * @see "A.-L. Barabasi and R. Albert, Emergence of scaling in random networks, Science 286, 1999."
- * @author Scott White
- * @author Joshua O'Madadhain
- * @author Tom Nelson - adapted to jung2
- */
-public class BarabasiAlbertGenerator<V,E> implements EvolvingGraphGenerator<V,E> {
- private Graph<V, E> mGraph = null;
- private int mNumEdgesToAttachPerStep;
- private int mElapsedTimeSteps;
- private Random mRandom;
- protected List<V> vertex_index;
- protected int init_vertices;
- protected Map<V,Integer> index_vertex;
- protected Factory<Graph<V,E>> graphFactory;
- protected Factory<V> vertexFactory;
- protected Factory<E> edgeFactory;
-
- /**
- * Constructs a new instance of the generator.
- * @param init_vertices number of unconnected 'seed' vertices that the graph should start with
- * @param numEdgesToAttach the number of edges that should be attached from the
- * new vertex to pre-existing vertices at each time step
- * @param directed specifies whether the graph and edges to be created should be directed or not
- * @param parallel specifies whether the algorithm permits parallel edges
- * @param seed random number seed
- */
- public BarabasiAlbertGenerator(Factory<Graph<V,E>> graphFactory,
- Factory<V> vertexFactory, Factory<E> edgeFactory,
- int init_vertices, int numEdgesToAttach,
- int seed, Set<V> seedVertices)
- {
- assert init_vertices > 0 : "Number of initial unconnected 'seed' vertices " +
- "must be positive";
- assert numEdgesToAttach > 0 : "Number of edges to attach " +
- "at each time step must be positive";
-
- mNumEdgesToAttachPerStep = numEdgesToAttach;
- mRandom = new Random(seed);
- this.graphFactory = graphFactory;
- this.vertexFactory = vertexFactory;
- this.edgeFactory = edgeFactory;
- this.init_vertices = init_vertices;
- initialize(seedVertices);
- }
-
-
- /**
- * Constructs a new instance of the generator, whose output will be an undirected graph,
- * and which will use the current time as a seed for the random number generation.
- * @param init_vertices number of vertices that the graph should start with
- * @param numEdgesToAttach the number of edges that should be attached from the
- * new vertex to pre-existing vertices at each time step
- */
- public BarabasiAlbertGenerator(Factory<Graph<V,E>> graphFactory,
- Factory<V> vertexFactory, Factory<E> edgeFactory,
- int init_vertices, int numEdgesToAttach, Set<V> seedVertices) {
- this(graphFactory, vertexFactory, edgeFactory, init_vertices, numEdgesToAttach, (int) System.currentTimeMillis(), seedVertices);
- }
-
- private void initialize(Set<V> seedVertices) {
-
- mGraph = graphFactory.create();
-
- vertex_index = new ArrayList<V>(2*init_vertices);
- index_vertex = new HashMap<V, Integer>(2*init_vertices);
- for (int i = 0; i < init_vertices; i++) {
- V v = vertexFactory.create();
- mGraph.addVertex(v);
- vertex_index.add(v);
- index_vertex.put(v, i);
- seedVertices.add(v);
- }
-
- mElapsedTimeSteps = 0;
- }
-
- private void createRandomEdge(Collection<V> preexistingNodes,
- V newVertex, Set<Pair<V>> added_pairs) {
- V attach_point;
- boolean created_edge = false;
- Pair<V> endpoints;
- do {
- attach_point = vertex_index.get(mRandom.nextInt(vertex_index.size()));
-
- endpoints = new Pair<V>(newVertex, attach_point);
-
- // if parallel edges are not allowed, skip attach_point if <newVertex, attach_point>
- // already exists; note that because of the way edges are added, we only need to check
- // the list of candidate edges for duplicates.
- if (!(mGraph instanceof MultiGraph))
- {
- if (added_pairs.contains(endpoints))
- continue;
- if (mGraph.getDefaultEdgeType() == EdgeType.UNDIRECTED &&
- added_pairs.contains(new Pair<V>(attach_point, newVertex)))
- continue;
- }
-
- double degree = mGraph.inDegree(attach_point);
-
- // subtract 1 from numVertices because we don't want to count newVertex
- // (which has already been added to the graph, but not to vertex_index)
- double attach_prob = (degree + 1) / (mGraph.getEdgeCount() + mGraph.getVertexCount() - 1);
- if (attach_prob >= mRandom.nextDouble())
- created_edge = true;
- }
- while (!created_edge);
-
- added_pairs.add(endpoints);
-
- if (mGraph.getDefaultEdgeType() == EdgeType.UNDIRECTED) {
- added_pairs.add(new Pair<V>(attach_point, newVertex));
- }
- }
-
- public void evolveGraph(int numTimeSteps) {
-
- for (int i = 0; i < numTimeSteps; i++) {
- evolveGraph();
- mElapsedTimeSteps++;
- }
- }
-
- private void evolveGraph() {
- Collection<V> preexistingNodes = mGraph.getVertices();
- V newVertex = vertexFactory.create();
-
- mGraph.addVertex(newVertex);
-
- // generate and store the new edges; don't add them to the graph
- // yet because we don't want to bias the degree calculations
- // (all new edges in a timestep should be added in parallel)
- Set<Pair<V>> added_pairs = new HashSet<Pair<V>>(mNumEdgesToAttachPerStep*3);
-
- for (int i = 0; i < mNumEdgesToAttachPerStep; i++)
- createRandomEdge(preexistingNodes, newVertex, added_pairs);
-
- for (Pair<V> pair : added_pairs)
- {
- V v1 = pair.getFirst();
- V v2 = pair.getSecond();
- if (mGraph.getDefaultEdgeType() != EdgeType.UNDIRECTED ||
- !mGraph.isNeighbor(v1, v2))
- mGraph.addEdge(edgeFactory.create(), pair);
- }
- // now that we're done attaching edges to this new vertex,
- // add it to the index
- vertex_index.add(newVertex);
- index_vertex.put(newVertex, new Integer(vertex_index.size() - 1));
- }
-
- public int numIterations() {
- return mElapsedTimeSteps;
- }
-
- public Graph<V, E> create() {
- return mGraph;
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.generators.random;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.algorithms.generators.GraphGenerator;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Graph generator that generates undirected graphs with power-law degree distributions.
- * @author Scott White
- * @see "A Steady State Model for Graph Power Law by David Eppstein and Joseph Wang"
- */
-public class EppsteinPowerLawGenerator<V,E> implements GraphGenerator<V,E> {
- private int mNumVertices;
- private int mNumEdges;
- private int mNumIterations;
- private double mMaxDegree;
- private Random mRandom;
- private Factory<Graph<V,E>> graphFactory;
- private Factory<V> vertexFactory;
- private Factory<E> edgeFactory;
-
- /**
- * Creates an instance with the specified factories and specifications.
- * @param graphFactory the factory to use to generate the graph
- * @param vertexFactory the factory to use to create vertices
- * @param edgeFactory the factory to use to create edges
- * @param numVertices the number of vertices for the generated graph
- * @param numEdges the number of edges the generated graph will have, should be Theta(numVertices)
- * @param r the number of iterations to use; the larger the value the better the graph's degree
- * distribution will approximate a power-law
- */
- public EppsteinPowerLawGenerator(Factory<Graph<V,E>> graphFactory,
- Factory<V> vertexFactory, Factory<E> edgeFactory,
- int numVertices, int numEdges, int r) {
- this.graphFactory = graphFactory;
- this.vertexFactory = vertexFactory;
- this.edgeFactory = edgeFactory;
- mNumVertices = numVertices;
- mNumEdges = numEdges;
- mNumIterations = r;
- mRandom = new Random();
- }
-
- protected Graph<V,E> initializeGraph() {
- Graph<V,E> graph = null;
- graph = graphFactory.create();
- for(int i=0; i<mNumVertices; i++) {
- graph.addVertex(vertexFactory.create());
- }
- List<V> vertices = new ArrayList<V>(graph.getVertices());
- while (graph.getEdgeCount() < mNumEdges) {
- V u = vertices.get((int) (mRandom.nextDouble() * mNumVertices));
- V v = vertices.get((int) (mRandom.nextDouble() * mNumVertices));
- if (!graph.isSuccessor(v,u)) {
- graph.addEdge(edgeFactory.create(), u, v);
- }
- }
-
- double maxDegree = 0;
- for (V v : graph.getVertices()) {
- maxDegree = Math.max(graph.degree(v),maxDegree);
- }
- mMaxDegree = maxDegree; //(maxDegree+1)*(maxDegree)/2;
-
- return graph;
- }
-
- /**
- * Generates a graph whose degree distribution approximates a power-law.
- * @return the generated graph
- */
- public Graph<V,E> create() {
- Graph<V,E> graph = initializeGraph();
-
- List<V> vertices = new ArrayList<V>(graph.getVertices());
- for (int rIdx = 0; rIdx < mNumIterations; rIdx++) {
-
- V v = null;
- int degree = 0;
- do {
- v = vertices.get((int) (mRandom.nextDouble() * mNumVertices));
- degree = graph.degree(v);
-
- } while (degree == 0);
-
- List<E> edges = new ArrayList<E>(graph.getIncidentEdges(v));
- E randomExistingEdge = edges.get((int) (mRandom.nextDouble()*degree));
-
- // FIXME: look at email thread on a more efficient RNG for arbitrary distributions
-
- V x = vertices.get((int) (mRandom.nextDouble() * mNumVertices));
- V y = null;
- do {
- y = vertices.get((int) (mRandom.nextDouble() * mNumVertices));
-
- } while (mRandom.nextDouble() > ((graph.degree(y)+1)/mMaxDegree));
-
- if (!graph.isSuccessor(y,x) && x != y) {
- graph.removeEdge(randomExistingEdge);
- graph.addEdge(edgeFactory.create(), x, y);
- }
- }
-
- return graph;
- }
-
- /**
- * Sets the seed for the random number generator.
- * @param seed input to the random number generator.
- */
- public void setSeed(long seed) {
- mRandom.setSeed(seed);
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.generators.random;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.algorithms.generators.GraphGenerator;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.UndirectedGraph;
-
-/**
- * Generates a random graph using the Erdos-Renyi binomial model
- * (each pair of vertices is connected with probability p).
- *
- * @author William Giordano, Scott White, Joshua O'Madadhain
- */
-public class ErdosRenyiGenerator<V,E> implements GraphGenerator<V,E> {
- private int mNumVertices;
- private double mEdgeConnectionProbability;
- private Random mRandom;
- Factory<UndirectedGraph<V,E>> graphFactory;
- Factory<V> vertexFactory;
- Factory<E> edgeFactory;
-
- /**
- *
- * @param numVertices number of vertices graph should have
- * @param p Connection's probability between 2 vertices
- */
- public ErdosRenyiGenerator(Factory<UndirectedGraph<V,E>> graphFactory,
- Factory<V> vertexFactory, Factory<E> edgeFactory,
- int numVertices,double p)
- {
- if (numVertices <= 0) {
- throw new IllegalArgumentException("A positive # of vertices must be specified.");
- }
- mNumVertices = numVertices;
- if (p < 0 || p > 1) {
- throw new IllegalArgumentException("p must be between 0 and 1.");
- }
- this.graphFactory = graphFactory;
- this.vertexFactory = vertexFactory;
- this.edgeFactory = edgeFactory;
- mEdgeConnectionProbability = p;
- mRandom = new Random();
- }
-
- /**
- * Returns a graph in which each pair of vertices is connected by
- * an undirected edge with the probability specified by the constructor.
- */
- public Graph<V,E> create() {
- UndirectedGraph<V,E> g = graphFactory.create();
- for(int i=0; i<mNumVertices; i++) {
- g.addVertex(vertexFactory.create());
- }
- List<V> list = new ArrayList<V>(g.getVertices());
-
- for (int i = 0; i < mNumVertices-1; i++) {
- V v_i = list.get(i);
- for (int j = i+1; j < mNumVertices; j++) {
- V v_j = list.get(j);
- if (mRandom.nextDouble() < mEdgeConnectionProbability) {
- g.addEdge(edgeFactory.create(), v_i, v_j);
- }
- }
- }
- return g;
- }
-
- /**
- * Sets the seed of the internal random number generator to {@code seed}.
- * Enables consistent behavior.
- */
- public void setSeed(long seed) {
- mRandom.setSeed(seed);
- }
-}
-
-
-
-
-
-
-
-
-
-
-
+++ /dev/null
-
-package edu.uci.ics.jung.algorithms.generators.random;
-
-/*
-* Copyright (c) 2009, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Random;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.algorithms.generators.Lattice2DGenerator;
-import edu.uci.ics.jung.algorithms.util.WeightedChoice;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Graph generator that produces a random graph with small world properties.
- * The underlying model is an mxn (optionally toroidal) lattice. Each node u
- * has four local connections, one to each of its neighbors, and
- * in addition 1+ long range connections to some node v where v is chosen randomly according to
- * probability proportional to d^-alpha where d is the lattice distance between u and v and alpha
- * is the clustering exponent.
- *
- * @see "Navigation in a small world J. Kleinberg, Nature 406(2000), 845."
- * @author Joshua O'Madadhain
- */
-public class KleinbergSmallWorldGenerator<V, E> extends Lattice2DGenerator<V, E> {
- private double clustering_exponent;
- private Random random;
- private int num_connections = 1;
-
- /**
- * Creates
- * @param graph_factory
- * @param vertex_factory
- * @param edge_factory
- * @param latticeSize
- * @param clusteringExponent
- */
- public KleinbergSmallWorldGenerator(Factory<? extends Graph<V,E>> graph_factory, Factory<V> vertex_factory,
- Factory<E> edge_factory, int latticeSize, double clusteringExponent)
- {
- this(graph_factory, vertex_factory, edge_factory, latticeSize, latticeSize, clusteringExponent);
- }
-
- /**
- * @param graph_factory
- * @param vertex_factory
- * @param edge_factory
- * @param row_count
- * @param col_count
- * @param clusteringExponent
- */
- public KleinbergSmallWorldGenerator(Factory<? extends Graph<V,E>> graph_factory, Factory<V> vertex_factory,
- Factory<E> edge_factory, int row_count, int col_count, double clusteringExponent)
- {
- super(graph_factory, vertex_factory, edge_factory, row_count, col_count, true);
- clustering_exponent = clusteringExponent;
- initialize();
- }
-
- /**
- * @param graph_factory
- * @param vertex_factory
- * @param edge_factory
- * @param row_count
- * @param col_count
- * @param clusteringExponent
- * @param isToroidal
- */
- public KleinbergSmallWorldGenerator(Factory<? extends Graph<V,E>> graph_factory, Factory<V> vertex_factory,
- Factory<E> edge_factory, int row_count, int col_count, double clusteringExponent,
- boolean isToroidal)
- {
- super(graph_factory, vertex_factory, edge_factory, row_count, col_count, isToroidal);
- clustering_exponent = clusteringExponent;
- initialize();
- }
-
- private void initialize()
- {
- this.random = new Random();
- }
-
- /**
- * Sets the {@code Random} instance used by this instance. Useful for
- * unit testing.
- */
- public void setRandom(Random random)
- {
- this.random = random;
- }
-
- /**
- * Sets the seed of the internal random number generator. May be used to provide repeatable
- * experiments.
- */
- public void setRandomSeed(long seed)
- {
- random.setSeed(seed);
- }
-
- /**
- * Sets the number of new 'small-world' connections (outgoing edges) to be added to each vertex.
- */
- public void setConnectionCount(int num_connections)
- {
- if (num_connections <= 0)
- {
- throw new IllegalArgumentException("Number of new connections per vertex must be >= 1");
- }
- this.num_connections = num_connections;
- }
-
- /**
- * Returns the number of new 'small-world' connections to be made to each vertex.
- */
- public int getConnectionCount()
- {
- return this.num_connections;
- }
-
- /**
- * Generates a random small world network according to the parameters given
- * @return a random small world graph
- */
- @Override
- public Graph<V,E> create()
- {
- Graph<V, E> graph = super.create();
-
- // TODO: For toroidal graphs, we can make this more clever by pre-creating the WeightedChoice object
- // and using the output as an offset to the current vertex location.
- WeightedChoice<V> weighted_choice;
-
- // Add long range connections
- for (int i = 0; i < graph.getVertexCount(); i++)
- {
- V source = getVertex(i);
- int row = getRow(i);
- int col = getCol(i);
- int row_offset = row < row_count/2 ? -row_count : row_count;
- int col_offset = col < col_count/2 ? -col_count : col_count;
-
- Map<V, Float> vertex_weights = new HashMap<V, Float>();
- for (int j = 0; j < row_count; j++)
- {
- for (int k = 0; k < col_count; k++)
- {
- if (j == row && k == col)
- continue;
- int v_dist = Math.abs(j - row);
- int h_dist = Math.abs(k - col);
- if (is_toroidal)
- {
- v_dist = Math.min(v_dist, Math.abs(j - row+row_offset));
- h_dist = Math.min(h_dist, Math.abs(k - col+col_offset));
- }
- int distance = v_dist + h_dist;
- if (distance < 2)
- continue;
- else
- vertex_weights.put(getVertex(j,k), (float)Math.pow(distance, -clustering_exponent));
- }
- }
-
- for (int j = 0; j < this.num_connections; j++) {
- weighted_choice = new WeightedChoice<V>(vertex_weights, random);
- V target = weighted_choice.nextItem();
- graph.addEdge(edge_factory.create(), source, target);
- }
- }
-
- return graph;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Jul 2, 2003
- *
- */
-package edu.uci.ics.jung.algorithms.generators.random;
-
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-
-/**
- *
- * Generates a mixed-mode random graph based on the output of <code>BarabasiAlbertGenerator</code>.
- * Primarily intended for providing a heterogeneous sample graph for visualization testing, etc.
- *
- */
-public class MixedRandomGraphGenerator {
-
- /**
- * Equivalent to <code>generateMixedRandomGraph(edge_weight, num_vertices, true)</code>.
- */
- public static <V,E> Graph<V, E> generateMixedRandomGraph(
- Factory<Graph<V,E>> graphFactory,
- Factory<V> vertexFactory,
- Factory<E> edgeFactory,
- Map<E,Number> edge_weight,
- int num_vertices, Set<V> seedVertices)
- {
- return generateMixedRandomGraph(graphFactory, vertexFactory, edgeFactory,
- edge_weight, num_vertices, true, seedVertices);
- }
-
- /**
- * Returns a random mixed-mode graph. Starts with a randomly generated
- * Barabasi-Albert (preferential attachment) generator
- * (4 initial vertices, 3 edges added at each step, and num_vertices - 4 evolution steps).
- * Then takes the resultant graph, replaces random undirected edges with directed
- * edges, and assigns random weights to each edge.
- */
- public static <V,E> Graph<V,E> generateMixedRandomGraph(
- Factory<Graph<V,E>> graphFactory,
- Factory<V> vertexFactory,
- Factory<E> edgeFactory,
- Map<E,Number> edge_weights,
- int num_vertices, boolean parallel, Set<V> seedVertices)
- {
- int seed = (int)(Math.random() * 10000);
- BarabasiAlbertGenerator<V,E> bag =
- new BarabasiAlbertGenerator<V,E>(graphFactory, vertexFactory, edgeFactory,
- 4, 3, //false, parallel,
- seed, seedVertices);
- bag.evolveGraph(num_vertices - 4);
- Graph<V, E> ug = bag.create();
-
- // create a SparseMultigraph version of g
- Graph<V, E> g = graphFactory.create();
- //new SparseMultigraph<V, E>();
- for(V v : ug.getVertices()) {
- g.addVertex(v);
- }
-
- // randomly replace some of the edges by directed edges to
- // get a mixed-mode graph, add random weights
-
- for(E e : ug.getEdges()) {
- V v1 = ug.getEndpoints(e).getFirst();
- V v2 = ug.getEndpoints(e).getSecond();
-
- E me = edgeFactory.create();
- g.addEdge(me, v1, v2, Math.random() < .5 ? EdgeType.DIRECTED : EdgeType.UNDIRECTED);
- edge_weights.put(me, Math.random());
- }
-
- return g;
- }
-
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Methods for generating random graphs with various properties. These include:
-<ul>
-<li/><code>BarabasiAlbertGenerator</code>: scale-free graphs using the preferential attachment heuristic.
-<li/><code>EppsteinPowerLawGenerator</code>: graphs whose degree distribution approximates a power law
-<li/><code>ErdosRenyiGenerator</code>: graphs for which edges are created with a specified probability
-<li/><code>MixedRandomGraphGenerator</code>: takes the output of <code>BarabasiAlbertGenerator</code> and
-perturbs it to generate a mixed-mode analog with both directed and undirected edges.
-<li/>
-
-
-</body>
-</html>
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.importance;
-
-import java.text.DecimalFormat;
-import java.text.Format;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.algorithms.util.IterativeProcess;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Abstract class for algorithms that rank nodes or edges by some "importance" metric. Provides a common set of
- * services such as:
- * <ul>
- * <li> storing rank scores</li>
- * <li> getters and setters for rank scores</li>
- * <li> computing default edge weights</li>
- * <li> normalizing default or user-provided edge transition weights </li>
- * <li> normalizing rank scores</li>
- * <li> automatic cleanup of decorations</li>
- * <li> creation of Ranking list</li>
- * <li>print rankings in sorted order by rank</li>
- * </ul>
- * <p>
- * By default, all rank scores are removed from the vertices (or edges) being ranked.
- * @author Scott White
- */
-public abstract class AbstractRanker<V,E> extends IterativeProcess {
- private Graph<V,E> mGraph;
- private List<Ranking<?>> mRankings;
- private boolean mRemoveRankScoresOnFinalize;
- private boolean mRankNodes;
- private boolean mRankEdges;
- private boolean mNormalizeRankings;
- protected Map<Object,Map<V, Number>> vertexRankScores =
- LazyMap.decorate(
- new HashMap<Object,Map<V,Number>>(),
- new Factory<Map<V,Number>>() {
- public Map<V,Number> create() {
- return new HashMap<V,Number>();
- }});
- protected Map<Object,Map<E, Number>> edgeRankScores =
- LazyMap.decorate(
- new HashMap<Object,Map<E,Number>>(),
- new Factory<Map<E,Number>>() {
- public Map<E,Number> create() {
- return new HashMap<E,Number>();
- }});
- private Map<E,Number> edgeWeights = new HashMap<E,Number>();
-
- protected void initialize(Graph<V,E> graph, boolean isNodeRanker,
- boolean isEdgeRanker) {
- if (!isNodeRanker && !isEdgeRanker)
- throw new IllegalArgumentException("Must rank edges, vertices, or both");
- mGraph = graph;
- mRemoveRankScoresOnFinalize = true;
- mNormalizeRankings = true;
- mRankNodes = isNodeRanker;
- mRankEdges = isEdgeRanker;
- }
-
- /**
- * @return all rankScores
- */
- public Map<Object,Map<V, Number>> getVertexRankScores() {
- return vertexRankScores;
- }
-
- public Map<Object,Map<E, Number>> getEdgeRankScores() {
- return edgeRankScores;
- }
-
- /**
- * @return the rankScores
- */
- public Map<V, Number> getVertexRankScores(Object key) {
- return vertexRankScores.get(key);
- }
-
- public Map<E, Number> getEdgeRankScores(Object key) {
- return edgeRankScores.get(key);
- }
-
- protected Collection<V> getVertices() {
- return mGraph.getVertices();
- }
-
- protected int getVertexCount() {
- return mGraph.getVertexCount();
- }
-
- protected Graph<V,E> getGraph() {
- return mGraph;
- }
-
- @Override
- public void reset() {
- }
-
- /**
- * Returns <code>true</code> if this ranker ranks nodes, and
- * <code>false</code> otherwise.
- */
- public boolean isRankingNodes() {
- return mRankNodes;
- }
-
- /**
- * Returns <code>true</code> if this ranker ranks edges, and
- * <code>false</code> otherwise.
- */
- public boolean isRankingEdges() {
- return mRankEdges;
- }
-
- /**
- * Instructs the ranker whether or not it should remove the rank scores from the nodes (or edges) once the ranks
- * have been computed.
- * @param removeRankScoresOnFinalize <code>true</code> if the rank scores are to be removed, <code>false</code> otherwise
- */
- public void setRemoveRankScoresOnFinalize(boolean removeRankScoresOnFinalize) {
- this.mRemoveRankScoresOnFinalize = removeRankScoresOnFinalize;
- }
-
- protected void onFinalize(Object e) {}
-
- /**
- * The user datum key used to store the rank score.
- * @return the key
- */
- abstract public Object getRankScoreKey();
-
-
- @Override
- protected void finalizeIterations() {
- List<Ranking<?>> sortedRankings = new ArrayList<Ranking<?>>();
-
- int id = 1;
- if (mRankNodes) {
- for (V currentVertex : getVertices()) {
- Ranking<V> ranking = new Ranking<V>(id,getVertexRankScore(currentVertex),currentVertex);
- sortedRankings.add(ranking);
- if (mRemoveRankScoresOnFinalize) {
- this.vertexRankScores.get(getRankScoreKey()).remove(currentVertex);
- }
- id++;
- onFinalize(currentVertex);
- }
- }
- if (mRankEdges) {
- for (E currentEdge : mGraph.getEdges()) {
-
- Ranking<E> ranking = new Ranking<E>(id,getEdgeRankScore(currentEdge),currentEdge);
- sortedRankings.add(ranking);
- if (mRemoveRankScoresOnFinalize) {
- this.edgeRankScores.get(getRankScoreKey()).remove(currentEdge);
- }
- id++;
- onFinalize(currentEdge);
- }
- }
-
- mRankings = sortedRankings;
- Collections.sort(mRankings);
- }
-
- /**
- * Retrieves the list of ranking instances in descending sorted order by rank score
- * If the algorithm is ranking edges, the instances will be of type <code>EdgeRanking</code>, otherwise
- * if the algorithm is ranking nodes the instances will be of type <code>NodeRanking</code>
- * @return the list of rankings
- */
- public List<Ranking<?>> getRankings() {
- return mRankings;
- }
-
- /**
- * Return a list of the top k rank scores.
- * @param topKRankings the value of k to use
- * @return list of rank scores
- */
- public List<Double> getRankScores(int topKRankings) {
- List<Double> scores = new ArrayList<Double>();
- int count=1;
- for (Ranking<?> currentRanking : getRankings()) {
- if (count > topKRankings) {
- return scores;
- }
- scores.add(currentRanking.rankScore);
- count++;
- }
-
- return scores;
- }
-
- /**
- * Given an edge or node, returns the corresponding rank score. This is a default
- * implementation of getRankScore which assumes the decorations are of type MutableDouble.
- * This method only returns legal values if <code>setRemoveRankScoresOnFinalize(false)</code> was called
- * prior to <code>evaluate()</code>.
- * @return the rank score value
- */
- public double getVertexRankScore(V v) {
- Number rankScore = vertexRankScores.get(getRankScoreKey()).get(v);
- if (rankScore != null) {
- return rankScore.doubleValue();
- } else {
- throw new RuntimeException("setRemoveRankScoresOnFinalize(false) must be called before evaluate().");
- }
- }
-
- public double getVertexRankScore(V v, Object key) {
- return vertexRankScores.get(key).get(v).doubleValue();
- }
-
- public double getEdgeRankScore(E e) {
- Number rankScore = edgeRankScores.get(getRankScoreKey()).get(e);
- if (rankScore != null) {
- return rankScore.doubleValue();
- } else {
- throw new RuntimeException("setRemoveRankScoresOnFinalize(false) must be called before evaluate().");
- }
- }
-
- public double getEdgeRankScore(E e, Object key) {
- return edgeRankScores.get(key).get(e).doubleValue();
- }
-
- protected void setVertexRankScore(V v, double rankValue, Object key) {
- vertexRankScores.get(key).put(v, rankValue);
- }
-
- protected void setEdgeRankScore(E e, double rankValue, Object key) {
- edgeRankScores.get(key).put(e, rankValue);
- }
-
- protected void setVertexRankScore(V v, double rankValue) {
- setVertexRankScore(v,rankValue, getRankScoreKey());
- }
-
- protected void setEdgeRankScore(E e, double rankValue) {
- setEdgeRankScore(e, rankValue, getRankScoreKey());
- }
-
- protected void removeVertexRankScore(V v, Object key) {
- vertexRankScores.get(key).remove(v);
- }
-
- protected void removeEdgeRankScore(E e, Object key) {
- edgeRankScores.get(key).remove(e);
- }
-
- protected void removeVertexRankScore(V v) {
- vertexRankScores.get(getRankScoreKey()).remove(v);
- }
-
- protected void removeEdgeRankScore(E e) {
- edgeRankScores.get(getRankScoreKey()).remove(e);
- }
-
- protected double getEdgeWeight(E e) {
- return edgeWeights.get(e).doubleValue();
- }
-
- protected void setEdgeWeight(E e, double weight) {
- edgeWeights.put(e, weight);
- }
-
- public void setEdgeWeights(Map<E,Number> edgeWeights) {
- this.edgeWeights = edgeWeights;
- }
-
- /**
- * @return the edgeWeights
- */
- public Map<E, Number> getEdgeWeights() {
- return edgeWeights;
- }
-
- protected void assignDefaultEdgeTransitionWeights() {
-
- for (V currentVertex : getVertices()) {
-
- Collection<E> outgoingEdges = mGraph.getOutEdges(currentVertex);
-
- double numOutEdges = outgoingEdges.size();
- for (E currentEdge : outgoingEdges) {
- setEdgeWeight(currentEdge,1.0/numOutEdges);
- }
- }
- }
-
- protected void normalizeEdgeTransitionWeights() {
-
- for (V currentVertex : getVertices()) {
-
- Collection<E> outgoingEdges = mGraph.getOutEdges(currentVertex);
-
- double totalEdgeWeight = 0;
- for (E currentEdge : outgoingEdges) {
- totalEdgeWeight += getEdgeWeight(currentEdge);
- }
-
- for (E currentEdge : outgoingEdges) {
- setEdgeWeight(currentEdge,getEdgeWeight(currentEdge)/totalEdgeWeight);
- }
- }
- }
-
- protected void normalizeRankings() {
- if (!mNormalizeRankings) {
- return;
- }
- double totalWeight = 0;
-
- for (V currentVertex : getVertices()) {
- totalWeight += getVertexRankScore(currentVertex);
- }
-
- for (V currentVertex : getVertices()) {
- setVertexRankScore(currentVertex,getVertexRankScore(currentVertex)/totalWeight);
- }
- }
-
- /**
- * Print the rankings to standard out in descending order of rank score
- * @param verbose if <code>true</code>, include information about the actual rank order as well as
- * the original position of the vertex before it was ranked
- * @param printScore if <code>true</code>, include the actual value of the rank score
- */
- public void printRankings(boolean verbose,boolean printScore) {
- double total = 0;
- Format formatter = new DecimalFormat("#0.#######");
- int rank = 1;
-
- for (Ranking<?> currentRanking : getRankings()) {
- double rankScore = currentRanking.rankScore;
- if (verbose) {
- System.out.print("Rank " + rank + ": ");
- if (printScore) {
- System.out.print(formatter.format(rankScore));
- }
- System.out.print("\tVertex Id: " + currentRanking.originalPos);
- System.out.print(" (" + currentRanking.getRanked() + ")");
- System.out.println();
- } else {
- System.out.print(rank + "\t");
- if (printScore) {
- System.out.print(formatter.format(rankScore));
- }
- System.out.println("\t" + currentRanking.originalPos);
-
- }
- total += rankScore;
- rank++;
- }
-
- if (verbose) {
- System.out.println("Total: " + formatter.format(total));
- }
- }
-
- /**
- * Allows the user to specify whether or not s/he wants the rankings to be normalized.
- * In some cases, this will have no effect since the algorithm doesn't allow normalization
- * as an option
- * @param normalizeRankings
- */
- public void setNormalizeRankings(boolean normalizeRankings) {
- mNormalizeRankings = normalizeRankings;
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.importance;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Stack;
-
-import org.apache.commons.collections15.Buffer;
-import org.apache.commons.collections15.buffer.UnboundedFifoBuffer;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.UndirectedGraph;
-
-/**
- * Computes betweenness centrality for each vertex and edge in the graph. The result is that each vertex
- * and edge has a UserData element of type MutableDouble whose key is 'centrality.BetweennessCentrality'.
- * Note: Many social network researchers like to normalize the betweenness values by dividing the values by
- * (n-1)(n-2)/2. The values given here are unnormalized.<p>
- *
- * A simple example of usage is:
- * <pre>
- * BetweennessCentrality ranker = new BetweennessCentrality(someGraph);
- * ranker.evaluate();
- * ranker.printRankings();
- * </pre>
- *
- * Running time is: O(n^2 + nm).
- * @see "Ulrik Brandes: A Faster Algorithm for Betweenness Centrality. Journal of Mathematical Sociology 25(2):163-177, 2001."
- * @author Scott White
- * @author Tom Nelson converted to jung2
- */
-
-public class BetweennessCentrality<V,E> extends AbstractRanker<V,E> {
-
- public static final String CENTRALITY = "centrality.BetweennessCentrality";
-
- /**
- * Constructor which initializes the algorithm
- * @param g the graph whose nodes are to be analyzed
- */
- public BetweennessCentrality(Graph<V,E> g) {
- initialize(g, true, true);
- }
-
- public BetweennessCentrality(Graph<V,E> g, boolean rankNodes) {
- initialize(g, rankNodes, true);
- }
-
- public BetweennessCentrality(Graph<V,E> g, boolean rankNodes, boolean rankEdges)
- {
- initialize(g, rankNodes, rankEdges);
- }
-
- protected void computeBetweenness(Graph<V,E> graph) {
-
- Map<V,BetweennessData> decorator = new HashMap<V,BetweennessData>();
- Map<V,Number> bcVertexDecorator =
- vertexRankScores.get(getRankScoreKey());
- bcVertexDecorator.clear();
- Map<E,Number> bcEdgeDecorator =
- edgeRankScores.get(getRankScoreKey());
- bcEdgeDecorator.clear();
-
- Collection<V> vertices = graph.getVertices();
-
- for (V s : vertices) {
-
- initializeData(graph,decorator);
-
- decorator.get(s).numSPs = 1;
- decorator.get(s).distance = 0;
-
- Stack<V> stack = new Stack<V>();
- Buffer<V> queue = new UnboundedFifoBuffer<V>();
- queue.add(s);
-
- while (!queue.isEmpty()) {
- V v = queue.remove();
- stack.push(v);
-
- for(V w : getGraph().getSuccessors(v)) {
-
- if (decorator.get(w).distance < 0) {
- queue.add(w);
- decorator.get(w).distance = decorator.get(v).distance + 1;
- }
-
- if (decorator.get(w).distance == decorator.get(v).distance + 1) {
- decorator.get(w).numSPs += decorator.get(v).numSPs;
- decorator.get(w).predecessors.add(v);
- }
- }
- }
-
- while (!stack.isEmpty()) {
- V w = stack.pop();
-
- for (V v : decorator.get(w).predecessors) {
-
- double partialDependency = (decorator.get(v).numSPs / decorator.get(w).numSPs);
- partialDependency *= (1.0 + decorator.get(w).dependency);
- decorator.get(v).dependency += partialDependency;
- E currentEdge = getGraph().findEdge(v, w);
- double edgeValue = bcEdgeDecorator.get(currentEdge).doubleValue();
- edgeValue += partialDependency;
- bcEdgeDecorator.put(currentEdge, edgeValue);
- }
- if (w != s) {
- double bcValue = bcVertexDecorator.get(w).doubleValue();
- bcValue += decorator.get(w).dependency;
- bcVertexDecorator.put(w, bcValue);
- }
- }
- }
-
- if(graph instanceof UndirectedGraph) {
- for (V v : vertices) {
- double bcValue = bcVertexDecorator.get(v).doubleValue();
- bcValue /= 2.0;
- bcVertexDecorator.put(v, bcValue);
- }
- for (E e : graph.getEdges()) {
- double bcValue = bcEdgeDecorator.get(e).doubleValue();
- bcValue /= 2.0;
- bcEdgeDecorator.put(e, bcValue);
- }
- }
-
- for (V vertex : vertices) {
- decorator.remove(vertex);
- }
- }
-
- private void initializeData(Graph<V,E> g, Map<V,BetweennessData> decorator) {
- for (V vertex : g.getVertices()) {
-
- Map<V,Number> bcVertexDecorator = vertexRankScores.get(getRankScoreKey());
- if(bcVertexDecorator.containsKey(vertex) == false) {
- bcVertexDecorator.put(vertex, 0.0);
- }
- decorator.put(vertex, new BetweennessData());
- }
- for (E e : g.getEdges()) {
-
- Map<E,Number> bcEdgeDecorator = edgeRankScores.get(getRankScoreKey());
- if(bcEdgeDecorator.containsKey(e) == false) {
- bcEdgeDecorator.put(e, 0.0);
- }
- }
- }
-
- /**
- * the user datum key used to store the rank scores
- * @return the key
- */
- @Override
- public String getRankScoreKey() {
- return CENTRALITY;
- }
-
- @Override
- public void step() {
- computeBetweenness(getGraph());
- }
-
- class BetweennessData {
- double distance;
- double numSPs;
- List<V> predecessors;
- double dependency;
-
- BetweennessData() {
- distance = -1;
- numSPs = 0;
- predecessors = new ArrayList<V>();
- dependency = 0;
- }
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.importance;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import edu.uci.ics.jung.graph.DirectedGraph;
-
-
-/**
- * Algorithm variant of <code>PageRankWithPriors</code> that computes the importance of a node based upon taking fixed-length random
- * walks out from the root set and then computing the stationary probability of being at each node. Specifically, it computes
- * the relative probability that the markov chain will spend at any particular node, given that it start in the root
- * set and ends after k steps.
- * <p>
- * A simple example of usage is:
- * <pre>
- * KStepMarkov ranker = new KStepMarkov(someGraph,rootSet,6,null);
- * ranker.evaluate();
- * ranker.printRankings();
- * </pre>
- * <p>
- *
- * @author Scott White
- * @author Tom Nelson - adapter to jung2
- * @see "Algorithms for Estimating Relative Importance in Graphs by Scott White and Padhraic Smyth, 2003"
- */
-public class KStepMarkov<V,E> extends RelativeAuthorityRanker<V,E> {
- public final static String RANK_SCORE = "jung.algorithms.importance.KStepMarkovExperimental.RankScore";
- private final static String CURRENT_RANK = "jung.algorithms.importance.KStepMarkovExperimental.CurrentRank";
- private int mNumSteps;
- HashMap<V,Number> mPreviousRankingsMap;
-
- /**
- * Construct the algorihm instance and initializes the algorithm.
- * @param graph the graph to be analyzed
- * @param priors the set of root nodes
- * @param k positive integer parameter which controls the relative tradeoff between a distribution "biased" towards
- * R and the steady-state distribution which is independent of where the Markov-process started. Generally values
- * between 4-8 are reasonable
- * @param edgeWeights the weight for each edge
- */
- public KStepMarkov(DirectedGraph<V,E> graph, Set<V> priors, int k, Map<E,Number> edgeWeights) {
- super.initialize(graph,true,false);
- mNumSteps = k;
- setPriors(priors);
- initializeRankings();
- if (edgeWeights == null) {
- assignDefaultEdgeTransitionWeights();
- } else {
- setEdgeWeights(edgeWeights);
- }
- normalizeEdgeTransitionWeights();
- }
-
- /**
- * The user datum key used to store the rank scores.
- * @return the key
- */
- @Override
- public String getRankScoreKey() {
- return RANK_SCORE;
- }
-
- protected void incrementRankScore(V v, double rankValue) {
- double value = getVertexRankScore(v, RANK_SCORE);
- value += rankValue;
- setVertexRankScore(v, value, RANK_SCORE);
- }
-
- protected double getCurrentRankScore(V v) {
- return getVertexRankScore(v, CURRENT_RANK);
- }
-
- protected void setCurrentRankScore(V v, double rankValue) {
- setVertexRankScore(v, rankValue, CURRENT_RANK);
- }
-
- protected void initializeRankings() {
- mPreviousRankingsMap = new HashMap<V,Number>();
- for (V v : getVertices()) {
- Set<V> priors = getPriors();
- double numPriors = priors.size();
-
- if (getPriors().contains(v)) {
- setVertexRankScore(v, 1.0/ numPriors);
- setCurrentRankScore(v, 1.0/ numPriors);
- mPreviousRankingsMap.put(v,1.0/numPriors);
- } else {
- setVertexRankScore(v, 0);
- setCurrentRankScore(v, 0);
- mPreviousRankingsMap.put(v, 0);
- }
- }
- }
- @Override
- public void step() {
-
- for (int i=0;i<mNumSteps;i++) {
- updateRankings();
- for (V v : getVertices()) {
- double currentRankScore = getCurrentRankScore(v);
- incrementRankScore(v,currentRankScore);
- mPreviousRankingsMap.put(v, currentRankScore);
- }
- }
- normalizeRankings();
- }
-
- protected void updateRankings() {
-
- for (V v : getVertices()) {
-
- Collection<E> incomingEdges = getGraph().getInEdges(v);
-
- double currentPageRankSum = 0;
- for (E e : incomingEdges) {
- double currentWeight = getEdgeWeight(e);
- currentPageRankSum +=
- mPreviousRankingsMap.get(getGraph().getOpposite(v,e)).doubleValue()*currentWeight;
- }
- setCurrentRankScore(v,currentPageRankSum);
- }
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.importance;
-
-
-/**
- * Abstract data container for ranking objects. Stores common data relevant to both node and edge rankings, namely,
- * the original position of the instance in the list and the actual ranking score.
- * @author Scott White
- */
-public class Ranking<V> implements Comparable {
- /**
- * The original (0-indexed) position of the instance being ranked
- */
- public int originalPos;
- /**
- * The actual rank score (normally between 0 and 1)
- */
- public double rankScore;
-
- /**
- * what is being ranked
- */
- private V ranked;
-
- /**
- * Constructor which allows values to be set on construction
- * @param originalPos The original (0-indexed) position of the instance being ranked
- * @param rankScore The actual rank score (normally between 0 and 1)
- */
- public Ranking(int originalPos, double rankScore, V ranked) {
- this.originalPos = originalPos;
- this.rankScore = rankScore;
- this.ranked = ranked;
- }
-
- /**
- * Compares two ranking based on the rank score.
- * @param o The other ranking
- * @return -1 if the other ranking is higher, 0 if they are equal, and 1 if this ranking is higher
- */
- public int compareTo(Object o) {
-
- Ranking otherRanking = (Ranking) o;
- return Double.compare(otherRanking.rankScore,rankScore);
- }
-
- /**
- * Returns the rank score as a string.
- * @return the stringified rank score
- */
- @Override
- public String toString() {
- return String.valueOf(rankScore);
- }
-
- /**
- * @return the ranked
- */
- public V getRanked() {
- return ranked;
- }
-
- /**
- * @param ranked the ranked to set
- */
- public void setRanked(V ranked) {
- this.ranked = ranked;
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.importance;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * This class provides basic infrastructure for relative authority algorithms that compute the importance of nodes
- * relative to one or more root nodes. The services provided are:
- * <ul>
- * <li>The set of root nodes (priors) is stored and maintained</li>
- * <li>Getters and setters for the prior rank score are provided</li>
- * </ul>
- *
- * @author Scott White
- */
-public abstract class RelativeAuthorityRanker<V,E> extends AbstractRanker<V,E> {
- private Set<V> mPriors;
- /**
- * The default key used for the user datum key corresponding to prior rank scores.
- */
-
- protected Map<V,Number> priorRankScoreMap = new HashMap<V,Number>();
- /**
- * Cleans up all of the prior rank scores on finalize.
- */
- @Override
- protected void finalizeIterations() {
- super.finalizeIterations();
- priorRankScoreMap.clear();
- }
-
- /**
- * Retrieves the value of the prior rank score.
- * @param v the root node (prior)
- * @return the prior rank score
- */
- protected double getPriorRankScore(V v) {
- return priorRankScoreMap.get(v).doubleValue();
-
- }
-
- /**
- * Allows the user to specify a value to set for the prior rank score
- * @param v the root node (prior)
- * @param value the score to set to
- */
- public void setPriorRankScore(V v, double value) {
- this.priorRankScoreMap.put(v, value);
- }
-
- /**
- * Retrieves the set of priors.
- * @return the set of root nodes (priors)
- */
- protected Set<V> getPriors() { return mPriors; }
-
- /**
- * Specifies which vertices are root nodes (priors).
- * @param priors the root nodes
- */
- protected void setPriors(Set<V> priors) { mPriors = priors; }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.importance;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.graph.DirectedGraph;
-
-
-
-/**
- * This algorithm measures the importance of nodes based upon both the number and length of disjoint paths that lead
- * to a given node from each of the nodes in the root set. Specifically the formula for measuring the importance of a
- * node is given by: I(t|R) = sum_i=1_|P(r,t)|_{alpha^|p_i|} where alpha is the path decay coefficient, p_i is path i
- * and P(r,t) is a set of maximum-sized node-disjoint paths from r to t.
- * <p>
- * This algorithm uses heuristic breadth-first search to try and find the maximum-sized set of node-disjoint paths
- * between two nodes. As such, it is not guaranteed to give exact answers.
- * <p>
- * A simple example of usage is:
- * <pre>
- * WeightedNIPaths ranker = new WeightedNIPaths(someGraph,2.0,6,rootSet);
- * ranker.evaluate();
- * ranker.printRankings();
- * </pre>
- *
- * @author Scott White
- * @see "Algorithms for Estimating Relative Importance in Graphs by Scott White and Padhraic Smyth, 2003"
- */
-public class WeightedNIPaths<V,E> extends AbstractRanker<V,E> {
- public final static String WEIGHTED_NIPATHS_KEY = "jung.algorithms.importance.WEIGHTED_NIPATHS_KEY";
- private double mAlpha;
- private int mMaxDepth;
- private Set<V> mPriors;
- private Map<E,Number> pathIndices = new HashMap<E,Number>();
- private Map<Object,V> roots = new HashMap<Object,V>();
- private Map<V,Set<Number>> pathsSeenMap = new HashMap<V,Set<Number>>();
- private Factory<V> vertexFactory;
- private Factory<E> edgeFactory;
-
- /**
- * Constructs and initializes the algorithm.
- * @param graph the graph whose nodes are being measured for their importance
- * @param alpha the path decay coefficient (>= 1); 2 is recommended
- * @param maxDepth the maximal depth to search out from the root set
- * @param priors the root set (starting vertices)
- */
- public WeightedNIPaths(DirectedGraph<V,E> graph, Factory<V> vertexFactory,
- Factory<E> edgeFactory, double alpha, int maxDepth, Set<V> priors) {
- super.initialize(graph, true,false);
- this.vertexFactory = vertexFactory;
- this.edgeFactory = edgeFactory;
- mAlpha = alpha;
- mMaxDepth = maxDepth;
- mPriors = priors;
- for (V v : graph.getVertices()) {
- super.setVertexRankScore(v, 0.0);
- }
- }
-
- protected void incrementRankScore(V v, double rankValue) {
- setVertexRankScore(v, getVertexRankScore(v) + rankValue);
- }
-
- protected void computeWeightedPathsFromSource(V root, int depth) {
-
- int pathIdx = 1;
-
- for (E e : getGraph().getOutEdges(root)) {
- this.pathIndices.put(e, pathIdx);
- this.roots.put(e, root);
- newVertexEncountered(pathIdx, getGraph().getEndpoints(e).getSecond(), root);
- pathIdx++;
- }
-
- List<E> edges = new ArrayList<E>();
-
- V virtualNode = vertexFactory.create();
- getGraph().addVertex(virtualNode);
- E virtualSinkEdge = edgeFactory.create();
-
- getGraph().addEdge(virtualSinkEdge, virtualNode, root);
- edges.add(virtualSinkEdge);
-
- int currentDepth = 0;
- while (currentDepth <= depth) {
-
- double currentWeight = Math.pow(mAlpha, -1.0 * currentDepth);
- for (E currentEdge : edges) {
- incrementRankScore(getGraph().getEndpoints(currentEdge).getSecond(),//
- currentWeight);
- }
-
- if ((currentDepth == depth) || (edges.size() == 0)) break;
-
- List<E> newEdges = new ArrayList<E>();
-
- for (E currentSourceEdge : edges) { //Iterator sourceEdgeIt = edges.iterator(); sourceEdgeIt.hasNext();) {
- Number sourcePathIndex = this.pathIndices.get(currentSourceEdge);
-
- // from the currentSourceEdge, get its opposite end
- // then iterate over the out edges of that opposite end
- V newDestVertex = getGraph().getEndpoints(currentSourceEdge).getSecond();
- Collection<E> outs = getGraph().getOutEdges(newDestVertex);
- for (E currentDestEdge : outs) {
- V destEdgeRoot = this.roots.get(currentDestEdge);
- V destEdgeDest = getGraph().getEndpoints(currentDestEdge).getSecond();
-
- if (currentSourceEdge == virtualSinkEdge) {
- newEdges.add(currentDestEdge);
- continue;
- }
- if (destEdgeRoot == root) {
- continue;
- }
- if (destEdgeDest == getGraph().getEndpoints(currentSourceEdge).getFirst()) {//currentSourceEdge.getSource()) {
- continue;
- }
- Set<Number> pathsSeen = this.pathsSeenMap.get(destEdgeDest);
-
- if (pathsSeen == null) {
- newVertexEncountered(sourcePathIndex.intValue(), destEdgeDest, root);
- } else if (roots.get(destEdgeDest) != root) {
- roots.put(destEdgeDest,root);
- pathsSeen.clear();
- pathsSeen.add(sourcePathIndex);
- } else if (!pathsSeen.contains(sourcePathIndex)) {
- pathsSeen.add(sourcePathIndex);
- } else {
- continue;
- }
-
- this.pathIndices.put(currentDestEdge, sourcePathIndex);
- this.roots.put(currentDestEdge, root);
- newEdges.add(currentDestEdge);
- }
- }
-
- edges = newEdges;
- currentDepth++;
- }
-
- getGraph().removeVertex(virtualNode);
- }
-
- private void newVertexEncountered(int sourcePathIndex, V dest, V root) {
- Set<Number> pathsSeen = new HashSet<Number>();
- pathsSeen.add(sourcePathIndex);
- this.pathsSeenMap.put(dest, pathsSeen);
- roots.put(dest, root);
- }
-
- @Override
- public void step() {
- for (V v : mPriors) {
- computeWeightedPathsFromSource(v, mMaxDepth);
- }
-
- normalizeRankings();
-// return 0;
- }
-
- /**
- * Given a node, returns the corresponding rank score. This implementation of <code>getRankScore</code> assumes
- * the decoration representing the rank score is of type <code>MutableDouble</code>.
- * @return the rank score for this node
- */
- @Override
- public String getRankScoreKey() {
- return WEIGHTED_NIPATHS_KEY;
- }
-
- @Override
- protected void onFinalize(Object udc) {
- pathIndices.remove(udc);
- roots.remove(udc);
- pathsSeenMap.remove(udc);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Jul 7, 2003
- *
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ChainedTransformer;
-import org.apache.commons.collections15.functors.CloneTransformer;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Abstract class for implementations of {@code Layout}. It handles some of the
- * basic functions: storing coordinates, maintaining the dimensions, initializing
- * the locations, maintaining locked vertices.
- *
- * @author Danyel Fisher, Scott White
- * @author Tom Nelson - converted to jung2
- * @param <V> the vertex type
- * @param <E> the edge type
- */
-abstract public class AbstractLayout<V, E> implements Layout<V,E> {
-
- /**
- * a set of vertices that should not move in relation to the
- * other vertices
- */
- private Set<V> dontmove = new HashSet<V>();
-
- protected Dimension size;
- protected Graph<V, E> graph;
- protected boolean initialized;
-
- protected Map<V, Point2D> locations =
- LazyMap.decorate(new HashMap<V, Point2D>(),
- new Transformer<V,Point2D>() {
- public Point2D transform(V arg0) {
- return new Point2D.Double();
- }});
-
-
- /**
- * Creates an instance which does not initialize the vertex locations.
- *
- * @param graph the graph for which the layout algorithm is to be created.
- */
- protected AbstractLayout(Graph<V, E> graph) {
- if (graph == null)
- {
- throw new IllegalArgumentException("Graph must be non-null");
- }
- this.graph = graph;
- }
-
- @SuppressWarnings("unchecked")
- protected AbstractLayout(Graph<V,E> graph, Transformer<V,Point2D> initializer) {
- this.graph = graph;
- Transformer<V, ? extends Object> chain =
- ChainedTransformer.getInstance(initializer, CloneTransformer.getInstance());
- this.locations = LazyMap.decorate(new HashMap<V,Point2D>(), (Transformer<V,Point2D>)chain);
- initialized = true;
- }
-
- protected AbstractLayout(Graph<V,E> graph, Dimension size) {
- this.graph = graph;
- this.size = size;
- }
-
- @SuppressWarnings("unchecked")
- protected AbstractLayout(Graph<V,E> graph, Transformer<V,Point2D> initializer, Dimension size) {
- this.graph = graph;
- Transformer<V, ? extends Object> chain =
- ChainedTransformer.getInstance(initializer, CloneTransformer.getInstance());
- this.locations = LazyMap.decorate(new HashMap<V,Point2D>(), (Transformer<V,Point2D>)chain);
- this.size = size;
- }
-
- public void setGraph(Graph<V,E> graph) {
- this.graph = graph;
- if(size != null && graph != null) {
- initialize();
- }
- }
-
- /**
- * When a visualization is resized, it presumably wants to fix the
- * locations of the vertices and possibly to reinitialize its data. The
- * current method calls <tt>initializeLocations</tt> followed by <tt>initialize_local</tt>.
- */
- public void setSize(Dimension size) {
-
- if(size != null && graph != null) {
-
- Dimension oldSize = this.size;
- this.size = size;
- initialize();
-
- if(oldSize != null) {
- adjustLocations(oldSize, size);
- }
- }
- }
-
- private void adjustLocations(Dimension oldSize, Dimension size) {
-
- int xOffset = (size.width - oldSize.width) / 2;
- int yOffset = (size.height - oldSize.height) / 2;
-
- // now, move each vertex to be at the new screen center
- while(true) {
- try {
- for(V v : getGraph().getVertices()) {
- offsetVertex(v, xOffset, yOffset);
- }
- break;
- } catch(ConcurrentModificationException cme) {
- }
- }
- }
-
- public boolean isLocked(V v) {
- return dontmove.contains(v);
- }
-
- @SuppressWarnings("unchecked")
- public void setInitializer(Transformer<V,Point2D> initializer) {
- if(this.equals(initializer)) {
- throw new IllegalArgumentException("Layout cannot be initialized with itself");
- }
- Transformer<V, ? extends Object> chain =
- ChainedTransformer.getInstance(initializer, CloneTransformer.getInstance());
- this.locations = LazyMap.decorate(new HashMap<V,Point2D>(), (Transformer<V, Point2D>)chain);
- initialized = true;
- }
-
- /**
- * Returns the current size of the visualization space, accoring to the
- * last call to resize().
- *
- * @return the current size of the screen
- */
- public Dimension getSize() {
- return size;
- }
-
- /**
- * Returns the Coordinates object that stores the vertex' x and y location.
- *
- * @param v
- * A Vertex that is a part of the Graph being visualized.
- * @return A Coordinates object with x and y locations.
- */
- private Point2D getCoordinates(V v) {
- return locations.get(v);
- }
-
- public Point2D transform(V v) {
- return getCoordinates(v);
- }
-
- /**
- * Returns the x coordinate of the vertex from the Coordinates object.
- * in most cases you will be better off calling transform(v).
- */
- public double getX(V v) {
- assert getCoordinates(v) != null : "Cannot getX for an unmapped vertex "+v;
- return getCoordinates(v).getX();
- }
-
- /**
- * Returns the y coordinate of the vertex from the Coordinates object.
- * In most cases you will be better off calling transform(v).
- */
- public double getY(V v) {
- assert getCoordinates(v) != null : "Cannot getY for an unmapped vertex "+v;
- return getCoordinates(v).getY();
- }
-
- /**
- * @param v
- * @param xOffset
- * @param yOffset
- */
- protected void offsetVertex(V v, double xOffset, double yOffset) {
- Point2D c = getCoordinates(v);
- c.setLocation(c.getX()+xOffset, c.getY()+yOffset);
- setLocation(v, c);
- }
-
- /**
- * Accessor for the graph that represets all vertices.
- *
- * @return the graph that contains all vertices.
- */
- public Graph<V, E> getGraph() {
- return graph;
- }
-
- /**
- * Forcibly moves a vertex to the (x,y) location by setting its x and y
- * locations to the inputted location. Does not add the vertex to the
- * "dontmove" list, and (in the default implementation) does not make any
- * adjustments to the rest of the graph.
- */
- public void setLocation(V picked, double x, double y) {
- Point2D coord = getCoordinates(picked);
- coord.setLocation(x, y);
- }
-
- public void setLocation(V picked, Point2D p) {
- Point2D coord = getCoordinates(picked);
- coord.setLocation(p);
- }
-
- /**
- * Locks {@code v} in place if {@code state} is {@code true}, otherwise unlocks it.
- */
- public void lock(V v, boolean state) {
- if(state == true)
- dontmove.add(v);
- else
- dontmove.remove(v);
- }
-
- /**
- * Locks all vertices in place if {@code lock} is {@code true}, otherwise unlocks all vertices.
- */
- public void lock(boolean lock) {
- for(V v : graph.getVertices()) {
- lock(v, lock);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- *
- *
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.AffineTransform;
-import java.awt.geom.Point2D;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * A {@code Layout} implementation that combines
- * multiple other layouts so that they may be manipulated
- * as one layout. The relaxer thread will step each layout
- * in sequence.
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- * @param <V> the vertex type
- * @param <E> the edge type
- */
-public class AggregateLayout<V, E> implements Layout<V,E>, IterativeContext {
-
- protected Layout<V,E> delegate;
- protected Map<Layout<V,E>,Point2D> layouts = new HashMap<Layout<V,E>,Point2D>();
-
- /**
- * Creates an instance backed by the specified {@code delegate}.
- * @param delegate
- */
- public AggregateLayout(Layout<V, E> delegate) {
- this.delegate = delegate;
- }
-
- /**
- * @return the delegate
- */
- public Layout<V, E> getDelegate() {
- return delegate;
- }
-
- /**
- * @param delegate the delegate to set
- */
- public void setDelegate(Layout<V, E> delegate) {
- this.delegate = delegate;
- }
-
- /**
- * adds the passed layout as a sublayout, also specifying
- * the center of where this sublayout should appear
- * @param layout
- * @param center
- */
- public void put(Layout<V,E> layout, Point2D center) {
- layouts.put(layout,center);
- }
-
- /**
- * Returns the center of the passed layout.
- * @param layout
- * @return the center of the passed layout
- */
- public Point2D get(Layout<V,E> layout) {
- return layouts.get(layout);
- }
-
- /**
- * Removes {@code layout} from this instance.
- */
- public void remove(Layout<V,E> layout) {
- layouts.remove(layout);
- }
-
- /**
- * Removes all layouts from this instance.
- */
- public void removeAll() {
- layouts.clear();
- }
-
- /**
- * Returns the graph for which this layout is defined.
- * @return the graph for which this layout is defined
- * @see edu.uci.ics.jung.algorithms.layout.Layout#getGraph()
- */
- public Graph<V, E> getGraph() {
- return delegate.getGraph();
- }
-
- /**
- * Returns the size of the underlying layout.
- * @return the size of the underlying layout
- * @see edu.uci.ics.jung.algorithms.layout.Layout#getSize()
- */
- public Dimension getSize() {
- return delegate.getSize();
- }
-
- /**
- *
- * @see edu.uci.ics.jung.algorithms.layout.Layout#initialize()
- */
- public void initialize() {
- delegate.initialize();
- for(Layout<V,E> layout : layouts.keySet()) {
- layout.initialize();
- }
- }
-
- /**
- * Override to test if the passed vertex is locked in
- * any of the layouts.
- * @param v
- * @return true if v is locked in any of the layouts, and false otherwise
- * @see edu.uci.ics.jung.algorithms.layout.Layout#isLocked(java.lang.Object)
- */
- public boolean isLocked(V v) {
- boolean locked = false;
- for(Layout<V,E> layout : layouts.keySet()) {
- locked |= layout.isLocked(v);
- }
- locked |= delegate.isLocked(v);
- return locked;
- }
-
- /**
- * override to lock or unlock this vertex in any layout with
- * a subgraph containing it
- * @param v
- * @param state
- * @see edu.uci.ics.jung.algorithms.layout.Layout#lock(java.lang.Object, boolean)
- */
- public void lock(V v, boolean state) {
- for(Layout<V,E> layout : layouts.keySet()) {
- if(layout.getGraph().getVertices().contains(v)) {
- layout.lock(v, state);
- }
- }
- delegate.lock(v, state);
- }
-
- /**
- *
- * @see edu.uci.ics.jung.algorithms.layout.Layout#reset()
- */
- public void reset() {
- for(Layout<V,E> layout : layouts.keySet()) {
- layout.reset();
- }
- delegate.reset();
- }
-
- /**
- * @param graph
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setGraph(edu.uci.ics.jung.graph.Graph)
- */
- public void setGraph(Graph<V, E> graph) {
- delegate.setGraph(graph);
- }
-
- /**
- * @param initializer
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setInitializer(org.apache.commons.collections15.Transformer)
- */
- public void setInitializer(Transformer<V, Point2D> initializer) {
- delegate.setInitializer(initializer);
- }
-
- /**
- * @param v
- * @param location
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setLocation(java.lang.Object, java.awt.geom.Point2D)
- */
- public void setLocation(V v, Point2D location) {
- boolean wasInSublayout = false;
- for(Layout<V,E> layout : layouts.keySet()) {
- if(layout.getGraph().getVertices().contains(v)) {
- Point2D center = layouts.get(layout);
- // transform by the layout itself, but offset to the
- // center of the sublayout
- Dimension d = layout.getSize();
-
- AffineTransform at =
- AffineTransform.getTranslateInstance(-center.getX()+d.width/2,-center.getY()+d.height/2);
- Point2D localLocation = at.transform(location, null);
- layout.setLocation(v, localLocation);
- wasInSublayout = true;
- }
- }
- if(wasInSublayout == false && getGraph().getVertices().contains(v)) {
- delegate.setLocation(v, location);
- }
- }
-
- /**
- * @param d
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setSize(java.awt.Dimension)
- */
- public void setSize(Dimension d) {
- delegate.setSize(d);
- }
-
- /**
- * Returns a map from each {@code Layout} instance to its center point.
- */
- public Map<Layout<V,E>,Point2D> getLayouts() {
- return layouts;
- }
-
- /**
- * Returns the location of the vertex. The location is specified first
- * by the sublayouts, and then by the base layout if no sublayouts operate
- * on this vertex.
- * @return the location of the vertex
- * @see org.apache.commons.collections15.Transformer#transform(java.lang.Object)
- */
- public Point2D transform(V v) {
- boolean wasInSublayout = false;
- for(Layout<V,E> layout : layouts.keySet()) {
- if(layout.getGraph().getVertices().contains(v)) {
- wasInSublayout = true;
- Point2D center = layouts.get(layout);
- // transform by the layout itself, but offset to the
- // center of the sublayout
- Dimension d = layout.getSize();
- AffineTransform at =
- AffineTransform.getTranslateInstance(center.getX()-d.width/2,
- center.getY()-d.height/2);
- return at.transform(layout.transform(v),null);
- }
- }
- if(wasInSublayout == false) {
- return delegate.transform(v);
- }
- return null;
-
- }
-
- /**
- * Check all sublayouts.keySet() and the delegate layout, returning
- * done == true iff all are done.
- */
- public boolean done() {
- boolean done = true;
- for(Layout<V,E> layout : layouts.keySet()) {
- if(layout instanceof IterativeContext) {
- done &= ((IterativeContext)layout).done();
- }
- }
- if(delegate instanceof IterativeContext) {
- done &= ((IterativeContext)delegate).done();
- }
- return done;
- }
-
- /**
- * call step on any sublayout that is also an IterativeContext
- * and is not done
- */
- public void step() {
- for(Layout<V,E> layout : layouts.keySet()) {
- if(layout instanceof IterativeContext) {
- IterativeContext context = (IterativeContext)layout;
- if(context.done() == false) {
- context.step();
- }
- }
- }
- if(delegate instanceof IterativeContext) {
- IterativeContext context = (IterativeContext)delegate;
- if(context.done() == false) {
- context.step();
- }
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Jul 9, 2005
- */
-
-package edu.uci.ics.jung.algorithms.layout;
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.graph.Forest;
-import edu.uci.ics.jung.graph.util.TreeUtils;
-
-/**
- * A {@code Layout} implementation that assigns positions to {@code Tree} or
- * {@code Forest} vertices using associations with nested circles ("balloons").
- * A balloon is nested inside another balloon if the first balloon's subtree
- * is a subtree of the second balloon's subtree.
- *
- * @author Tom Nelson
- *
- */
-public class BalloonLayout<V,E> extends TreeLayout<V,E> {
-
- protected Map<V,PolarPoint> polarLocations =
- LazyMap.decorate(new HashMap<V, PolarPoint>(),
- new Transformer<V,PolarPoint>() {
- public PolarPoint transform(V arg0) {
- return new PolarPoint();
- }});
-
- protected Map<V,Double> radii = new HashMap<V,Double>();
-
- /**
- * Creates an instance based on the input forest.
- */
- public BalloonLayout(Forest<V,E> g)
- {
- super(g);
- }
-
- protected void setRootPolars()
- {
- List<V> roots = TreeUtils.getRoots(graph);
- if(roots.size() == 1) {
- // its a Tree
- V root = roots.get(0);
- setRootPolar(root);
- setPolars(new ArrayList<V>(graph.getChildren(root)),
- getCenter(), getSize().width/2);
- } else if (roots.size() > 1) {
- // its a Forest
- setPolars(roots, getCenter(), getSize().width/2);
- }
- }
-
- protected void setRootPolar(V root) {
- PolarPoint pp = new PolarPoint(0,0);
- Point2D p = getCenter();
- polarLocations.put(root, pp);
- locations.put(root, p);
- }
-
-
- protected void setPolars(List<V> kids, Point2D parentLocation, double parentRadius) {
-
- int childCount = kids.size();
- if(childCount == 0) return;
- // handle the 1-child case with 0 limit on angle.
- double angle = Math.max(0, Math.PI / 2 * (1 - 2.0/childCount));
- double childRadius = parentRadius*Math.cos(angle) / (1 + Math.cos(angle));
- double radius = parentRadius - childRadius;
-
- double rand = Math.random();
-
- for(int i=0; i< childCount; i++) {
- V child = kids.get(i);
- double theta = i* 2*Math.PI/childCount + rand;
- radii.put(child, childRadius);
-
- PolarPoint pp = new PolarPoint(theta, radius);
- polarLocations.put(child, pp);
-
- Point2D p = PolarPoint.polarToCartesian(pp);
- p.setLocation(p.getX()+parentLocation.getX(), p.getY()+parentLocation.getY());
- locations.put(child, p);
- setPolars(new ArrayList<V>(graph.getChildren(child)), p, childRadius);
- }
- }
-
- @Override
- public void setSize(Dimension size) {
- this.size = size;
- setRootPolars();
- }
-
- /**
- * Returns the coordinates of {@code v}'s parent, or the
- * center of this layout's area if it's a root.
- */
- public Point2D getCenter(V v) {
- V parent = graph.getParent(v);
- if(parent == null) {
- return getCenter();
- }
- return locations.get(parent);
- }
-
- @Override
- public void setLocation(V v, Point2D location) {
- Point2D c = getCenter(v);
- Point2D pv = new Point2D.Double(location.getX()-c.getX(),location.getY()-c.getY());
- PolarPoint newLocation = PolarPoint.cartesianToPolar(pv);
- polarLocations.get(v).setLocation(newLocation);
-
- Point2D center = getCenter(v);
- pv.setLocation(pv.getX()+center.getX(), pv.getY()+center.getY());
- locations.put(v, pv);
- }
-
- @Override
- public Point2D transform(V v) {
- return locations.get(v);
- }
-
- /**
- * @return the radii
- */
- public Map<V, Double> getRadii() {
- return radii;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Dec 4, 2003
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.graph.Graph;
-
-
-
-/**
- * A {@code Layout} implementation that positions vertices equally spaced on a regular circle.
- *
- * @author Masanori Harada
- */
-public class CircleLayout<V, E> extends AbstractLayout<V,E> {
-
- private double radius;
- private List<V> vertex_ordered_list;
-
- Map<V, CircleVertexData> circleVertexDataMap =
- LazyMap.decorate(new HashMap<V,CircleVertexData>(),
- new Factory<CircleVertexData>() {
- public CircleVertexData create() {
- return new CircleVertexData();
- }});
-
- /**
- * Creates an instance for the specified graph.
- */
- public CircleLayout(Graph<V,E> g) {
- super(g);
- }
-
- /**
- * Returns the radius of the circle.
- */
- public double getRadius() {
- return radius;
- }
-
- /**
- * Sets the radius of the circle. Must be called before
- * {@code initialize()} is called.
- */
- public void setRadius(double radius) {
- this.radius = radius;
- }
-
- /**
- * Sets the order of the vertices in the layout according to the ordering
- * specified by {@code comparator}.
- */
- public void setVertexOrder(Comparator<V> comparator)
- {
- if (vertex_ordered_list == null)
- vertex_ordered_list = new ArrayList<V>(getGraph().getVertices());
- Collections.sort(vertex_ordered_list, comparator);
- }
-
- /**
- * Sets the order of the vertices in the layout according to the ordering
- * of {@code vertex_list}.
- */
- public void setVertexOrder(List<V> vertex_list)
- {
- if (!vertex_list.containsAll(getGraph().getVertices()))
- throw new IllegalArgumentException("Supplied list must include " +
- "all vertices of the graph");
- this.vertex_ordered_list = vertex_list;
- }
-
- public void reset() {
- initialize();
- }
-
- public void initialize()
- {
- Dimension d = getSize();
-
- if (d != null)
- {
- if (vertex_ordered_list == null)
- setVertexOrder(new ArrayList<V>(getGraph().getVertices()));
-
- double height = d.getHeight();
- double width = d.getWidth();
-
- if (radius <= 0) {
- radius = 0.45 * (height < width ? height : width);
- }
-
- int i = 0;
- for (V v : vertex_ordered_list)
- {
- Point2D coord = transform(v);
-
- double angle = (2 * Math.PI * i) / vertex_ordered_list.size();
-
- coord.setLocation(Math.cos(angle) * radius + width / 2,
- Math.sin(angle) * radius + height / 2);
-
- CircleVertexData data = getCircleData(v);
- data.setAngle(angle);
- i++;
- }
- }
- }
-
- protected CircleVertexData getCircleData(V v) {
- return circleVertexDataMap.get(v);
- }
-
- protected static class CircleVertexData {
- private double angle;
-
- protected double getAngle() {
- return angle;
- }
-
- protected void setAngle(double angle) {
- this.angle = angle;
- }
-
- @Override
- public String toString() {
- return "CircleVertexData: angle=" + angle;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Dec 4, 2003
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * An implementation of {@code Layout} suitable for tree-like directed
- * acyclic graphs. Parts of it will probably not terminate if the graph is
- * cyclic! The layout will result in directed edges pointing generally upwards.
- * Any vertices with no successors are considered to be level 0, and tend
- * towards the top of the layout. Any vertex has a level one greater than the
- * maximum level of all its successors.
- *
- *
- * @author John Yesberg
- */
-public class DAGLayout<V, E> extends SpringLayout<V,E> {
-
- /**
- * Each vertex has a minimumLevel. Any vertex with no successors has
- * minimumLevel of zero. The minimumLevel of any vertex must be strictly
- * greater than the minimumLevel of its parents. (Vertex A is a parent of
- * Vertex B iff there is an edge from B to A.) Typically, a vertex will
- * have a minimumLevel which is one greater than the minimumLevel of its
- * parent's. However, if the vertex has two parents, its minimumLevel will
- * be one greater than the maximum of the parents'. We need to calculate
- * the minimumLevel for each vertex. When we layout the graph, vertices
- * cannot be drawn any higher than the minimumLevel. The graphHeight of a
- * graph is the greatest minimumLevel that is used. We will modify the
- * SpringLayout calculations so that nodes cannot move above their assigned
- * minimumLevel.
- */
- private Map<V,Number> minLevels = new HashMap<V,Number>();
- // Simpler than the "pair" technique.
- static int graphHeight;
- static int numRoots;
- final double SPACEFACTOR = 1.3;
- // How much space do we allow for additional floating at the bottom.
- final double LEVELATTRACTIONRATE = 0.8;
-
- /**
- * A bunch of parameters to help work out when to stop quivering.
- *
- * If the MeanSquareVel(ocity) ever gets below the MSV_THRESHOLD, then we
- * will start a final cool-down phase of COOL_DOWN_INCREMENT increments. If
- * the MeanSquareVel ever exceeds the threshold, we will exit the cool down
- * phase, and continue looking for another opportunity.
- */
- final double MSV_THRESHOLD = 10.0;
- double meanSquareVel;
- boolean stoppingIncrements = false;
- int incrementsLeft;
- final int COOL_DOWN_INCREMENTS = 200;
-
- /**
- * Creates an instance for the specified graph.
- */
- public DAGLayout(Graph<V,E> g) {
- super(g);
- }
-
- /**
- * setRoot calculates the level of each vertex in the graph. Level 0 is
- * allocated to any vertex with no successors. Level n+1 is allocated to
- * any vertex whose successors' maximum level is n.
- */
- public void setRoot(Graph<V,E> g) {
- numRoots = 0;
- for(V v : g.getVertices()) {
- Collection<V> successors = getGraph().getSuccessors(v);
- if (successors.size() == 0) {
- setRoot(v);
- numRoots++;
- }
- }
- }
-
- /**
- * Set vertex v to be level 0.
- */
- public void setRoot(V v) {
- minLevels.put(v, new Integer(0));
- // set all the levels.
- propagateMinimumLevel(v);
- }
-
- /**
- * A recursive method for allocating the level for each vertex. Ensures
- * that all predecessors of v have a level which is at least one greater
- * than the level of v.
- *
- * @param v
- */
- public void propagateMinimumLevel(V v) {
- int level = minLevels.get(v).intValue();
- for(V child : getGraph().getPredecessors(v)) {
- int oldLevel, newLevel;
- Number o = minLevels.get(child);
- if (o != null)
- oldLevel = o.intValue();
- else
- oldLevel = 0;
- newLevel = Math.max(oldLevel, level + 1);
- minLevels.put(child, new Integer(newLevel));
-
- if (newLevel > graphHeight)
- graphHeight = newLevel;
- propagateMinimumLevel(child);
- }
- }
-
- /**
- * Sets random locations for a vertex within the dimensions of the space.
- * This overrides the method in AbstractLayout
- *
- * @param coord
- * @param d
- */
- private void initializeLocation(
- V v,
- Point2D coord,
- Dimension d) {
-
- int level = minLevels.get(v).intValue();
- int minY = (int) (level * d.getHeight() / (graphHeight * SPACEFACTOR));
- double x = Math.random() * d.getWidth();
- double y = Math.random() * (d.getHeight() - minY) + minY;
- coord.setLocation(x,y);
- }
-
- @Override
- public void setSize(Dimension size) {
- super.setSize(size);
- for(V v : getGraph().getVertices()) {
- initializeLocation(v,transform(v),getSize());
- }
- }
-
- /**
- * Had to override this one as well, to ensure that setRoot() is called.
- */
- @Override
- public void initialize() {
- super.initialize();
- setRoot(getGraph());
- }
-
- /**
- * Override the moveNodes() method from SpringLayout. The only change we
- * need to make is to make sure that nodes don't float higher than the minY
- * coordinate, as calculated by their minimumLevel.
- */
- @Override
- protected void moveNodes() {
- // Dimension d = currentSize;
- double oldMSV = meanSquareVel;
- meanSquareVel = 0;
-
- synchronized (getSize()) {
-
- for(V v : getGraph().getVertices()) {
- if (isLocked(v))
- continue;
- SpringLayout.SpringVertexData vd = springVertexData.get(v);
- Point2D xyd = transform(v);
-
- int width = getSize().width;
- int height = getSize().height;
-
- // (JY addition: three lines are new)
- int level =
- minLevels.get(v).intValue();
- int minY = (int) (level * height / (graphHeight * SPACEFACTOR));
- int maxY =
- level == 0
- ? (int) (height / (graphHeight * SPACEFACTOR * 2))
- : height;
-
- // JY added 2* - double the sideways repulsion.
- vd.dx += 2 * vd.repulsiondx + vd.edgedx;
- vd.dy += vd.repulsiondy + vd.edgedy;
-
- // JY Addition: Attract the vertex towards it's minimumLevel
- // height.
- double delta = xyd.getY() - minY;
- vd.dy -= delta * LEVELATTRACTIONRATE;
- if (level == 0)
- vd.dy -= delta * LEVELATTRACTIONRATE;
- // twice as much at the top.
-
- // JY addition:
- meanSquareVel += (vd.dx * vd.dx + vd.dy * vd.dy);
-
- // keeps nodes from moving any faster than 5 per time unit
- xyd.setLocation(xyd.getX()+Math.max(-5, Math.min(5, vd.dx)) , xyd.getY()+Math.max(-5, Math.min(5, vd.dy)) );
-
- if (xyd.getX() < 0) {
- xyd.setLocation(0, xyd.getY());
- } else if (xyd.getX() > width) {
- xyd.setLocation(width, xyd.getY());
- }
-
- // (JY addition: These two lines replaced 0 with minY)
- if (xyd.getY() < minY) {
- xyd.setLocation(xyd.getX(), minY);
- // (JY addition: replace height with maxY)
- } else if (xyd.getY() > maxY) {
- xyd.setLocation(xyd.getX(), maxY);
- }
-
- // (JY addition: if there's only one root, anchor it in the
- // middle-top of the screen)
- if (numRoots == 1 && level == 0) {
- xyd.setLocation(width/2, xyd.getY());
- }
- }
- }
- //System.out.println("MeanSquareAccel="+meanSquareVel);
- if (!stoppingIncrements
- && Math.abs(meanSquareVel - oldMSV) < MSV_THRESHOLD) {
- stoppingIncrements = true;
- incrementsLeft = COOL_DOWN_INCREMENTS;
- } else if (
- stoppingIncrements
- && Math.abs(meanSquareVel - oldMSV) <= MSV_THRESHOLD) {
- incrementsLeft--;
- if (incrementsLeft <= 0)
- incrementsLeft = 0;
- }
- }
-
- /**
- * Override incrementsAreDone so that we can eventually stop.
- */
- @Override
- public boolean done() {
- if (stoppingIncrements && incrementsLeft == 0)
- return true;
- else
- return false;
- }
-
- /**
- * Override forceMove so that if someone moves a node, we can re-layout
- * everything.
- */
- @Override
- public void setLocation(V picked, double x, double y) {
- Point2D coord = transform(picked);
- coord.setLocation(x,y);
- stoppingIncrements = false;
- }
-
- /**
- * Override forceMove so that if someone moves a node, we can re-layout
- * everything.
- */
- @Override
- public void setLocation(V picked, Point2D p) {
- Point2D coord = transform(picked);
- coord.setLocation(p);
- stoppingIncrements = false;
- }
-
- /**
- * Overridden relaxEdges. This one reduces the effect of edges between
- * greatly different levels.
- *
- */
- @Override
- protected void relaxEdges() {
- for(E e : getGraph().getEdges()) {
- Pair<V> endpoints = getGraph().getEndpoints(e);
- V v1 = endpoints.getFirst();
- V v2 = endpoints.getSecond();
-
- Point2D p1 = transform(v1);
- Point2D p2 = transform(v2);
- double vx = p1.getX() - p2.getX();
- double vy = p1.getY() - p2.getY();
- double len = Math.sqrt(vx * vx + vy * vy);
-
- // JY addition.
- int level1 =
- minLevels.get(v1).intValue();
- int level2 =
- minLevels.get(v2).intValue();
-
- // desiredLen *= Math.pow( 1.1, (v1.degree() + v2.degree()) );
-// double desiredLen = getLength(e);
- double desiredLen = lengthFunction.transform(e);
-
- // round from zero, if needed [zero would be Bad.].
- len = (len == 0) ? .0001 : len;
-
- // force factor: optimal length minus actual length,
- // is made smaller as the current actual length gets larger.
- // why?
-
- // System.out.println("Desired : " + getLength( e ));
- double f = force_multiplier * (desiredLen - len) / len;
-
- f = f * Math.pow(stretch / 100.0,
- (getGraph().degree(v1) + getGraph().degree(v2) -2));
-
- // JY addition. If this is an edge which stretches a long way,
- // don't be so concerned about it.
- if (level1 != level2)
- f = f / Math.pow(Math.abs(level2 - level1), 1.5);
-
- // f= Math.min( 0, f );
-
- // the actual movement distance 'dx' is the force multiplied by the
- // distance to go.
- double dx = f * vx;
- double dy = f * vy;
- SpringVertexData v1D, v2D;
- v1D = springVertexData.get(v1);
- v2D = springVertexData.get(v2);
-
-// SpringEdgeData<E> sed = getSpringEdgeData(e);
-// sed.f = f;
-
- v1D.edgedx += dx;
- v1D.edgedy += dy;
- v2D.edgedx += -dx;
- v2D.edgedy += -dy;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import edu.uci.ics.jung.algorithms.layout.util.RandomLocationTransformer;
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.map.LazyMap;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Implements the Fruchterman-Reingold force-directed algorithm for node layout.
- *
- * <p>Behavior is determined by the following settable parameters:
- * <ul>
- * <li/>attraction multiplier: how much edges try to keep their vertices together
- * <li/>repulsion multiplier: how much vertices try to push each other apart
- * <li/>maximum iterations: how many iterations this algorithm will use before stopping
- * </ul>
- * Each of the first two defaults to 0.75; the maximum number of iterations defaults to 700.
- *
- * @see "Fruchterman and Reingold, 'Graph Drawing by Force-directed Placement'"
- * @see "http://i11www.ilkd.uni-karlsruhe.de/teaching/SS_04/visualisierung/papers/fruchterman91graph.pdf"
- * @author Scott White, Yan-Biao Boey, Danyel Fisher
- */
-public class FRLayout<V, E> extends AbstractLayout<V, E> implements IterativeContext {
-
- private double forceConstant;
-
- private double temperature;
-
- private int currentIteration;
-
- private int mMaxIterations = 700;
-
- private Map<V, FRVertexData> frVertexData =
- LazyMap.decorate(new HashMap<V,FRVertexData>(), new Factory<FRVertexData>() {
- public FRVertexData create() {
- return new FRVertexData();
- }});
-
- private double attraction_multiplier = 0.75;
-
- private double attraction_constant;
-
- private double repulsion_multiplier = 0.75;
-
- private double repulsion_constant;
-
- private double max_dimension;
-
- /**
- * Creates an instance for the specified graph.
- */
- public FRLayout(Graph<V, E> g) {
- super(g);
- }
-
- /**
- * Creates an instance of size {@code d} for the specified graph.
- */
- public FRLayout(Graph<V, E> g, Dimension d) {
- super(g, new RandomLocationTransformer<V>(d), d);
- initialize();
- max_dimension = Math.max(d.height, d.width);
- }
-
- @Override
- public void setSize(Dimension size) {
- if(initialized == false) {
- setInitializer(new RandomLocationTransformer<V>(size));
- }
- super.setSize(size);
- max_dimension = Math.max(size.height, size.width);
- }
-
- /**
- * Sets the attraction multiplier.
- */
- public void setAttractionMultiplier(double attraction) {
- this.attraction_multiplier = attraction;
- }
-
- /**
- * Sets the repulsion multiplier.
- */
- public void setRepulsionMultiplier(double repulsion) {
- this.repulsion_multiplier = repulsion;
- }
-
- public void reset() {
- doInit();
- }
-
- public void initialize() {
- doInit();
- }
-
- private void doInit() {
- Graph<V,E> graph = getGraph();
- Dimension d = getSize();
- if(graph != null && d != null) {
- currentIteration = 0;
- temperature = d.getWidth() / 10;
-
- forceConstant =
- Math
- .sqrt(d.getHeight()
- * d.getWidth()
- / graph.getVertexCount());
-
- attraction_constant = attraction_multiplier * forceConstant;
- repulsion_constant = repulsion_multiplier * forceConstant;
- }
- }
-
- private double EPSILON = 0.000001D;
-
- /**
- * Moves the iteration forward one notch, calculation attraction and
- * repulsion between vertices and edges and cooling the temperature.
- */
- public synchronized void step() {
- currentIteration++;
-
- /**
- * Calculate repulsion
- */
- while(true) {
-
- try {
- for(V v1 : getGraph().getVertices()) {
- calcRepulsion(v1);
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
-
- /**
- * Calculate attraction
- */
- while(true) {
- try {
- for(E e : getGraph().getEdges()) {
-
- calcAttraction(e);
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
-
-
- while(true) {
- try {
- for(V v : getGraph().getVertices()) {
- if (isLocked(v)) continue;
- calcPositions(v);
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- cool();
- }
-
- protected synchronized void calcPositions(V v) {
- FRVertexData fvd = getFRData(v);
- if(fvd == null) return;
- Point2D xyd = transform(v);
- double deltaLength = Math.max(EPSILON, fvd.norm());
-
- double newXDisp = fvd.getX() / deltaLength
- * Math.min(deltaLength, temperature);
-
- if (Double.isNaN(newXDisp)) {
- throw new IllegalArgumentException(
- "Unexpected mathematical result in FRLayout:calcPositions [xdisp]"); }
-
- double newYDisp = fvd.getY() / deltaLength
- * Math.min(deltaLength, temperature);
- xyd.setLocation(xyd.getX()+newXDisp, xyd.getY()+newYDisp);
-
- double borderWidth = getSize().getWidth() / 50.0;
- double newXPos = xyd.getX();
- if (newXPos < borderWidth) {
- newXPos = borderWidth + Math.random() * borderWidth * 2.0;
- } else if (newXPos > (getSize().getWidth() - borderWidth)) {
- newXPos = getSize().getWidth() - borderWidth - Math.random()
- * borderWidth * 2.0;
- }
-
- double newYPos = xyd.getY();
- if (newYPos < borderWidth) {
- newYPos = borderWidth + Math.random() * borderWidth * 2.0;
- } else if (newYPos > (getSize().getHeight() - borderWidth)) {
- newYPos = getSize().getHeight() - borderWidth
- - Math.random() * borderWidth * 2.0;
- }
-
- xyd.setLocation(newXPos, newYPos);
- }
-
- protected void calcAttraction(E e) {
- Pair<V> endpoints = getGraph().getEndpoints(e);
- V v1 = endpoints.getFirst();
- V v2 = endpoints.getSecond();
- boolean v1_locked = isLocked(v1);
- boolean v2_locked = isLocked(v2);
-
- if(v1_locked && v2_locked) {
- // both locked, do nothing
- return;
- }
- Point2D p1 = transform(v1);
- Point2D p2 = transform(v2);
- if(p1 == null || p2 == null) return;
- double xDelta = p1.getX() - p2.getX();
- double yDelta = p1.getY() - p2.getY();
-
- double deltaLength = Math.max(EPSILON, Math.sqrt((xDelta * xDelta)
- + (yDelta * yDelta)));
-
- double force = (deltaLength * deltaLength) / attraction_constant;
-
- if (Double.isNaN(force)) { throw new IllegalArgumentException(
- "Unexpected mathematical result in FRLayout:calcPositions [force]"); }
-
- double dx = (xDelta / deltaLength) * force;
- double dy = (yDelta / deltaLength) * force;
- if(v1_locked == false) {
- FRVertexData fvd1 = getFRData(v1);
- fvd1.offset(-dx, -dy);
- }
- if(v2_locked == false) {
- FRVertexData fvd2 = getFRData(v2);
- fvd2.offset(dx, dy);
- }
- }
-
- protected void calcRepulsion(V v1) {
- FRVertexData fvd1 = getFRData(v1);
- if(fvd1 == null)
- return;
- fvd1.setLocation(0, 0);
-
- try {
- for(V v2 : getGraph().getVertices()) {
-
-// if (isLocked(v2)) continue;
- if (v1 != v2) {
- Point2D p1 = transform(v1);
- Point2D p2 = transform(v2);
- if(p1 == null || p2 == null) continue;
- double xDelta = p1.getX() - p2.getX();
- double yDelta = p1.getY() - p2.getY();
-
- double deltaLength = Math.max(EPSILON, Math
- .sqrt((xDelta * xDelta) + (yDelta * yDelta)));
-
- double force = (repulsion_constant * repulsion_constant) / deltaLength;
-
- if (Double.isNaN(force)) { throw new RuntimeException(
- "Unexpected mathematical result in FRLayout:calcPositions [repulsion]"); }
-
- fvd1.offset((xDelta / deltaLength) * force,
- (yDelta / deltaLength) * force);
- }
- }
- } catch(ConcurrentModificationException cme) {
- calcRepulsion(v1);
- }
- }
-
- private void cool() {
- temperature *= (1.0 - currentIteration / (double) mMaxIterations);
- }
-
- /**
- * Sets the maximum number of iterations.
- */
- public void setMaxIterations(int maxIterations) {
- mMaxIterations = maxIterations;
- }
-
- protected FRVertexData getFRData(V v) {
- return frVertexData.get(v);
- }
-
- /**
- * This one is an incremental visualization.
- */
- public boolean isIncremental() {
- return true;
- }
-
- /**
- * Returns true once the current iteration has passed the maximum count,
- * <tt>MAX_ITERATIONS</tt>.
- */
- public boolean done() {
- if (currentIteration > mMaxIterations || temperature < 1.0/max_dimension)
- {
- return true;
- }
- return false;
- }
-
- protected static class FRVertexData extends Point2D.Double
- {
- protected void offset(double x, double y)
- {
- this.x += x;
- this.y += y;
- }
-
- protected double norm()
- {
- return Math.sqrt(x*x + y*y);
- }
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.awt.geom.Rectangle2D;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.algorithms.layout.util.RandomLocationTransformer;
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * Implements the Fruchterman-Reingold force-directed algorithm for node layout.
- * This is an experimental attempt at optimizing {@code FRLayout}; if it is successful
- * it will be folded back into {@code FRLayout} (and this class will disappear).
- *
- * <p>Behavior is determined by the following settable parameters:
- * <ul>
- * <li/>attraction multiplier: how much edges try to keep their vertices together
- * <li/>repulsion multiplier: how much vertices try to push each other apart
- * <li/>maximum iterations: how many iterations this algorithm will use before stopping
- * </ul>
- * Each of the first two defaults to 0.75; the maximum number of iterations defaults to 700.
-
- *
- * @see "Fruchterman and Reingold, 'Graph Drawing by Force-directed Placement'"
- * @see http://i11www.ilkd.uni-karlsruhe.de/teaching/SS_04/visualisierung/papers/fruchterman91graph.pdf
- *
- * @author Tom Nelson
- * @author Scott White, Yan-Biao Boey, Danyel Fisher
- */
-public class FRLayout2<V, E> extends AbstractLayout<V, E> implements IterativeContext {
-
- private double forceConstant;
-
- private double temperature;
-
- private int currentIteration;
-
- private int maxIterations = 700;
-
- private Map<V, Point2D> frVertexData =
- LazyMap.decorate(new HashMap<V,Point2D>(), new Factory<Point2D>() {
- public Point2D create() {
- return new Point2D.Double();
- }});
-
- private double attraction_multiplier = 0.75;
-
- private double attraction_constant;
-
- private double repulsion_multiplier = 0.75;
-
- private double repulsion_constant;
-
- private double max_dimension;
-
- private Rectangle2D innerBounds = new Rectangle2D.Double();
-
- private boolean checked = false;
-
- /**
- * Creates an instance for the specified graph.
- */
- public FRLayout2(Graph<V, E> g) {
- super(g);
- }
-
- /**
- * Creates an instance of size {@code d} for the specified graph.
- */
- public FRLayout2(Graph<V, E> g, Dimension d) {
- super(g, new RandomLocationTransformer<V>(d), d);
- max_dimension = Math.max(d.height, d.width);
- initialize();
- }
-
- @Override
- public void setSize(Dimension size) {
- if(initialized == false)
- setInitializer(new RandomLocationTransformer<V>(size));
- super.setSize(size);
- double t = size.width/50.0;
- innerBounds.setFrameFromDiagonal(t,t,size.width-t,size.height-t);
- max_dimension = Math.max(size.height, size.width);
- }
-
- /**
- * Sets the attraction multiplier.
- */
- public void setAttractionMultiplier(double attraction) {
- this.attraction_multiplier = attraction;
- }
-
- /**
- * Sets the repulsion multiplier.
- */
- public void setRepulsionMultiplier(double repulsion) {
- this.repulsion_multiplier = repulsion;
- }
-
- public void reset() {
- doInit();
- }
-
- public void initialize() {
- doInit();
- }
-
- private void doInit() {
- Graph<V,E> graph = getGraph();
- Dimension d = getSize();
- if(graph != null && d != null) {
- currentIteration = 0;
- temperature = d.getWidth() / 10;
-
- forceConstant =
- Math
- .sqrt(d.getHeight()
- * d.getWidth()
- / graph.getVertexCount());
-
- attraction_constant = attraction_multiplier * forceConstant;
- repulsion_constant = repulsion_multiplier * forceConstant;
- }
- }
-
- private double EPSILON = 0.000001D;
-
- /**
- * Moves the iteration forward one notch, calculation attraction and
- * repulsion between vertices and edges and cooling the temperature.
- */
- public synchronized void step() {
- currentIteration++;
-
- /**
- * Calculate repulsion
- */
- while(true) {
-
- try {
- for(V v1 : getGraph().getVertices()) {
- calcRepulsion(v1);
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
-
- /**
- * Calculate attraction
- */
- while(true) {
- try {
- for(E e : getGraph().getEdges()) {
- calcAttraction(e);
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
-
-
- while(true) {
- try {
- for(V v : getGraph().getVertices()) {
- if (isLocked(v)) continue;
- calcPositions(v);
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- cool();
- }
-
- protected synchronized void calcPositions(V v) {
- Point2D fvd = this.frVertexData.get(v);
- if(fvd == null) return;
- Point2D xyd = transform(v);
- double deltaLength = Math.max(EPSILON,
- Math.sqrt(fvd.getX()*fvd.getX()+fvd.getY()*fvd.getY()));
-
- double newXDisp = fvd.getX() / deltaLength
- * Math.min(deltaLength, temperature);
-
- assert Double.isNaN(newXDisp) == false : "Unexpected mathematical result in FRLayout:calcPositions [xdisp]";
-
- double newYDisp = fvd.getY() / deltaLength
- * Math.min(deltaLength, temperature);
- double newX = xyd.getX()+Math.max(-5, Math.min(5,newXDisp));
- double newY = xyd.getY()+Math.max(-5, Math.min(5,newYDisp));
-
- newX = Math.max(innerBounds.getMinX(), Math.min(newX, innerBounds.getMaxX()));
- newY = Math.max(innerBounds.getMinY(), Math.min(newY, innerBounds.getMaxY()));
-
- xyd.setLocation(newX, newY);
-
- }
-
- protected void calcAttraction(E e) {
- Pair<V> endpoints = getGraph().getEndpoints(e);
- V v1 = endpoints.getFirst();
- V v2 = endpoints.getSecond();
- boolean v1_locked = isLocked(v1);
- boolean v2_locked = isLocked(v2);
-
- if(v1_locked && v2_locked) {
- // both locked, do nothing
- return;
- }
- Point2D p1 = transform(v1);
- Point2D p2 = transform(v2);
- if(p1 == null || p2 == null) return;
- double xDelta = p1.getX() - p2.getX();
- double yDelta = p1.getY() - p2.getY();
-
- double deltaLength = Math.max(EPSILON, p1.distance(p2));
-
- double force = deltaLength / attraction_constant;
-
- assert Double.isNaN(force) == false : "Unexpected mathematical result in FRLayout:calcPositions [force]";
-
- double dx = xDelta * force;
- double dy = yDelta * force;
- Point2D fvd1 = frVertexData.get(v1);
- Point2D fvd2 = frVertexData.get(v2);
- if(v2_locked) {
- // double the offset for v1, as v2 will not be moving in
- // the opposite direction
- fvd1.setLocation(fvd1.getX()-2*dx, fvd1.getY()-2*dy);
- } else {
- fvd1.setLocation(fvd1.getX()-dx, fvd1.getY()-dy);
- }
- if(v1_locked) {
- // double the offset for v2, as v1 will not be moving in
- // the opposite direction
- fvd2.setLocation(fvd2.getX()+2*dx, fvd2.getY()+2*dy);
- } else {
- fvd2.setLocation(fvd2.getX()+dx, fvd2.getY()+dy);
- }
- }
-
- protected void calcRepulsion(V v1) {
- Point2D fvd1 = frVertexData.get(v1);
- if(fvd1 == null) return;
- fvd1.setLocation(0, 0);
- boolean v1_locked = isLocked(v1);
-
- try {
- for(V v2 : getGraph().getVertices()) {
-
- boolean v2_locked = isLocked(v2);
- if (v1_locked && v2_locked) continue;
- if (v1 != v2) {
- Point2D p1 = transform(v1);
- Point2D p2 = transform(v2);
- if(p1 == null || p2 == null) continue;
- double xDelta = p1.getX() - p2.getX();
- double yDelta = p1.getY() - p2.getY();
-
- double deltaLength = Math.max(EPSILON, p1.distanceSq(p2));
-
- double force = (repulsion_constant * repulsion_constant);// / deltaLength;
-
- double forceOverDeltaLength = force / deltaLength;
-
- assert Double.isNaN(force) == false : "Unexpected mathematical result in FRLayout:calcPositions [repulsion]";
-
- if(v2_locked) {
- // double the offset for v1, as v2 will not be moving in
- // the opposite direction
- fvd1.setLocation(fvd1.getX()+2 * xDelta * forceOverDeltaLength,
- fvd1.getY()+ 2 * yDelta * forceOverDeltaLength);
- } else {
- fvd1.setLocation(fvd1.getX()+xDelta * forceOverDeltaLength,
- fvd1.getY()+yDelta * forceOverDeltaLength);
- }
- }
- }
- } catch(ConcurrentModificationException cme) {
- calcRepulsion(v1);
- }
- }
-
- private void cool() {
- temperature *= (1.0 - currentIteration / (double) maxIterations);
- }
-
- /**
- * Sets the maximum number of iterations.
- */
- public void setMaxIterations(int maxIterations) {
- this.maxIterations = maxIterations;
- }
-
- /**
- * This one is an incremental visualization.
- */
- public boolean isIncremental() {
- return true;
- }
-
- /**
- * Returns true once the current iteration has passed the maximum count,
- * <tt>MAX_ITERATIONS</tt>.
- */
- public boolean done() {
- if (currentIteration > maxIterations || temperature < 1.0/max_dimension) {
- if (!checked)
- {
-// System.out.println("current iteration: " + currentIteration);
-// System.out.println("temperature: " + temperature);
- checked = true;
- }
- return true;
- }
- return false;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- *
- * Created on Apr 12, 2005
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Shape;
-import java.util.Collection;
-
-/**
- * Interface for coordinate-based selection of graph components.
- * @author Tom Nelson
- * @author Joshua O'Madadhain
- */
-public interface GraphElementAccessor<V, E>
-{
- /**
- * Returns a vertex which is associated with the
- * location <code>(x,y)</code>. This is typically determined
- * with respect to the vertex's location as specified
- * by a <code>Layout</code>.
- */
- V getVertex(Layout<V,E> layout, double x, double y);
-
- /**
- * Returns the vertices contained within {@code rectangle} relative
- * to {@code layout}.
- */
- Collection<V> getVertices(Layout<V,E> layout, Shape rectangle);
-
- /**
- * Returns an edge which is associated with the
- * location <code>(x,y)</code>. This is typically determined
- * with respect to the edge's location as specified
- * by a {@code Layout}.
- */
- E getEdge(Layout<V,E> layout, double x, double y);
-
-}
\ No newline at end of file
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.layout;
-
-import edu.uci.ics.jung.algorithms.layout.util.RandomLocationTransformer;
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.map.LazyMap;
-
-import java.awt.geom.Point2D;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Implements a self-organizing map layout algorithm, based on Meyer's
- * self-organizing graph methods.
- *
- * @author Yan Biao Boey
- */
-public class ISOMLayout<V, E> extends AbstractLayout<V,E> implements IterativeContext {
-
- Map<V, ISOMVertexData> isomVertexData =
- LazyMap.decorate(new HashMap<V, ISOMVertexData>(),
- new Factory<ISOMVertexData>() {
- public ISOMVertexData create() {
- return new ISOMVertexData();
- }});
-
- private int maxEpoch;
- private int epoch;
-
- private int radiusConstantTime;
- private int radius;
- private int minRadius;
-
- private double adaption;
- private double initialAdaption;
- private double minAdaption;
-
- protected GraphElementAccessor<V,E> elementAccessor =
- new RadiusGraphElementAccessor<V,E>();
-
- private double coolingFactor;
-
- private List<V> queue = new ArrayList<V>();
- private String status = null;
-
- /**
- * Returns the current number of epochs and execution status, as a string.
- */
- public String getStatus() {
- return status;
- }
-
- /**
- * Creates an <code>ISOMLayout</code> instance for the specified graph <code>g</code>.
- * @param g
- */
- public ISOMLayout(Graph<V,E> g) {
- super(g);
- }
-
- public void initialize() {
-
- setInitializer(new RandomLocationTransformer<V>(getSize()));
- maxEpoch = 2000;
- epoch = 1;
-
- radiusConstantTime = 100;
- radius = 5;
- minRadius = 1;
-
- initialAdaption = 90.0D / 100.0D;
- adaption = initialAdaption;
- minAdaption = 0;
-
- //factor = 0; //Will be set later on
- coolingFactor = 2;
-
- //temperature = 0.03;
- //initialJumpRadius = 100;
- //jumpRadius = initialJumpRadius;
-
- //delay = 100;
- }
-
-
- /**
- * Advances the current positions of the graph elements.
- */
- public void step() {
- status = "epoch: " + epoch + "; ";
- if (epoch < maxEpoch) {
- adjust();
- updateParameters();
- status += " status: running";
-
- } else {
- status += "adaption: " + adaption + "; ";
- status += "status: done";
-// done = true;
- }
- }
-
- private synchronized void adjust() {
- //Generate random position in graph space
- Point2D tempXYD = new Point2D.Double();
-
- // creates a new XY data location
- tempXYD.setLocation(10 + Math.random() * getSize().getWidth(),
- 10 + Math.random() * getSize().getHeight());
-
- //Get closest vertex to random position
- V winner = elementAccessor.getVertex(this, tempXYD.getX(), tempXYD.getY());
-
- while(true) {
- try {
- for(V v : getGraph().getVertices()) {
- ISOMVertexData ivd = getISOMVertexData(v);
- ivd.distance = 0;
- ivd.visited = false;
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- adjustVertex(winner, tempXYD);
- }
-
- private synchronized void updateParameters() {
- epoch++;
- double factor = Math.exp(-1 * coolingFactor * (1.0 * epoch / maxEpoch));
- adaption = Math.max(minAdaption, factor * initialAdaption);
- //jumpRadius = (int) factor * jumpRadius;
- //temperature = factor * temperature;
- if ((radius > minRadius) && (epoch % radiusConstantTime == 0)) {
- radius--;
- }
- }
-
- private synchronized void adjustVertex(V v, Point2D tempXYD) {
- queue.clear();
- ISOMVertexData ivd = getISOMVertexData(v);
- ivd.distance = 0;
- ivd.visited = true;
- queue.add(v);
- V current;
-
- while (!queue.isEmpty()) {
- current = queue.remove(0);
- ISOMVertexData currData = getISOMVertexData(current);
- Point2D currXYData = transform(current);
-
- double dx = tempXYD.getX() - currXYData.getX();
- double dy = tempXYD.getY() - currXYData.getY();
- double factor = adaption / Math.pow(2, currData.distance);
-
- currXYData.setLocation(currXYData.getX()+(factor*dx), currXYData.getY()+(factor*dy));
-
- if (currData.distance < radius) {
- Collection<V> s = getGraph().getNeighbors(current);
- while(true) {
- try {
- for(V child : s) {
- ISOMVertexData childData = getISOMVertexData(child);
- if (childData != null && !childData.visited) {
- childData.visited = true;
- childData.distance = currData.distance + 1;
- queue.add(child);
- }
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- }
- }
- }
-
- protected ISOMVertexData getISOMVertexData(V v) {
- return isomVertexData.get(v);
- }
-
- /**
- * This one is an incremental visualization.
- * @return <code>true</code> is the layout algorithm is incremental, <code>false</code> otherwise
- */
- public boolean isIncremental() {
- return true;
- }
-
- /**
- * Returns <code>true</code> if the vertex positions are no longer being
- * updated. Currently <code>ISOMLayout</code> stops updating vertex
- * positions after a certain number of iterations have taken place.
- * @return <code>true</code> if the vertex position updates have stopped,
- * <code>false</code> otherwise
- */
- public boolean done() {
- return epoch >= maxEpoch;
- }
-
- protected static class ISOMVertexData {
- int distance;
- boolean visited;
-
- protected ISOMVertexData() {
- distance = 0;
- visited = false;
- }
- }
-
- /**
- * Resets the layout iteration count to 0, which allows the layout algorithm to
- * continue updating vertex positions.
- */
- public void reset() {
- epoch = 0;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout;
-/*
- * This source is under the same license with JUNG.
- * http://jung.sourceforge.net/license.txt for a description.
- */
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.ConcurrentModificationException;
-
-import edu.uci.ics.jung.algorithms.layout.util.RandomLocationTransformer;
-import edu.uci.ics.jung.algorithms.shortestpath.Distance;
-import edu.uci.ics.jung.algorithms.shortestpath.DistanceStatistics;
-import edu.uci.ics.jung.algorithms.shortestpath.UnweightedShortestPath;
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Implements the Kamada-Kawai algorithm for node layout.
- * Does not respect filter calls, and sometimes crashes when the view changes to it.
- *
- * @see "Tomihisa Kamada and Satoru Kawai: An algorithm for drawing general indirect graphs. Information Processing Letters 31(1):7-15, 1989"
- * @see "Tomihisa Kamada: On visualization of abstract objects and relations. Ph.D. dissertation, Dept. of Information Science, Univ. of Tokyo, Dec. 1988."
- *
- * @author Masanori Harada
- */
-public class KKLayout<V,E> extends AbstractLayout<V,E> implements IterativeContext {
-
- private double EPSILON = 0.1d;
-
- private int currentIteration;
- private int maxIterations = 2000;
- private String status = "KKLayout";
-
- private double L; // the ideal length of an edge
- private double K = 1; // arbitrary const number
- private double[][] dm; // distance matrix
-
- private boolean adjustForGravity = true;
- private boolean exchangeVertices = true;
-
- private V[] vertices;
- private Point2D[] xydata;
-
- /**
- * Retrieves graph distances between vertices of the visible graph
- */
- protected Distance<V> distance;
-
- /**
- * The diameter of the visible graph. In other words, the maximum over all pairs
- * of vertices of the length of the shortest path between a and bf the visible graph.
- */
- protected double diameter;
-
- /**
- * A multiplicative factor which partly specifies the "preferred" length of an edge (L).
- */
- private double length_factor = 0.9;
-
- /**
- * A multiplicative factor which specifies the fraction of the graph's diameter to be
- * used as the inter-vertex distance between disconnected vertices.
- */
- private double disconnected_multiplier = 0.5;
-
- /**
- * Creates an instance for the specified graph.
- */
- public KKLayout(Graph<V,E> g)
- {
- this(g, new UnweightedShortestPath<V,E>(g));
- }
-
- /**
- * Creates an instance for the specified graph and distance metric.
- */
- public KKLayout(Graph<V,E> g, Distance<V> distance){
- super(g);
- this.distance = distance;
- }
-
- /**
- * Sets a multiplicative factor which
- * partly specifies the "preferred" length of an edge (L).
- */
- public void setLengthFactor(double length_factor){
- this.length_factor = length_factor;
- }
-
- /**
- * Sets a multiplicative factor that specifies the fraction of the graph's diameter to be
- * used as the inter-vertex distance between disconnected vertices.
- */
- public void setDisconnectedDistanceMultiplier(double disconnected_multiplier){
- this.disconnected_multiplier = disconnected_multiplier;
- }
-
- /**
- * Returns a string with information about the current status of the algorithm.
- */
- public String getStatus() {
- return status + this.getSize();
- }
-
- /**
- * Sets the maximum number of iterations.
- */
- public void setMaxIterations(int maxIterations) {
- this.maxIterations = maxIterations;
- }
-
- /**
- * This one is an incremental visualization.
- */
- public boolean isIncremental() {
- return true;
- }
-
- /**
- * Returns true once the current iteration has passed the maximum count.
- */
- public boolean done() {
- if (currentIteration > maxIterations) {
- return true;
- }
- return false;
- }
-
- @SuppressWarnings("unchecked")
- public void initialize() {
- currentIteration = 0;
-
- if(graph != null && size != null) {
-
- double height = size.getHeight();
- double width = size.getWidth();
-
- int n = graph.getVertexCount();
- dm = new double[n][n];
- vertices = (V[])graph.getVertices().toArray();
- xydata = new Point2D[n];
-
- // assign IDs to all visible vertices
- while(true) {
- try {
- int index = 0;
- for(V v : graph.getVertices()) {
- Point2D xyd = transform(v);
- vertices[index] = v;
- xydata[index] = xyd;
- index++;
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
-
- diameter = DistanceStatistics.<V,E>diameter(graph, distance, true);
-
- double L0 = Math.min(height, width);
- L = (L0 / diameter) * length_factor; // length_factor used to be hardcoded to 0.9
- //L = 0.75 * Math.sqrt(height * width / n);
-
- for (int i = 0; i < n - 1; i++) {
- for (int j = i + 1; j < n; j++) {
- Number d_ij = distance.getDistance(vertices[i], vertices[j]);
- Number d_ji = distance.getDistance(vertices[j], vertices[i]);
- double dist = diameter * disconnected_multiplier;
- if (d_ij != null)
- dist = Math.min(d_ij.doubleValue(), dist);
- if (d_ji != null)
- dist = Math.min(d_ji.doubleValue(), dist);
- dm[i][j] = dm[j][i] = dist;
- }
- }
- }
- }
-
- public void step() {
- try {
- currentIteration++;
- double energy = calcEnergy();
- status = "Kamada-Kawai V=" + getGraph().getVertexCount()
- + "(" + getGraph().getVertexCount() + ")"
- + " IT: " + currentIteration
- + " E=" + energy
- ;
-
- int n = getGraph().getVertexCount();
- if (n == 0)
- return;
-
- double maxDeltaM = 0;
- int pm = -1; // the node having max deltaM
- for (int i = 0; i < n; i++) {
- if (isLocked(vertices[i]))
- continue;
- double deltam = calcDeltaM(i);
-
- if (maxDeltaM < deltam) {
- maxDeltaM = deltam;
- pm = i;
- }
- }
- if (pm == -1)
- return;
-
- for (int i = 0; i < 100; i++) {
- double[] dxy = calcDeltaXY(pm);
- xydata[pm].setLocation(xydata[pm].getX()+dxy[0], xydata[pm].getY()+dxy[1]);
-
- double deltam = calcDeltaM(pm);
- if (deltam < EPSILON)
- break;
- }
-
- if (adjustForGravity)
- adjustForGravity();
-
- if (exchangeVertices && maxDeltaM < EPSILON) {
- energy = calcEnergy();
- for (int i = 0; i < n - 1; i++) {
- if (isLocked(vertices[i]))
- continue;
- for (int j = i + 1; j < n; j++) {
- if (isLocked(vertices[j]))
- continue;
- double xenergy = calcEnergyIfExchanged(i, j);
- if (energy > xenergy) {
- double sx = xydata[i].getX();
- double sy = xydata[i].getY();
- xydata[i].setLocation(xydata[j]);
- xydata[j].setLocation(sx, sy);
- return;
- }
- }
- }
- }
- }
- finally {
-// fireStateChanged();
- }
- }
-
- /**
- * Shift all vertices so that the center of gravity is located at
- * the center of the screen.
- */
- public void adjustForGravity() {
- Dimension d = getSize();
- double height = d.getHeight();
- double width = d.getWidth();
- double gx = 0;
- double gy = 0;
- for (int i = 0; i < xydata.length; i++) {
- gx += xydata[i].getX();
- gy += xydata[i].getY();
- }
- gx /= xydata.length;
- gy /= xydata.length;
- double diffx = width / 2 - gx;
- double diffy = height / 2 - gy;
- for (int i = 0; i < xydata.length; i++) {
- xydata[i].setLocation(xydata[i].getX()+diffx, xydata[i].getY()+diffy);
- }
- }
-
- /* (non-Javadoc)
- * @see edu.uci.ics.jung.visualization.layout.AbstractLayout#setSize(java.awt.Dimension)
- */
- @Override
- public void setSize(Dimension size) {
- if(initialized == false)
- setInitializer(new RandomLocationTransformer<V>(size));
- super.setSize(size);
- }
-
- /**
- * Enable or disable gravity point adjusting.
- */
- public void setAdjustForGravity(boolean on) {
- adjustForGravity = on;
- }
-
- /**
- * Returns true if gravity point adjusting is enabled.
- */
- public boolean getAdjustForGravity() {
- return adjustForGravity;
- }
-
- /**
- * Enable or disable the local minimum escape technique by
- * exchanging vertices.
- */
- public void setExchangeVertices(boolean on) {
- exchangeVertices = on;
- }
-
- /**
- * Returns true if the local minimum escape technique by
- * exchanging vertices is enabled.
- */
- public boolean getExchangeVertices() {
- return exchangeVertices;
- }
-
- /**
- * Determines a step to new position of the vertex m.
- */
- private double[] calcDeltaXY(int m) {
- double dE_dxm = 0;
- double dE_dym = 0;
- double d2E_d2xm = 0;
- double d2E_dxmdym = 0;
- double d2E_dymdxm = 0;
- double d2E_d2ym = 0;
-
- for (int i = 0; i < vertices.length; i++) {
- if (i != m) {
-
- double dist = dm[m][i];
- double l_mi = L * dist;
- double k_mi = K / (dist * dist);
- double dx = xydata[m].getX() - xydata[i].getX();
- double dy = xydata[m].getY() - xydata[i].getY();
- double d = Math.sqrt(dx * dx + dy * dy);
- double ddd = d * d * d;
-
- dE_dxm += k_mi * (1 - l_mi / d) * dx;
- dE_dym += k_mi * (1 - l_mi / d) * dy;
- d2E_d2xm += k_mi * (1 - l_mi * dy * dy / ddd);
- d2E_dxmdym += k_mi * l_mi * dx * dy / ddd;
- d2E_d2ym += k_mi * (1 - l_mi * dx * dx / ddd);
- }
- }
- // d2E_dymdxm equals to d2E_dxmdym.
- d2E_dymdxm = d2E_dxmdym;
-
- double denomi = d2E_d2xm * d2E_d2ym - d2E_dxmdym * d2E_dymdxm;
- double deltaX = (d2E_dxmdym * dE_dym - d2E_d2ym * dE_dxm) / denomi;
- double deltaY = (d2E_dymdxm * dE_dxm - d2E_d2xm * dE_dym) / denomi;
- return new double[]{deltaX, deltaY};
- }
-
- /**
- * Calculates the gradient of energy function at the vertex m.
- */
- private double calcDeltaM(int m) {
- double dEdxm = 0;
- double dEdym = 0;
- for (int i = 0; i < vertices.length; i++) {
- if (i != m) {
- double dist = dm[m][i];
- double l_mi = L * dist;
- double k_mi = K / (dist * dist);
-
- double dx = xydata[m].getX() - xydata[i].getX();
- double dy = xydata[m].getY() - xydata[i].getY();
- double d = Math.sqrt(dx * dx + dy * dy);
-
- double common = k_mi * (1 - l_mi / d);
- dEdxm += common * dx;
- dEdym += common * dy;
- }
- }
- return Math.sqrt(dEdxm * dEdxm + dEdym * dEdym);
- }
-
- /**
- * Calculates the energy function E.
- */
- private double calcEnergy() {
- double energy = 0;
- for (int i = 0; i < vertices.length - 1; i++) {
- for (int j = i + 1; j < vertices.length; j++) {
- double dist = dm[i][j];
- double l_ij = L * dist;
- double k_ij = K / (dist * dist);
- double dx = xydata[i].getX() - xydata[j].getX();
- double dy = xydata[i].getY() - xydata[j].getY();
- double d = Math.sqrt(dx * dx + dy * dy);
-
-
- energy += k_ij / 2 * (dx * dx + dy * dy + l_ij * l_ij -
- 2 * l_ij * d);
- }
- }
- return energy;
- }
-
- /**
- * Calculates the energy function E as if positions of the
- * specified vertices are exchanged.
- */
- private double calcEnergyIfExchanged(int p, int q) {
- if (p >= q)
- throw new RuntimeException("p should be < q");
- double energy = 0; // < 0
- for (int i = 0; i < vertices.length - 1; i++) {
- for (int j = i + 1; j < vertices.length; j++) {
- int ii = i;
- int jj = j;
- if (i == p) ii = q;
- if (j == q) jj = p;
-
- double dist = dm[i][j];
- double l_ij = L * dist;
- double k_ij = K / (dist * dist);
- double dx = xydata[ii].getX() - xydata[jj].getX();
- double dy = xydata[ii].getY() - xydata[jj].getY();
- double d = Math.sqrt(dx * dx + dy * dy);
-
- energy += k_ij / 2 * (dx * dx + dy * dy + l_ij * l_ij -
- 2 * l_ij * d);
- }
- }
- return energy;
- }
-
- public void reset() {
- currentIteration = 0;
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * A generalized interface is a mechanism for returning (x,y) coordinates
- * from vertices. In general, most of these methods are used to both control and
- * get information from the layout algorithm.
- * <p>
- * @author danyelf
- * @author tom nelson
- */
-public interface Layout<V, E> extends Transformer<V,Point2D> {
-
- /**
- * Initializes fields in the node that may not have
- * been set during the constructor. Must be called before
- * the iterations begin.
- */
- void initialize();
-
- /**
- * provides initial locations for all vertices.
- * @param initializer
- */
- void setInitializer(Transformer<V,Point2D> initializer);
-
- /**
- * setter for graph
- * @param graph
- */
- void setGraph(Graph<V,E> graph);
-
- /**
- * Returns the full graph (the one that was passed in at
- * construction time) that this Layout refers to.
- *
- */
- Graph<V,E> getGraph();
-
- /**
- *
- *
- */
- void reset();
-
- /**
- * @param d
- */
- void setSize(Dimension d);
-
- /**
- * Returns the current size of the visualization's space.
- */
- Dimension getSize();
-
-
- /**
- * Sets a flag which fixes this vertex in place.
- *
- * @param v vertex
- */
- void lock(V v, boolean state);
-
- /**
- * Returns <code>true</code> if the position of vertex <code>v</code>
- * is locked.
- */
- boolean isLocked(V v);
-
- /**
- * set the location of a vertex
- * @param v
- * @param location
- */
- void setLocation(V v, Point2D location);
-
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Aug 23, 2005
- */
-
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * a pure decorator for the Layout interface. Intended to be overridden
- * to provide specific behavior decoration
- *
- * @author Tom Nelson
- *
- */
-public abstract class LayoutDecorator<V, E> implements Layout<V, E>, IterativeContext {
-
- protected Layout<V, E> delegate;
-
- /**
- * Creates an instance backed by the specified delegate layout.
- */
- public LayoutDecorator(Layout<V, E> delegate) {
- this.delegate = delegate;
- }
-
- /**
- * Returns the backing (delegate) layout.
- */
- public Layout<V,E> getDelegate() {
- return delegate;
- }
-
- /**
- * Sets the backing (delegate) layout.
- */
- public void setDelegate(Layout<V,E> delegate) {
- this.delegate = delegate;
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.util.IterativeContext#done()
- */
- public void step() {
- if(delegate instanceof IterativeContext) {
- ((IterativeContext)delegate).step();
- }
- }
-
- /**
- *
- * @see edu.uci.ics.jung.algorithms.layout.Layout#initialize()
- */
- public void initialize() {
- delegate.initialize();
- }
-
- /**
- * @param initializer
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setInitializer(org.apache.commons.collections15.Transformer)
- */
- public void setInitializer(Transformer<V, Point2D> initializer) {
- delegate.setInitializer(initializer);
- }
-
- /**
- * @param v
- * @param location
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setLocation(java.lang.Object, java.awt.geom.Point2D)
- */
- public void setLocation(V v, Point2D location) {
- delegate.setLocation(v, location);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#getSize()
- */
- public Dimension getSize() {
- return delegate.getSize();
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#getGraph()
- */
- public Graph<V, E> getGraph() {
- return delegate.getGraph();
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#transform(Object)
- */
- public Point2D transform(V v) {
- return delegate.transform(v);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.util.IterativeContext#done()
- */
- public boolean done() {
- if(delegate instanceof IterativeContext) {
- return ((IterativeContext)delegate).done();
- }
- return true;
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#lock(Object, boolean)
- */
- public void lock(V v, boolean state) {
- delegate.lock(v, state);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#isLocked(Object)
- */
- public boolean isLocked(V v) {
- return delegate.isLocked(v);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#setSize(Dimension)
- */
- public void setSize(Dimension d) {
- delegate.setSize(d);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.layout.Layout#reset()
- */
- public void reset() {
- delegate.reset();
- }
-
- public void setGraph(Graph<V, E> graph) {
- delegate.setGraph(graph);
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.geom.Point2D;
-
-/**
- * Represents a point in polar coordinates: distance and angle from the origin.
- * Includes conversions between polar and Cartesian
- * coordinates (Point2D).
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- */
-public class PolarPoint
-{
- double theta;
- double radius;
-
- /**
- * Creates a new instance with radius and angle each 0.
- */
- public PolarPoint() {
- this(0,0);
- }
-
- /**
- * Creates a new instance with radius {@code radius} and angle {@code theta}.
- */
- public PolarPoint(double theta, double radius) {
- this.theta = theta;
- this.radius = radius;
- }
-
- /**
- * Returns the angle for this point.
- */
- public double getTheta() { return theta; }
-
- /**
- * Returns the radius for this point.
- */
- public double getRadius() { return radius; }
-
- /**
- * Sets the angle for this point to {@code theta}.
- */
- public void setTheta(double theta) { this.theta = theta; }
-
- /**
- * Sets the radius for this point to {@code theta}.
- */
- public void setRadius(double radius) { this.radius = radius; }
-
- /**
- * Returns the result of converting <code>polar</code> to Cartesian coordinates.
- */
- public static Point2D polarToCartesian(PolarPoint polar) {
- return polarToCartesian(polar.getTheta(), polar.getRadius());
- }
-
- /**
- * Returns the result of converting <code>(theta, radius)</code> to Cartesian coordinates.
- */
- public static Point2D polarToCartesian(double theta, double radius) {
- return new Point2D.Double(radius*Math.cos(theta), radius*Math.sin(theta));
- }
-
- /**
- * Returns the result of converting <code>point</code> to polar coordinates.
- */
- public static PolarPoint cartesianToPolar(Point2D point) {
- return cartesianToPolar(point.getX(), point.getY());
- }
-
- /**
- * Returns the result of converting <code>(x, y)</code> to polar coordinates.
- */
- public static PolarPoint cartesianToPolar(double x, double y) {
- double theta = Math.atan2(y,x);
- double radius = Math.sqrt(x*x+y*y);
- return new PolarPoint(theta, radius);
- }
-
- @Override
- public String toString() {
- return "PolarPoint[" + radius + "," + theta +"]";
- }
-
- /**
- * Sets the angle and radius of this point to those of {@code p}.
- */
- public void setLocation(PolarPoint p) {
- this.theta = p.getTheta();
- this.radius = p.getRadius();
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Jul 9, 2005
- */
-
-package edu.uci.ics.jung.algorithms.layout;
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.HashMap;
-import java.util.Map;
-
-import edu.uci.ics.jung.graph.Forest;
-
-/**
- * A radial layout for Tree or Forest graphs.
- *
- * @author Tom Nelson
- *
- */
-public class RadialTreeLayout<V,E> extends TreeLayout<V,E> {
-
- protected Map<V,PolarPoint> polarLocations;
-
- /**
- * Creates an instance for the specified graph with default X and Y distances.
- */
- public RadialTreeLayout(Forest<V,E> g) {
- this(g, DEFAULT_DISTX, DEFAULT_DISTY);
- }
-
- /**
- * Creates an instance for the specified graph and X distance with
- * default Y distance.
- */
- public RadialTreeLayout(Forest<V,E> g, int distx) {
- this(g, distx, DEFAULT_DISTY);
- }
-
- /**
- * Creates an instance for the specified graph, X distance, and Y distance.
- */
- public RadialTreeLayout(Forest<V,E> g, int distx, int disty) {
- super(g, distx, disty);
- }
-
- @Override
- protected void buildTree() {
- super.buildTree();
- this.polarLocations = new HashMap<V, PolarPoint>();
- setRadialLocations();
- }
-
- @Override
- public void setSize(Dimension size) {
- this.size = size;
- buildTree();
- }
-
- @Override
- protected void setCurrentPositionFor(V vertex) {
- locations.get(vertex).setLocation(m_currentPoint);
- }
-
- @Override
- public void setLocation(V v, Point2D location)
- {
- Point2D c = getCenter();
- Point2D pv = new Point2D.Double(location.getX() - c.getX(),
- location.getY() - c.getY());
- PolarPoint newLocation = PolarPoint.cartesianToPolar(pv);
- PolarPoint currentLocation = polarLocations.get(v);
- if (currentLocation == null)
- polarLocations.put(v, newLocation);
- else
- currentLocation.setLocation(newLocation);
- }
-
- /**
- * Returns the map from vertices to their locations in polar coordinates.
- */
- public Map<V,PolarPoint> getPolarLocations() {
- return polarLocations;
- }
-
- @Override
- public Point2D transform(V v) {
- PolarPoint pp = polarLocations.get(v);
- double centerX = getSize().getWidth()/2;
- double centerY = getSize().getHeight()/2;
- Point2D cartesian = PolarPoint.polarToCartesian(pp);
- cartesian.setLocation(cartesian.getX()+centerX,cartesian.getY()+centerY);
- return cartesian;
- }
-
- private Point2D getMaxXY() {
- double maxx = 0;
- double maxy = 0;
- for(Point2D p : locations.values()) {
- maxx = Math.max(maxx, p.getX());
- maxy = Math.max(maxy, p.getY());
- }
- return new Point2D.Double(maxx,maxy);
- }
-
- private void setRadialLocations() {
- Point2D max = getMaxXY();
- double maxx = max.getX();
- double maxy = max.getY();
- maxx = Math.max(maxx, size.width);
- double theta = 2*Math.PI/maxx;
-
- double deltaRadius = size.width/2/maxy;
- for(Map.Entry<V, Point2D> entry : locations.entrySet()) {
- V v = entry.getKey();
- Point2D p = entry.getValue();
- PolarPoint polarPoint = new PolarPoint(p.getX()*theta, (p.getY() - this.distY)*deltaRadius);
- polarLocations.put(v, polarPoint);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- *
- * Created on Apr 12, 2005
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Shape;
-import java.awt.geom.Point2D;
-import java.util.Collection;
-import java.util.ConcurrentModificationException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-
-import edu.uci.ics.jung.graph.Graph;
-
-
-/**
- * Simple implementation of PickSupport that returns the vertex or edge
- * that is closest to the specified location. This implementation
- * provides the same picking options that were available in
- * previous versions of AbstractLayout.
- *
- * <p>No element will be returned that is farther away than the specified
- * maximum distance.
- *
- * @author Tom Nelson
- * @author Joshua O'Madadhain
- */
-public class RadiusGraphElementAccessor<V, E> implements GraphElementAccessor<V, E> {
-
- protected double maxDistance;
-
- /**
- * Creates an instance with an effectively infinite default maximum distance.
- */
- public RadiusGraphElementAccessor() {
- this(Math.sqrt(Double.MAX_VALUE - 1000));
- }
-
- /**
- * Creates an instance with the specified default maximum distance.
- */
- public RadiusGraphElementAccessor(double maxDistance) {
- this.maxDistance = maxDistance;
- }
-
- /**
- * Gets the vertex nearest to the location of the (x,y) location selected,
- * within a distance of <tt>maxDistance</tt>. Iterates through all
- * visible vertices and checks their distance from the click. Override this
- * method to provde a more efficient implementation.
- */
- public V getVertex(Layout<V,E> layout, double x, double y) {
- return getVertex(layout, x, y, this.maxDistance);
- }
-
- /**
- * Gets the vertex nearest to the location of the (x,y) location selected,
- * within a distance of <tt>maxDistance</tt>. Iterates through all
- * visible vertices and checks their distance from the click. Override this
- * method to provde a more efficient implementation.
- * @param x
- * @param y
- * @param maxDistance temporarily overrides member maxDistance
- */
- public V getVertex(Layout<V,E> layout, double x, double y, double maxDistance) {
- double minDistance = maxDistance * maxDistance;
- V closest = null;
- while(true) {
- try {
- for(V v : layout.getGraph().getVertices()) {
-
- Point2D p = layout.transform(v);
- double dx = p.getX() - x;
- double dy = p.getY() - y;
- double dist = dx * dx + dy * dy;
- if (dist < minDistance) {
- minDistance = dist;
- closest = v;
- }
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- return closest;
- }
-
- public Collection<V> getVertices(Layout<V,E> layout, Shape rectangle) {
- Set<V> pickedVertices = new HashSet<V>();
- while(true) {
- try {
- for(V v : layout.getGraph().getVertices()) {
-
- Point2D p = layout.transform(v);
- if(rectangle.contains(p)) {
- pickedVertices.add(v);
- }
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- return pickedVertices;
- }
-
- /**
- * Gets the edge nearest to the location of the (x,y) location selected.
- * Calls the longer form of the call.
- */
- public E getEdge(Layout<V,E> layout, double x, double y) {
- return getEdge(layout, x, y, this.maxDistance);
- }
-
- /**
- * Gets the edge nearest to the location of the (x,y) location selected,
- * within a distance of <tt>maxDistance</tt>, Iterates through all
- * visible edges and checks their distance from the click. Override this
- * method to provide a more efficient implementation.
- *
- * @param x
- * @param y
- * @param maxDistance temporarily overrides member maxDistance
- * @return Edge closest to the click.
- */
- public E getEdge(Layout<V,E> layout, double x, double y, double maxDistance) {
- double minDistance = maxDistance * maxDistance;
- E closest = null;
- while(true) {
- try {
- for(E e : layout.getGraph().getEdges()) {
-
- // Could replace all this set stuff with getFrom_internal() etc.
- Graph<V, E> graph = layout.getGraph();
- Collection<V> vertices = graph.getIncidentVertices(e);
- Iterator<V> vertexIterator = vertices.iterator();
- V v1 = vertexIterator.next();
- V v2 = vertexIterator.next();
- // Get coords
- Point2D p1 = layout.transform(v1);
- Point2D p2 = layout.transform(v2);
- double x1 = p1.getX();
- double y1 = p1.getY();
- double x2 = p2.getX();
- double y2 = p2.getY();
- // Calculate location on line closest to (x,y)
- // First, check that v1 and v2 are not coincident.
- if (x1 == x2 && y1 == y2)
- continue;
- double b =
- ((y - y1) * (y2 - y1) + (x - x1) * (x2 - x1))
- / ((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
- //
- double distance2; // square of the distance
- if (b <= 0)
- distance2 = (x - x1) * (x - x1) + (y - y1) * (y - y1);
- else if (b >= 1)
- distance2 = (x - x2) * (x - x2) + (y - y2) * (y - y2);
- else {
- double x3 = x1 + b * (x2 - x1);
- double y3 = y1 + b * (y2 - y1);
- distance2 = (x - x3) * (x - x3) + (y - y3) * (y - y3);
- }
-
- if (distance2 < minDistance) {
- minDistance = distance2;
- closest = e;
- }
- }
- break;
- } catch(ConcurrentModificationException cme) {}
- }
- return closest;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import edu.uci.ics.jung.algorithms.layout.util.RandomLocationTransformer;
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-import org.apache.commons.collections15.map.LazyMap;
-
-import java.awt.Dimension;
-import java.awt.event.ComponentAdapter;
-import java.awt.event.ComponentEvent;
-import java.awt.geom.Point2D;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * The SpringLayout package represents a visualization of a set of nodes. The
- * SpringLayout, which is initialized with a Graph, assigns X/Y locations to
- * each node. When called <code>relax()</code>, the SpringLayout moves the
- * visualization forward one step.
- *
- * @author Danyel Fisher
- * @author Joshua O'Madadhain
- */
-public class SpringLayout<V, E> extends AbstractLayout<V,E> implements IterativeContext {
-
- protected double stretch = 0.70;
- protected Transformer<E, Integer> lengthFunction;
- protected int repulsion_range_sq = 100 * 100;
- protected double force_multiplier = 1.0 / 3.0;
-
- protected Map<V, SpringVertexData> springVertexData =
- LazyMap.decorate(new HashMap<V, SpringVertexData>(),
- new Factory<SpringVertexData>() {
- public SpringVertexData create() {
- return new SpringVertexData();
- }});
-
- /**
- * Constructor for a SpringLayout for a raw graph with associated
- * dimension--the input knows how big the graph is. Defaults to the unit
- * length function.
- */
- @SuppressWarnings("unchecked")
- public SpringLayout(Graph<V,E> g) {
- this(g, new ConstantTransformer(30));
- }
-
- /**
- * Constructor for a SpringLayout for a raw graph with associated component.
- *
- * @param g the {@code Graph} to lay out
- * @param length_function provides a length for each edge
- */
- public SpringLayout(Graph<V,E> g, Transformer<E, Integer> length_function)
- {
- super(g);
- this.lengthFunction = length_function;
- }
-
- /**
- * Returns the current value for the stretch parameter.
- * @see #setStretch(double)
- */
- public double getStretch() {
- return stretch;
- }
-
- /**
- * Sets the dimensions of the available space for layout to {@code size}.
- */
- @Override
- public void setSize(Dimension size) {
- if(initialized == false)
- setInitializer(new RandomLocationTransformer<V>(size));
- super.setSize(size);
- }
-
- /**
- * <p>Sets the stretch parameter for this instance. This value
- * specifies how much the degrees of an edge's incident vertices
- * should influence how easily the endpoints of that edge
- * can move (that is, that edge's tendency to change its length).</p>
- *
- * <p>The default value is 0.70. Positive values less than 1 cause
- * high-degree vertices to move less than low-degree vertices, and
- * values > 1 cause high-degree vertices to move more than
- * low-degree vertices. Negative values will have unpredictable
- * and inconsistent results.</p>
- * @param stretch
- */
- public void setStretch(double stretch) {
- this.stretch = stretch;
- }
-
- /**
- * Returns the current value for the node repulsion range.
- * @see #setRepulsionRange(int)
- */
- public int getRepulsionRange() {
- return (int)(Math.sqrt(repulsion_range_sq));
- }
-
- /**
- * Sets the node repulsion range (in drawing area units) for this instance.
- * Outside this range, nodes do not repel each other. The default value
- * is 100. Negative values are treated as their positive equivalents.
- * @param range
- */
- public void setRepulsionRange(int range) {
- this.repulsion_range_sq = range * range;
- }
-
- /**
- * Returns the current value for the edge length force multiplier.
- * @see #setForceMultiplier(double)
- */
- public double getForceMultiplier() {
- return force_multiplier;
- }
-
- /**
- * Sets the force multiplier for this instance. This value is used to
- * specify how strongly an edge "wants" to be its default length
- * (higher values indicate a greater attraction for the default length),
- * which affects how much its endpoints move at each timestep.
- * The default value is 1/3. A value of 0 turns off any attempt by the
- * layout to cause edges to conform to the default length. Negative
- * values cause long edges to get longer and short edges to get shorter; use
- * at your own risk.
- */
- public void setForceMultiplier(double force) {
- this.force_multiplier = force;
- }
-
- public void initialize() {
- }
-
- /**
- * Relaxation step. Moves all nodes a smidge.
- */
- public void step() {
- try {
- for(V v : getGraph().getVertices()) {
- SpringVertexData svd = springVertexData.get(v);
- if (svd == null) {
- continue;
- }
- svd.dx /= 4;
- svd.dy /= 4;
- svd.edgedx = svd.edgedy = 0;
- svd.repulsiondx = svd.repulsiondy = 0;
- }
- } catch(ConcurrentModificationException cme) {
- step();
- }
-
- relaxEdges();
- calculateRepulsion();
- moveNodes();
- }
-
- protected void relaxEdges() {
- try {
- for(E e : getGraph().getEdges()) {
- Pair<V> endpoints = getGraph().getEndpoints(e);
- V v1 = endpoints.getFirst();
- V v2 = endpoints.getSecond();
-
- Point2D p1 = transform(v1);
- Point2D p2 = transform(v2);
- if(p1 == null || p2 == null) continue;
- double vx = p1.getX() - p2.getX();
- double vy = p1.getY() - p2.getY();
- double len = Math.sqrt(vx * vx + vy * vy);
-
- double desiredLen = lengthFunction.transform(e);
-
- // round from zero, if needed [zero would be Bad.].
- len = (len == 0) ? .0001 : len;
-
- double f = force_multiplier * (desiredLen - len) / len;
-
- f = f * Math.pow(stretch, (getGraph().degree(v1) + getGraph().degree(v2) - 2));
-
- // the actual movement distance 'dx' is the force multiplied by the
- // distance to go.
- double dx = f * vx;
- double dy = f * vy;
- SpringVertexData v1D, v2D;
- v1D = springVertexData.get(v1);
- v2D = springVertexData.get(v2);
-
- v1D.edgedx += dx;
- v1D.edgedy += dy;
- v2D.edgedx += -dx;
- v2D.edgedy += -dy;
- }
- } catch(ConcurrentModificationException cme) {
- relaxEdges();
- }
- }
-
- protected void calculateRepulsion() {
- try {
- for (V v : getGraph().getVertices()) {
- if (isLocked(v)) continue;
-
- SpringVertexData svd = springVertexData.get(v);
- if(svd == null) continue;
- double dx = 0, dy = 0;
-
- for (V v2 : getGraph().getVertices()) {
- if (v == v2) continue;
- Point2D p = transform(v);
- Point2D p2 = transform(v2);
- if(p == null || p2 == null) continue;
- double vx = p.getX() - p2.getX();
- double vy = p.getY() - p2.getY();
- double distanceSq = p.distanceSq(p2);
- if (distanceSq == 0) {
- dx += Math.random();
- dy += Math.random();
- } else if (distanceSq < repulsion_range_sq) {
- double factor = 1;
- dx += factor * vx / distanceSq;
- dy += factor * vy / distanceSq;
- }
- }
- double dlen = dx * dx + dy * dy;
- if (dlen > 0) {
- dlen = Math.sqrt(dlen) / 2;
- svd.repulsiondx += dx / dlen;
- svd.repulsiondy += dy / dlen;
- }
- }
- } catch(ConcurrentModificationException cme) {
- calculateRepulsion();
- }
- }
-
- protected void moveNodes()
- {
- synchronized (getSize()) {
- try {
- for (V v : getGraph().getVertices()) {
- if (isLocked(v)) continue;
- SpringVertexData vd = springVertexData.get(v);
- if(vd == null) continue;
- Point2D xyd = transform(v);
-
- vd.dx += vd.repulsiondx + vd.edgedx;
- vd.dy += vd.repulsiondy + vd.edgedy;
-
- // keeps nodes from moving any faster than 5 per time unit
- xyd.setLocation(xyd.getX()+Math.max(-5, Math.min(5, vd.dx)),
- xyd.getY()+Math.max(-5, Math.min(5, vd.dy)));
-
- Dimension d = getSize();
- int width = d.width;
- int height = d.height;
-
- if (xyd.getX() < 0) {
- xyd.setLocation(0, xyd.getY());
- } else if (xyd.getX() > width) {
- xyd.setLocation(width, xyd.getY());
- }
- if (xyd.getY() < 0) {
- xyd.setLocation(xyd.getX(), 0);
- } else if (xyd.getY() > height) {
- xyd.setLocation(xyd.getX(), height);
- }
-
- }
- } catch(ConcurrentModificationException cme) {
- moveNodes();
- }
- }
- }
-
- protected static class SpringVertexData {
- protected double edgedx;
- protected double edgedy;
- protected double repulsiondx;
- protected double repulsiondy;
-
- /** movement speed, x */
- protected double dx;
-
- /** movement speed, y */
- protected double dy;
- }
-
-
- /**
- * Used for changing the size of the layout in response to a component's size.
- */
- public class SpringDimensionChecker extends ComponentAdapter {
- @Override
- public void componentResized(ComponentEvent e) {
- setSize(e.getComponent().getSize());
- }
- }
-
- /**
- * This one is an incremental visualization
- */
- public boolean isIncremental() {
- return true;
- }
-
- /**
- * For now, we pretend it never finishes.
- */
- public boolean done() {
- return false;
- }
-
- /**
- * No effect.
- */
- public void reset() {
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.ConcurrentModificationException;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * The SpringLayout package represents a visualization of a set of nodes. The
- * SpringLayout, which is initialized with a Graph, assigns X/Y locations to
- * each node. When called <code>relax()</code>, the SpringLayout moves the
- * visualization forward one step.
- *
- *
- *
- * @author Danyel Fisher
- * @author Joshua O'Madadhain
- */
-public class SpringLayout2<V, E> extends SpringLayout<V,E>
-{
- protected int currentIteration;
- protected int averageCounter;
- protected int loopCountMax = 4;
- protected boolean done;
-
- protected Point2D averageDelta = new Point2D.Double();
-
- /**
- * Constructor for a SpringLayout for a raw graph with associated
- * dimension--the input knows how big the graph is. Defaults to the unit
- * length function.
- */
- @SuppressWarnings("unchecked")
- public SpringLayout2(Graph<V,E> g) {
- super(g);
- }
-
- /**
- * Constructor for a SpringLayout for a raw graph with associated component.
- *
- * @param g the {@code Graph} to lay out
- * @param length_function provides a length for each edge
- */
- public SpringLayout2(Graph<V,E> g, Transformer<E, Integer> length_function)
- {
- super(g, length_function);
- }
-
- /**
- * Relaxation step. Moves all nodes a smidge.
- */
- @Override
- public void step() {
- super.step();
- currentIteration++;
- testAverageDeltas();
- }
-
- private void testAverageDeltas() {
- double dx = this.averageDelta.getX();
- double dy = this.averageDelta.getY();
- if(Math.abs(dx) < .001 && Math.abs(dy) < .001) {
- done = true;
- System.err.println("done, dx="+dx+", dy="+dy);
- }
- if(currentIteration > loopCountMax) {
- this.averageDelta.setLocation(0,0);
- averageCounter = 0;
- currentIteration = 0;
- }
- }
-
- @Override
- protected void moveNodes() {
- synchronized (getSize()) {
- try {
- for (V v : getGraph().getVertices()) {
- if (isLocked(v)) continue;
- SpringVertexData vd = springVertexData.get(v);
- if(vd == null) continue;
- Point2D xyd = transform(v);
-
- vd.dx += vd.repulsiondx + vd.edgedx;
- vd.dy += vd.repulsiondy + vd.edgedy;
-
-// int currentCount = currentIteration % this.loopCountMax;
-// System.err.println(averageCounter+" --- vd.dx="+vd.dx+", vd.dy="+vd.dy);
-// System.err.println("averageDelta was "+averageDelta);
-
- averageDelta.setLocation(
- ((averageDelta.getX() * averageCounter) + vd.dx) / (averageCounter+1),
- ((averageDelta.getY() * averageCounter) + vd.dy) / (averageCounter+1)
- );
-// System.err.println("averageDelta now "+averageDelta);
-// System.err.println();
- averageCounter++;
-
- // keeps nodes from moving any faster than 5 per time unit
- xyd.setLocation(xyd.getX()+Math.max(-5, Math.min(5, vd.dx)),
- xyd.getY()+Math.max(-5, Math.min(5, vd.dy)));
-
- Dimension d = getSize();
- int width = d.width;
- int height = d.height;
-
- if (xyd.getX() < 0) {
- xyd.setLocation(0, xyd.getY());// setX(0);
- } else if (xyd.getX() > width) {
- xyd.setLocation(width, xyd.getY()); //setX(width);
- }
- if (xyd.getY() < 0) {
- xyd.setLocation(xyd.getX(),0);//setY(0);
- } else if (xyd.getY() > height) {
- xyd.setLocation(xyd.getX(), height); //setY(height);
- }
-
- }
- } catch(ConcurrentModificationException cme) {
- moveNodes();
- }
- }
- }
-
- @Override
- public boolean done() {
- return done;
- }
-
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Created on Jul 21, 2005
- *
- * Copyright (c) 2005, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * StaticLayout places the vertices in the locations specified by its Transformer<V,Point2D>
- * initializer. Vertex locations can be placed in a Map<V,Point2D> and then supplied to
- * this layout as follows:
- * <code>
- Transformer<V,Point2D> vertexLocations =
- TransformerUtils.mapTransformer(map);
- * </code>
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- * @param <V>
- * @param <E>
- */
-public class StaticLayout<V, E> extends AbstractLayout<V,E> {
-
- /**
- * Creates an instance for the specified graph, locations, and size.
- */
- public StaticLayout(Graph<V,E> graph, Transformer<V,Point2D> initializer, Dimension size) {
- super(graph, initializer, size);
- }
-
- /**
- * Creates an instance for the specified graph and locations, with default size.
- */
- public StaticLayout(Graph<V,E> graph, Transformer<V,Point2D> initializer) {
- super(graph, initializer);
- }
-
- /**
- * Creates an instance for the specified graph and default size; vertex locations
- * are randomly assigned.
- */
- public StaticLayout(Graph<V,E> graph) {
- super(graph);
- }
-
- /**
- * Creates an instance for the specified graph and size.
- */
- public StaticLayout(Graph<V,E> graph, Dimension size) {
- super(graph, size);
- }
-
- public void initialize() {}
-
- public void reset() {}
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Jul 9, 2005
- */
-
-package edu.uci.ics.jung.algorithms.layout;
-import java.awt.Dimension;
-import java.awt.Point;
-import java.awt.geom.Point2D;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.graph.Forest;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.TreeUtils;
-
-/**
- * @author Karlheinz Toni
- * @author Tom Nelson - converted to jung2
- *
- */
-
-public class TreeLayout<V,E> implements Layout<V,E> {
-
- protected Dimension size = new Dimension(600,600);
- protected Forest<V,E> graph;
- protected Map<V,Integer> basePositions = new HashMap<V,Integer>();
-
- protected Map<V, Point2D> locations =
- LazyMap.decorate(new HashMap<V, Point2D>(),
- new Transformer<V,Point2D>() {
- public Point2D transform(V arg0) {
- return new Point2D.Double();
- }});
-
- protected transient Set<V> alreadyDone = new HashSet<V>();
-
- /**
- * The default horizontal vertex spacing. Initialized to 50.
- */
- public static int DEFAULT_DISTX = 50;
-
- /**
- * The default vertical vertex spacing. Initialized to 50.
- */
- public static int DEFAULT_DISTY = 50;
-
- /**
- * The horizontal vertex spacing. Defaults to {@code DEFAULT_XDIST}.
- */
- protected int distX = 50;
-
- /**
- * The vertical vertex spacing. Defaults to {@code DEFAULT_YDIST}.
- */
- protected int distY = 50;
-
- protected transient Point m_currentPoint = new Point();
-
- /**
- * Creates an instance for the specified graph with default X and Y distances.
- */
- public TreeLayout(Forest<V,E> g) {
- this(g, DEFAULT_DISTX, DEFAULT_DISTY);
- }
-
- /**
- * Creates an instance for the specified graph and X distance with
- * default Y distance.
- */
- public TreeLayout(Forest<V,E> g, int distx) {
- this(g, distx, DEFAULT_DISTY);
- }
-
- /**
- * Creates an instance for the specified graph, X distance, and Y distance.
- */
- public TreeLayout(Forest<V,E> g, int distx, int disty) {
- if (g == null)
- throw new IllegalArgumentException("Graph must be non-null");
- if (distx < 1 || disty < 1)
- throw new IllegalArgumentException("X and Y distances must each be positive");
- this.graph = g;
- this.distX = distx;
- this.distY = disty;
- buildTree();
- }
-
- protected void buildTree() {
- this.m_currentPoint = new Point(0, 20);
- Collection<V> roots = TreeUtils.getRoots(graph);
- if (roots.size() > 0 && graph != null) {
- calculateDimensionX(roots);
- for(V v : roots) {
- calculateDimensionX(v);
- m_currentPoint.x += this.basePositions.get(v)/2 + this.distX;
- buildTree(v, this.m_currentPoint.x);
- }
- }
- int width = 0;
- for(V v : roots) {
- width += basePositions.get(v);
- }
- }
-
- protected void buildTree(V v, int x) {
-
- if (!alreadyDone.contains(v)) {
- alreadyDone.add(v);
-
- //go one level further down
- this.m_currentPoint.y += this.distY;
- this.m_currentPoint.x = x;
-
- this.setCurrentPositionFor(v);
-
- int sizeXofCurrent = basePositions.get(v);
-
- int lastX = x - sizeXofCurrent / 2;
-
- int sizeXofChild;
- int startXofChild;
-
- for (V element : graph.getSuccessors(v)) {
- sizeXofChild = this.basePositions.get(element);
- startXofChild = lastX + sizeXofChild / 2;
- buildTree(element, startXofChild);
- lastX = lastX + sizeXofChild + distX;
- }
- this.m_currentPoint.y -= this.distY;
- }
- }
-
- private int calculateDimensionX(V v) {
-
- int size = 0;
- int childrenNum = graph.getSuccessors(v).size();
-
- if (childrenNum != 0) {
- for (V element : graph.getSuccessors(v)) {
- size += calculateDimensionX(element) + distX;
- }
- }
- size = Math.max(0, size - distX);
- basePositions.put(v, size);
-
- return size;
- }
-
- private int calculateDimensionX(Collection<V> roots) {
-
- int size = 0;
- for(V v : roots) {
- int childrenNum = graph.getSuccessors(v).size();
-
- if (childrenNum != 0) {
- for (V element : graph.getSuccessors(v)) {
- size += calculateDimensionX(element) + distX;
- }
- }
- size = Math.max(0, size - distX);
- basePositions.put(v, size);
- }
-
- return size;
- }
-
- /**
- * This method is not supported by this class. The size of the layout
- * is determined by the topology of the tree, and by the horizontal
- * and vertical spacing (optionally set by the constructor).
- */
- public void setSize(Dimension size) {
- throw new UnsupportedOperationException("Size of TreeLayout is set" +
- " by vertex spacing in constructor");
- }
-
- protected void setCurrentPositionFor(V vertex) {
- int x = m_currentPoint.x;
- int y = m_currentPoint.y;
- if(x < 0) size.width -= x;
-
- if(x > size.width-distX)
- size.width = x + distX;
-
- if(y < 0) size.height -= y;
- if(y > size.height-distY)
- size.height = y + distY;
- locations.get(vertex).setLocation(m_currentPoint);
-
- }
-
- public Graph<V,E> getGraph() {
- return graph;
- }
-
- public Dimension getSize() {
- return size;
- }
-
- public void initialize() {
-
- }
-
- public boolean isLocked(V v) {
- return false;
- }
-
- public void lock(V v, boolean state) {
- }
-
- public void reset() {
- }
-
- public void setGraph(Graph<V,E> graph) {
- if(graph instanceof Forest) {
- this.graph = (Forest<V,E>)graph;
- buildTree();
- } else {
- throw new IllegalArgumentException("graph must be a Forest");
- }
- }
-
- public void setInitializer(Transformer<V, Point2D> initializer) {
- }
-
- /**
- * Returns the center of this layout's area.
- */
- public Point2D getCenter() {
- return new Point2D.Double(size.getWidth()/2,size.getHeight()/2);
- }
-
- public void setLocation(V v, Point2D location) {
- locations.get(v).setLocation(location);
- }
-
- public Point2D transform(V v) {
- return locations.get(v);
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Algorithms for assigning 2D coordinates (typically used for graph visualizations)
-to vertices.
-Current layout algorithms include:
-<ul>
-<li/><code>Layout, AbstractLayout</code>: interface and abstract class defining the Layout contract and handling
-some common implementation details
-<li/><code>AggregateLayout</code>: allows multiple layouts to be combined and manipulated as one layout
-<li/><code>BalloonLayout</code>: places vertices on nested circles (trees/forests only)
-<li/><code>CircleLayout</code>: places vertices on a circle
-<li/><code>DAGLayout</code>: places vertices in a hierarchy (directed acyclic graphs only)
-<li/><code>FRLayout</code>: Fruchterman-Reingold algorithm (force-directed)
-<li/><code>ISOMLayout</code>: self-organizing map layout
-<li/><code>KKLayout</code>: Kamada-Kawai algorithm (tries to maintain specified distances)
-<li/><code>RadialTreeLayout</code>: places vertices on concentric circles (trees only)
-<li/><code>SpringLayout</code>: simple force-directed layout
-<li/><code>StaticLayout</code>: places vertices at user-specified locations
-<li/><code>TreeLayout</code>: simple tree/forest layout
-</ul>
-
-Rendering and other aspects of visualization are handled in the <code>visualization</code> package.
-
-</body>
-</html>
-
+++ /dev/null
-/*
- * Created on Jul 19, 2005
- *
- * Copyright (c) 2005, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.layout.util;
-
-import java.awt.Dimension;
-import java.awt.geom.Point2D;
-import java.util.Date;
-import java.util.Random;
-
-import org.apache.commons.collections15.Transformer;
-
-/**
- * Transforms the input type into a random location within
- * the bounds of the Dimension property.
- * This is used as the backing Transformer for the LazyMap
- * for many Layouts,
- * and provides a random location for unmapped vertices
- * the first time they are accessed.
- *
- * @author Tom Nelson
- *
- * @param <V>
- */
-public class RandomLocationTransformer<V> implements Transformer<V,Point2D> {
-
- Dimension d;
- Random random;
-
- /**
- * Creates an instance with the specified size which uses the current time
- * as the random seed.
- */
- public RandomLocationTransformer(Dimension d) {
- this(d, new Date().getTime());
- }
-
- /**
- * Creates an instance with the specified dimension and random seed.
- * @param d
- * @param seed
- */
- public RandomLocationTransformer(final Dimension d, long seed) {
- this.d = d;
- this.random = new Random(seed);
- }
-
- public Point2D transform(V v) {
- return new Point2D.Double(random.nextDouble() * d.width, random.nextDouble() * d.height);
- }
-}
+++ /dev/null
-package edu.uci.ics.jung.algorithms.layout.util;
-
-/**
- * Interface for operating the relax iterations on a layout.
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- */
-public interface Relaxer {
-
- /**
- * Execute a loop of steps in a new Thread,
- * firing an event after each step.
- */
- void relax();
-
- /**
- * Execute a loop of steps in the calling
- * thread, firing no events.
- */
- void prerelax();
-
- /**
- * Make the relaxer thread wait.
- */
- void pause();
-
- /**
- * Make the relaxer thread resume.
- *
- */
- void resume();
-
- /**
- * Set flags to stop the relaxer thread.
- */
- void stop();
-
- /**
- * Sets the sleep time.
- */
- void setSleepTime(long i);
-}
+++ /dev/null
-/*
- * Copyright (c) 2005, the JUNG Project and the Regents of the University of
- * California All rights reserved.
- *
- * This software is open-source under the BSD license; see either "license.txt"
- * or http://jung.sourceforge.net/license.txt for a description.
- *
- *
- */
-package edu.uci.ics.jung.algorithms.layout.util;
-
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-
-/**
- *
- * Implementation of a relaxer thread for layouts.
- * Extracted from the {@code VisualizationModel} in previous
- * versions of JUNG.
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- */
-public class VisRunner implements Relaxer, Runnable {
-
- protected boolean running;
- protected IterativeContext process;
- protected boolean stop;
- protected boolean manualSuspend;
- protected Thread thread;
-
- /**
- * how long the relaxer thread pauses between iteration loops.
- */
- protected long sleepTime = 100L;
-
-
- /**
- * Creates an instance for the specified process.
- */
- public VisRunner(IterativeContext process) {
- this.process = process;
- }
-
- /**
- * @return the relaxerThreadSleepTime
- */
- public long getSleepTime() {
- return sleepTime;
- }
-
- /**
- * @param sleepTime the sleep time to set for this thread
- */
- public void setSleepTime(long sleepTime) {
- this.sleepTime = sleepTime;
- }
-
- public void prerelax() {
- manualSuspend = true;
- long timeNow = System.currentTimeMillis();
- while (System.currentTimeMillis() - timeNow < 500 && !process.done()) {
- process.step();
- }
- manualSuspend = false;
- }
-
- public void pause() {
- manualSuspend = true;
- }
-
- public void relax() {
- // in case its running
- stop();
- stop = false;
- thread = new Thread(this);
- thread.setPriority(Thread.MIN_PRIORITY);
- thread.start();
- }
-
- /**
- * Used for synchronization.
- */
- public Object pauseObject = new String("PAUSE OBJECT");
-
- public void resume() {
- manualSuspend = false;
- if(running == false) {
- prerelax();
- relax();
- } else {
- synchronized(pauseObject) {
- pauseObject.notifyAll();
- }
- }
- }
-
- public synchronized void stop() {
- if(thread != null) {
- manualSuspend = false;
- stop = true;
- // interrupt the relaxer, in case it is paused or sleeping
- // this should ensure that visRunnerIsRunning gets set to false
- try { thread.interrupt(); }
- catch(Exception ex) {
- // the applet security manager may have prevented this.
- // just sleep for a second to let the thread stop on its own
- try { Thread.sleep(1000); }
- catch(InterruptedException ie) {} // ignore
- }
- synchronized (pauseObject) {
- pauseObject.notifyAll();
- }
- }
- }
-
- public void run() {
- running = true;
- try {
- while (!process.done() && !stop) {
- synchronized (pauseObject) {
- while (manualSuspend && !stop) {
- try {
- pauseObject.wait();
- } catch (InterruptedException e) {
- // ignore
- }
- }
- }
- process.step();
-
- if (stop)
- return;
-
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException ie) {
- // ignore
- }
- }
-
- } finally {
- running = false;
- }
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Utility classes for updating layout positions.
-
-</body>
-</html>
-
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.matrix;
-
-import java.util.Map;
-
-
-/**
- * An interface for specifying the behavior of graph/matrix operations
- * for a particular element type.
- * <P>
- * Graph/matrix multiplication requires the definition of two operations:
- * <p>
- * <ol>
- * <li>
- * Calculating an aggregate property of paths of length 2 between two
- * vertices v1 and v2 (analogous to element multiplication in matrix
- * arithmetic); this is handled by computePathData().
- * </li>
- * <li>
- * Aggregating the properties of all such paths, and assigning the result to
- * a new edge in the output graph (analogous to element addition in matrix
- * arithmetic); this is handled by mergePaths().
- * </li>
- * </ol>
- * <p>
- * Together, computePathData() and mergePaths() specify how the equivalent of
- * the vector inner (dot) product is to function.
- * <p>
- * For instance, to implement the equivalent of standard matrix multiplication
- * on two graphs, computePathData() should return the products of the
- * weights of a two-edge path, and mergePaths() should add
- * the output of computePathData() to an existing edge (or possibly create such
- * an edge if none exists).
- *
- * @author Joshua O'Madadhain
- */
-public interface MatrixElementOperations<E>
-{
- /**
- * If either e or pathData is null, the effect of mergePaths() is
- * implementation-dependent.
- *
- * @param e (possibly) existing edge in the output graph which
- * represents a path in the input graph(s)
- *
- * @param pathData data (which represents another path with the same source
- * and destination as e in the input graphs) which is to be merged into e
- */
- public void mergePaths(E e, Object pathData);
-
- /**
- * If either e1 or e2 is null, the Object reference returned should be null.
- *
- * @param e1 first edge from 2-edge path in input graph(s)
- * @param e2 second edge from 2-edge path in input graph(s)
- * @return aggregation of data from the edges of the 2-edge path
- * (from source of e1 to destination of e2) comprised of (e1, e2)
- */
- public Number computePathData(E e1, E e2);
-
- /**
- * Returns a map from edges to values.
- */
- public Map<E,Number> getEdgeData();
-}
\ No newline at end of file
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.matrix;
-
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Implements the basic matrix operations on double-precision values. Assumes
- * that the edges have a MutableDouble value.
- *
- * @author Joshua O'Madadhain
- */
-public class RealMatrixElementOperations<E> implements MatrixElementOperations<E>
-{
- private Map<E,Number> edgeData = new HashMap<E,Number>();
-
- /**
- * Creates an instance using the specified edge values.
- */
- public RealMatrixElementOperations(Map<E,Number> edgeData)
- {
- this.edgeData = edgeData;
- }
-
- /**
- * @see MatrixElementOperations#mergePaths(Object, Object)
- */
- public void mergePaths(E e, Object pathData)
- {
-
- Number pd = (Number)pathData;
- Number ed = edgeData.get(e);
- if (ed == null) {
- edgeData.put(e, pd);
-
- } else {
- edgeData.put(e, ed.doubleValue()+pd.doubleValue());
-
- }
-
- }
-
- /**
- * @see MatrixElementOperations#computePathData(Object, Object)
- */
- public Number computePathData(E e1, E e2)
- {
- double d1 = edgeData.get(e1).doubleValue();
- double d2 = edgeData.get(e2).doubleValue();
- return d1*d2;
- }
-
- /**
- * @return the edgeData
- */
- public Map<E, Number> getEdgeData() {
- return edgeData;
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Mechanisms for dealing with graphs as matrices. These include conversion to and
-from Colt matrices, and some matrix algorithms.
-</body>
-</html>
+++ /dev/null
-/**
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- * Created on Jun 7, 2008
- *
- */
-package edu.uci.ics.jung.algorithms.metrics;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * A class consisting of static methods for calculating graph metrics.
- */
-public class Metrics
-{
- /**
- * Returns a <code>Map</code> of vertices to their clustering coefficients.
- * The clustering coefficient cc(v) of a vertex v is defined as follows:
- * <ul>
- * <li/><code>degree(v) == {0,1}</code>: 0
- * <li/><code>degree(v) == n, n >= 2</code>: given S, the set of neighbors
- * of <code>v</code>: cc(v) = (the sum over all w in S of the number of
- * other elements of w that are neighbors of w) / ((|S| * (|S| - 1) / 2).
- * Less formally, the fraction of <code>v</code>'s neighbors that are also
- * neighbors of each other.
- * <p><b>Note</b>: This algorithm treats its argument as an undirected graph;
- * edge direction is ignored.
- * @param graph the graph whose clustering coefficients are to be calculated
- * @see "The structure and function of complex networks, M.E.J. Newman, aps.arxiv.org/abs/cond-mat/0303516"
- */
- public static <V,E> Map<V, Double> clusteringCoefficients(Graph<V,E> graph)
- {
- Map<V,Double> coefficients = new HashMap<V,Double>();
-
- for (V v : graph.getVertices())
- {
- int n = graph.getNeighborCount(v);
- if (n < 2)
- coefficients.put(v, new Double(0));
- else
- {
- // how many of v's neighbors are connected to each other?
- ArrayList<V> neighbors = new ArrayList<V>(graph.getNeighbors(v));
- double edge_count = 0;
- for (int i = 0; i < n; i++)
- {
- V w = neighbors.get(i);
- for (int j = i+1; j < n; j++ )
- {
- V x = neighbors.get(j);
- edge_count += graph.isNeighbor(w, x) ? 1 : 0;
- }
- }
- double possible_edges = (n * (n - 1))/2.0;
- coefficients.put(v, new Double(edge_count / possible_edges));
- }
- }
-
- return coefficients;
- }
-}
+++ /dev/null
-/*
- * Created on Sep 19, 2005
- *
- * Copyright (c) 2005, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.metrics;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * Calculates some of the measures from Burt's text "Structural Holes:
- * The Social Structure of Competition".
- *
- * <p><b>Notes</b>:
- * <ul>
- * <li/>Each of these measures assumes that each edge has an associated
- * non-null weight whose value is accessed through the specified
- * <code>Transformer</code> instance.
- * <li/>Nonexistent edges are treated as edges with weight 0 for purposes
- * of edge weight calculations.
- * </ul>
- *
- * <p>Based on code donated by Jasper Voskuilen and
- * Diederik van Liere of the Department of Information and Decision Sciences
- * at Erasmus University.</p>
- *
- * @author Joshua O'Madadhain
- * @author Jasper Voskuilen
- * @see "Ronald Burt, Structural Holes: The Social Structure of Competition"
- * @author Tom Nelson - converted to jung2
- */
-public class StructuralHoles<V,E> {
-
- protected Transformer<E, ? extends Number> edge_weight;
- protected Graph<V,E> g;
-
- /**
- * Creates a <code>StructuralHoles</code> instance based on the
- * edge weights specified by <code>nev</code>.
- */
- public StructuralHoles(Graph<V,E> graph, Transformer<E, ? extends Number> nev)
- {
- this.g = graph;
- this.edge_weight = nev;
- }
-
- /**
- * Burt's measure of the effective size of a vertex's network. Essentially, the
- * number of neighbors minus the average degree of those in <code>v</code>'s neighbor set,
- * not counting ties to <code>v</code>. Formally:
- * <pre>
- * effectiveSize(v) = v.degree() - (sum_{u in N(v)} sum_{w in N(u), w !=u,v} p(v,w)*m(u,w))
- * </pre>
- * where
- * <ul>
- * <li/><code>N(a) = a.getNeighbors()</code>
- * <li/><code>p(v,w) =</code> normalized mutual edge weight of v and w
- * <li/><code>m(u,w)</code> = maximum-scaled mutual edge weight of u and w
- * </ul>
- * @see #normalizedMutualEdgeWeight(Object, Object)
- * @see #maxScaledMutualEdgeWeight(Object, Object)
- */
- public double effectiveSize(V v)
- {
- double result = g.degree(v);
- for(V u : g.getNeighbors(v)) {
-
- for(V w : g.getNeighbors(u)) {
-
- if (w != v && w != u)
- result -= normalizedMutualEdgeWeight(v,w) *
- maxScaledMutualEdgeWeight(u,w);
- }
- }
- return result;
- }
-
- /**
- * Returns the effective size of <code>v</code> divided by the number of
- * alters in <code>v</code>'s network. (In other words,
- * <code>effectiveSize(v) / v.degree()</code>.)
- * If <code>v.degree() == 0</code>, returns 0.
- */
- public double efficiency(V v) {
- double degree = g.degree(v);
-
- if (degree == 0)
- return 0;
- else
- return effectiveSize(v) / degree;
- }
-
- /**
- * Burt's constraint measure (equation 2.4, page 55 of Burt, 1992). Essentially a
- * measure of the extent to which <code>v</code> is invested in people who are invested in
- * other of <code>v</code>'s alters (neighbors). The "constraint" is characterized
- * by a lack of primary holes around each neighbor. Formally:
- * <pre>
- * constraint(v) = sum_{w in MP(v), w != v} localConstraint(v,w)
- * </pre>
- * where MP(v) is the subset of v's neighbors that are both predecessors and successors of v.
- * @see #localConstraint(Object, Object)
- */
- public double constraint(V v) {
- double result = 0;
- for(V w : g.getSuccessors(v)) {
-
- if (v != w && g.isPredecessor(v,w))
- {
- result += localConstraint(v, w);
- }
- }
-
- return result;
- }
-
-
- /**
- * Calculates the hierarchy value for a given vertex. Returns <code>NaN</code> when
- * <code>v</code>'s degree is 0, and 1 when <code>v</code>'s degree is 1.
- * Formally:
- * <pre>
- * hierarchy(v) = (sum_{v in N(v), w != v} s(v,w) * log(s(v,w))}) / (v.degree() * Math.log(v.degree())
- * </pre>
- * where
- * <ul>
- * <li/><code>N(v) = v.getNeighbors()</code>
- * <li/><code>s(v,w) = localConstraint(v,w) / (aggregateConstraint(v) / v.degree())</code>
- * </ul>
- * @see #localConstraint(Object, Object)
- * @see #aggregateConstraint(Object)
- */
- public double hierarchy(V v)
- {
- double v_degree = g.degree(v);
-
- if (v_degree == 0)
- return Double.NaN;
- if (v_degree == 1)
- return 1;
-
- double v_constraint = aggregateConstraint(v);
-
- double numerator = 0;
- for (V w : g.getNeighbors(v)) {
-
- if (v != w)
- {
- double sl_constraint = localConstraint(v, w) / (v_constraint / v_degree);
- numerator += sl_constraint * Math.log(sl_constraint);
- }
- }
-
- return numerator / (v_degree * Math.log(v_degree));
- }
-
- /**
- * Returns the local constraint on <code>v</code> from a lack of primary holes
- * around its neighbor <code>v2</code>.
- * Based on Burt's equation 2.4. Formally:
- * <pre>
- * localConstraint(v1, v2) = ( p(v1,v2) + ( sum_{w in N(v)} p(v1,w) * p(w, v2) ) )^2
- * </pre>
- * where
- * <ul>
- * <li/><code>N(v) = v.getNeighbors()</code>
- * <li/><code>p(v,w) =</code> normalized mutual edge weight of v and w
- * </ul>
- * @see #normalizedMutualEdgeWeight(Object, Object)
- */
- public double localConstraint(V v1, V v2)
- {
- double nmew_vw = normalizedMutualEdgeWeight(v1, v2);
- double inner_result = 0;
- for (V w : g.getNeighbors(v1)) {
-
- inner_result += normalizedMutualEdgeWeight(v1,w) *
- normalizedMutualEdgeWeight(w,v2);
- }
- return (nmew_vw + inner_result) * (nmew_vw + inner_result);
- }
-
- /**
- * The aggregate constraint on <code>v</code>. Based on Burt's equation 2.7.
- * Formally:
- * <pre>
- * aggregateConstraint(v) = sum_{w in N(v)} localConstraint(v,w) * O(w)
- * </pre>
- * where
- * <ul>
- * <li/><code>N(v) = v.getNeighbors()</code>
- * <li/><code>O(w) = organizationalMeasure(w)</code>
- * </ul>
- */
- public double aggregateConstraint(V v)
- {
- double result = 0;
- for (V w : g.getNeighbors(v)) {
-
- result += localConstraint(v, w) * organizationalMeasure(g, w);
- }
- return result;
- }
-
- /**
- * A measure of the organization of individuals within the subgraph
- * centered on <code>v</code>. Burt's text suggests that this is
- * in some sense a measure of how "replaceable" <code>v</code> is by
- * some other element of this subgraph. Should be a number in the
- * closed interval [0,1].
- *
- * <p>This implementation returns 1. Users may wish to override this
- * method in order to define their own behavior.</p>
- */
- protected double organizationalMeasure(Graph<V,E> g, V v) {
- return 1.0;
- }
-
-
- /**
- * Returns the proportion of <code>v1</code>'s network time and energy invested
- * in the relationship with <code>v2</code>. Formally:
- * <pre>
- * normalizedMutualEdgeWeight(a,b) = mutual_weight(a,b) / (sum_c mutual_weight(a,c))
- * </pre>
- * Returns 0 if either numerator or denominator = 0, or if <code>v1 == v2</code>.
- * @see #mutualWeight(Object, Object)
- */
- protected double normalizedMutualEdgeWeight(V v1, V v2)
- {
- if (v1 == v2)
- return 0;
-
- double numerator = mutualWeight(v1, v2);
-
- if (numerator == 0)
- return 0;
-
- double denominator = 0;
- for (V v : g.getNeighbors(v1)) {
- denominator += mutualWeight(v1, v);
- }
- if (denominator == 0)
- return 0;
-
- return numerator / denominator;
- }
-
- /**
- * Returns the weight of the edge from <code>v1</code> to <code>v2</code>
- * plus the weight of the edge from <code>v2</code> to <code>v1</code>;
- * if either edge does not exist, it is treated as an edge with weight 0.
- * Undirected edges are treated as two antiparallel directed edges (that
- * is, if there is one undirected edge with weight <i>w</i> connecting
- * <code>v1</code> to <code>v2</code>, the value returned is 2<i>w</i>).
- * Ignores parallel edges; if there are any such, one is chosen at random.
- * Throws <code>NullPointerException</code> if either edge is
- * present but not assigned a weight by the constructor-specified
- * <code>NumberEdgeValue</code>.
- */
- protected double mutualWeight(V v1, V v2)
- {
- E e12 = g.findEdge(v1,v2);
- E e21 = g.findEdge(v2,v1);
- double w12 = (e12 != null ? edge_weight.transform(e12).doubleValue() : 0);
- double w21 = (e21 != null ? edge_weight.transform(e21).doubleValue() : 0);
-
- return w12 + w21;
- }
-
- /**
- * The marginal strength of v1's relation with contact vertex2.
- * Formally:
- * <pre>
- * normalized_mutual_weight = mutual_weight(a,b) / (max_c mutual_weight(a,c))
- * </pre>
- * Returns 0 if either numerator or denominator is 0, or if <code>v1 == v2</code>.
- * @see #mutualWeight(Object, Object)
- */
- protected double maxScaledMutualEdgeWeight(V v1, V v2)
- {
- if (v1 == v2)
- return 0;
-
- double numerator = mutualWeight(v1, v2);
-
- if (numerator == 0)
- return 0;
-
- double denominator = 0;
- for (V w : g.getNeighbors(v1)) {
-
- if (v2 != w)
- denominator = Math.max(numerator, mutualWeight(v1, w));
- }
-
- if (denominator == 0)
- return 0;
-
- return numerator / denominator;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.metrics;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.collections15.CollectionUtils;
-
-import edu.uci.ics.jung.graph.DirectedGraph;
-import edu.uci.ics.jung.graph.Graph;
-
-
-/**
- * TriadicCensus is a standard social network tool that counts, for each of the
- * different possible configurations of three vertices, the number of times
- * that that configuration occurs in the given graph.
- * This may then be compared to the set of expected counts for this particular
- * graph or to an expected sample. This is often used in p* modeling.
- * <p>
- * To use this class,
- * <pre>
- * long[] triad_counts = TriadicCensus(dg);
- * </pre>
- * where <code>dg</code> is a <code>DirectedGraph</code>.
- * ith element of the array (for i in [1,16]) is the number of
- * occurrences of the corresponding triad type.
- * (The 0th element is not meaningful; this array is effectively 1-based.)
- * To get the name of the ith triad (e.g. "003"),
- * look at the global constant array c.TRIAD_NAMES[i]
- * <p>
- * Triads are named as
- * (number of pairs that are mutually tied)
- * (number of pairs that are one-way tied)
- * (number of non-tied pairs)
- * in the triple. Since there are be only three pairs, there is a finite
- * set of these possible triads.
- * <p>
- * In fact, there are exactly 16, conventionally sorted by the number of
- * realized edges in the triad:
- * <table>
- * <tr><th>Number</th> <th>Configuration</th> <th>Notes</th></tr>
- * <tr><td>1</td><td>003</td><td>The empty triad</td></tr>
- * <tr><td>2</td><td>012</td><td></td></tr>
- * <tr><td>3</td><td>102</td><td></td></tr>
- * <tr><td>4</td><td>021D</td><td>"Down": the directed edges point away</td></tr>
- * <tr><td>5</td><td>021U</td><td>"Up": the directed edges meet</td></tr>
- * <tr><td>6</td><td>021C</td><td>"Circle": one in, one out</td></tr>
- * <tr><td>7</td><td>111D</td><td>"Down": 021D but one edge is mutual</td></tr>
- * <tr><td>8</td><td>111U</td><td>"Up": 021U but one edge is mutual</td></tr>
- * <tr><td>9</td><td>030T</td><td>"Transitive": two point to the same vertex</td></tr>
- * <tr><td>10</td><td>030C</td><td>"Circle": A->B->C->A</td></tr>
- * <tr><td>11</td><td>201</td><td></td></tr>
- * <tr><td>12</td><td>120D</td><td>"Down": 021D but the third edge is mutual</td></tr>
- * <tr><td>13</td><td>120U</td><td>"Up": 021U but the third edge is mutual</td></tr>
- * <tr><td>14</td><td>120C</td><td>"Circle": 021C but the third edge is mutual</td></tr>
- * <tr><td>15</td><td>210</td><td></td></tr>
- * <tr><td>16</td><td>300</td><td>The complete</td></tr>
- * </table>
- * <p>
- * This implementation takes O( m ), m is the number of edges in the graph.
- * <br>
- * It is based on
- * <a href="http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf">
- * A subquadratic triad census algorithm for large sparse networks
- * with small maximum degree</a>
- * Vladimir Batagelj and Andrej Mrvar, University of Ljubljana
- * Published in Social Networks.
- * @author Danyel Fisher
- * @author Tom Nelson - converted to jung2
- *
- */
-public class TriadicCensus {
-
- // NOTE THAT THIS RETURNS STANDARD 1-16 COUNT!
-
- // and their types
- public static final String[] TRIAD_NAMES = { "N/A", "003", "012", "102", "021D",
- "021U", "021C", "111D", "111U", "030T", "030C", "201", "120D",
- "120U", "120C", "210", "300" };
-
- public static final int MAX_TRIADS = TRIAD_NAMES.length;
-
- /**
- * Returns an array whose ith element (for i in [1,16]) is the number of
- * occurrences of the corresponding triad type in <code>g</code>.
- * (The 0th element is not meaningful; this array is effectively 1-based.)
- *
- * @param g
- */
- public static <V,E> long[] getCounts(DirectedGraph<V,E> g) {
- long[] count = new long[MAX_TRIADS];
-
- List<V> id = new ArrayList<V>(g.getVertices());
-
- // apply algorithm to each edge, one at at time
- for (int i_v = 0; i_v < g.getVertexCount(); i_v++) {
- V v = id.get(i_v);
- for(V u : g.getNeighbors(v)) {
- int triType = -1;
- if (id.indexOf(u) <= i_v)
- continue;
- Set<V> neighbors = new HashSet<V>(CollectionUtils.union(g.getNeighbors(u), g.getNeighbors(v)));
- neighbors.remove(u);
- neighbors.remove(v);
- if (g.isSuccessor(v,u) && g.isSuccessor(u,v)) {
- triType = 3;
- } else {
- triType = 2;
- }
- count[triType] += g.getVertexCount() - neighbors.size() - 2;
- for (V w : neighbors) {
- if (shouldCount(g, id, u, v, w)) {
- count [ triType ( triCode(g, u, v, w) ) ] ++;
- }
- }
- }
- }
- int sum = 0;
- for (int i = 2; i <= 16; i++) {
- sum += count[i];
- }
- int n = g.getVertexCount();
- count[1] = n * (n-1) * (n-2) / 6 - sum;
- return count;
- }
-
- /**
- * This is the core of the technique in the paper. Returns an int from 0 to
- * 65 based on: WU -> 32 UW -> 16 WV -> 8 VW -> 4 UV -> 2 VU -> 1
- *
- */
- public static <V,E> int triCode(Graph<V,E> g, V u, V v, V w) {
- int i = 0;
- i += link(g, v, u ) ? 1 : 0;
- i += link(g, u, v ) ? 2 : 0;
- i += link(g, v, w ) ? 4 : 0;
- i += link(g, w, v ) ? 8 : 0;
- i += link(g, u, w ) ? 16 : 0;
- i += link(g, w, u ) ? 32 : 0;
- return i;
- }
-
- protected static <V,E> boolean link(Graph<V,E> g, V a, V b) {
- return g.isPredecessor(b, a);
- }
-
-
- /**
- * Simply returns the triCode.
- * @param triCode
- * @return the string code associated with the numeric type
- */
- public static int triType( int triCode ) {
- return codeToType[ triCode ];
- }
-
- /**
- * For debugging purposes, this is copied straight out of the paper which
- * means that they refer to triad types 1-16.
- */
- protected static final int[] codeToType = { 1, 2, 2, 3, 2, 4, 6, 8, 2, 6, 5, 7, 3, 8,
- 7, 11, 2, 6, 4, 8, 5, 9, 9, 13, 6, 10, 9, 14, 7, 14, 12, 15, 2, 5,
- 6, 7, 6, 9, 10, 14, 4, 9, 9, 12, 8, 13, 14, 15, 3, 7, 8, 11, 7, 12,
- 14, 15, 8, 14, 13, 15, 11, 15, 15, 16 };
-
- /**
- * Make sure we have a canonical ordering: Returns true if u < w, or v < w <
- * u and v doesn't link to w
- *
- * @param id
- * @param u
- * @param v
- * @param w
- * @return true if u < w, or if v < w < u and v doesn't link to w; false otherwise
- */
- protected static <V,E> boolean shouldCount(Graph<V,E> g, List<V> id, V u, V v, V w) {
- int i_u = id.indexOf(u);
- int i_w = id.indexOf(w);
- if (i_u < i_w)
- return true;
- int i_v = id.indexOf(v);
- if ((i_v < i_w) && (i_w < i_u) && (!g.isNeighbor(w,v)))
- return true;
- return false;
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Specialized measures for graph properties. These currently include:
-
-<ul>
-<li/><code>StructuralHoles</code>: calculates some of Burt's 'structural holes'
-measures (e.g. efficiency, hierarchy, constraint).
-<li/><code>TriadicCensus</code>: returns counts for each triad type found in a
-graph.
-</ul>
-
-</body>
-</html>
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved.
- Permission to use, copy, modify, and distribute this software and its documentation
- for educational, research and non-profit purposes, without fee, and without a written
- agreement is hereby granted, provided that the above copyright notice, this paragraph
- and the following two paragraphs appear in all copies. This software program and
- documentation are copyrighted by The Regents of the University of California
- ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING
-SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA
-DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR
-ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH
-PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT,
-INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
-ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE
-UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
-BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
-AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
-SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-<p>Algorithms for graphs and networks.</p>
-
-<p>These algorithms are divided into categories as follows:
-<ul>
-<li/><b>blockmodel</b>: dividing graph elements (typically vertices) into
-equivalence classes,
-generally by topological properties (e.g. structural equivalence)
-<li/><b>cluster</b>: identifying coherent (not necessarily disjoint) groups of elements
-(e.g. weakly connected components, edge betweenness clustering)
-<li/><b>filters</b>: removing parts of a graph according to specified criteria
-<li/><b>flows</b>: calculating properties relating to network flows
-(e.g. max flow/min cut)
-<li/><b>generators</b>: creating graphs with certain properties
-<li/><b>importance (<i>deprecated</i>)</b>: assigning values to vertices/edges
-based on topological properties
-<li/><b>layout</b>: arrangement of graph elements, generally for visualization
-<li/><b>metrics</b>: calculating structural properties (triad census, structural
-holes)
-<li/><b>scoring</b>: assigning values (denoting significance, influence,
-centrality, etc.) to vertices/edges based on topological properties,
-e.g. PageRank, HITS, betweenness centrality (replaces "importance", above)
-<li/><b>shortestpath</b>: calculation of shortest paths between vertices
-<li/><b>util</b>: low-level utility classes used in a variety of algorithms
-</ul>
-
-</body>
-</html>
+++ /dev/null
-/*
- * Created on Jul 6, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.util.DelegateToEdgeTransformer;
-import edu.uci.ics.jung.algorithms.scoring.util.VEPair;
-import edu.uci.ics.jung.algorithms.util.IterativeContext;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * An abstract class for algorithms that assign scores to vertices based on iterative methods.
- * Generally, any (concrete) subclass will function by creating an instance, and then either calling
- * <code>evaluate</code> (if the user wants to iterate until the algorithms is 'done') or
- * repeatedly call <code>step</code> (if the user wants to observe the values at each step).
- */
-public abstract class AbstractIterativeScorer<V,E,T> implements IterativeContext, VertexScorer<V,T>
-{
- /**
- * Maximum number of iterations to use before terminating. Defaults to 100.
- */
- protected int max_iterations;
-
- /**
- * Minimum change from one step to the next; if all changes are <= tolerance,
- * no further updates will occur.
- * Defaults to 0.001.
- */
- protected double tolerance;
-
- /**
- * The graph on which the calculations are to be made.
- */
- protected Hypergraph<V,E> graph;
-
- /**
- * The total number of iterations used so far.
- */
- protected int total_iterations;
-
- /**
- * The edge weights used by this algorithm.
- */
- protected Transformer<VEPair<V,E>, ? extends Number> edge_weights;
-
- /**
- * Indicates whether the output and current values are in a 'swapped' state.
- * Intended for internal use only.
- */
- protected boolean output_reversed;
-
- /**
- * The map in which the output values are stored.
- */
- private Map<V, T> output;
-
- /**
- * The map in which the current values are stored.
- */
- private Map<V, T> current_values;
-
- /**
- * A flag representing whether this instance tolerates disconnected graphs.
- * Instances that do not accept disconnected graphs may have unexpected behavior
- * on disconnected graphs; they are not guaranteed to do an explicit check.
- * Defaults to true.
- */
- private boolean accept_disconnected_graph;
-
-
- protected boolean hyperedges_are_self_loops = false;
-
- /**
- * Sets the output value for this vertex.
- * @param v the vertex whose output value is to be set
- * @param value the value to set
- */
- protected void setOutputValue(V v, T value)
- {
- output.put(v, value);
- }
-
- /**
- * Gets the output value for this vertex.
- * @param v the vertex whose output value is to be retrieved
- * @return the output value for this vertex
- */
- protected T getOutputValue(V v)
- {
- return output.get(v);
- }
-
- /**
- * Gets the current value for this vertex
- * @param v the vertex whose current value is to be retrieved
- * @return the current value for this vertex
- */
- protected T getCurrentValue(V v)
- {
- return current_values.get(v);
- }
-
- /**
- * Sets the current value for this vertex.
- * @param v the vertex whose current value is to be set
- * @param value the current value to set
- */
- protected void setCurrentValue(V v, T value)
- {
- current_values.put(v, value);
- }
-
- /**
- * The largest change seen so far among all vertex scores.
- */
- protected double max_delta;
-
- /**
- * Creates an instance for the specified graph and edge weights.
- * @param g the graph for which the instance is to be created
- * @param edge_weights the edge weights for this instance
- */
- public AbstractIterativeScorer(Hypergraph<V,E> g, Transformer<E, ? extends Number> edge_weights)
- {
- this.graph = g;
- this.max_iterations = 100;
- this.tolerance = 0.001;
- this.accept_disconnected_graph = true;
- setEdgeWeights(edge_weights);
- }
-
- /**
- * Creates an instance for the specified graph <code>g</code>.
- * NOTE: This constructor does not set the internal
- * <code>edge_weights</code> variable. If this variable is used by
- * the subclass which invoked this constructor, it must be initialized
- * by that subclass.
- * @param g the graph for which the instance is to be created
- */
- public AbstractIterativeScorer(Hypergraph<V,E> g)
- {
- this.graph = g;
- this.max_iterations = 100;
- this.tolerance = 0.001;
- this.accept_disconnected_graph = true;
- }
-
- /**
- * Initializes the internal state for this instance.
- */
- protected void initialize()
- {
- this.total_iterations = 0;
- this.max_delta = Double.MIN_VALUE;
- this.output_reversed = true;
- this.current_values = new HashMap<V, T>();
- this.output = new HashMap<V, T>();
- }
-
- /**
- * Steps through this scoring algorithm until a termination condition is reached.
- */
- public void evaluate()
- {
- do
- step();
- while (!done());
- }
-
- /**
- * Returns true if the total number of iterations is greater than or equal to
- * <code>max_iterations</code>
- * or if the maximum value change observed is less than <code>tolerance</code>.
- */
- public boolean done()
- {
- return total_iterations >= max_iterations || max_delta < tolerance;
- }
-
- /**
- * Performs one step of this algorithm; updates the state (value) for each vertex.
- */
- public void step()
- {
- swapOutputForCurrent();
-
- for (V v : graph.getVertices())
- {
- double diff = update(v);
- updateMaxDelta(v, diff);
- }
- total_iterations++;
- afterStep();
- }
-
- /**
- *
- */
- protected void swapOutputForCurrent()
- {
- Map<V, T> tmp = output;
- output = current_values;
- current_values = tmp;
- output_reversed = !output_reversed;
- }
-
- /**
- * Updates the value for <code>v</code>.
- * This is the key
- * @param v the vertex whose value is to be updated
- * @return
- */
- protected abstract double update(V v);
-
- protected void updateMaxDelta(V v, double diff)
- {
- max_delta = Math.max(max_delta, diff);
- }
-
- protected void afterStep() {}
-
- public T getVertexScore(V v)
- {
- if (!graph.containsVertex(v))
- throw new IllegalArgumentException("Vertex " + v + " not an element of this graph");
-
- return output.get(v);
- }
-
- /**
- * Returns the maximum number of iterations that this instance will use.
- * @return the maximum number of iterations that <code>evaluate</code> will use
- * prior to terminating
- */
- public int getMaxIterations()
- {
- return max_iterations;
- }
-
- /**
- * Returns the number of iterations that this instance has used so far.
- * @return the number of iterations that this instance has used so far
- */
- public int getIterations()
- {
- return total_iterations;
- }
-
- /**
- * Sets the maximum number of times that <code>evaluate</code> will call <code>step</code>.
- * @param max_iterations the maximum
- */
- public void setMaxIterations(int max_iterations)
- {
- this.max_iterations = max_iterations;
- }
-
- /**
- * Gets the size of the largest change (difference between the current and previous values)
- * for any vertex that can be tolerated. Once all changes are less than this value,
- * <code>evaluate</code> will terminate.
- * @return the size of the largest change that evaluate() will permit
- */
- public double getTolerance()
- {
- return tolerance;
- }
-
- /**
- * Sets the size of the largest change (difference between the current and previous values)
- * for any vertex that can be tolerated.
- * @param tolerance the size of the largest change that evaluate() will permit
- */
- public void setTolerance(double tolerance)
- {
- this.tolerance = tolerance;
- }
-
- /**
- * Returns the Transformer that this instance uses to associate edge weights with each edge.
- * @return the Transformer that associates an edge weight with each edge
- */
- public Transformer<VEPair<V,E>, ? extends Number> getEdgeWeights()
- {
- return edge_weights;
- }
-
- /**
- * Sets the Transformer that this instance uses to associate edge weights with each edge
- * @param edge_weights the Transformer to use to associate an edge weight with each edge
- * @see edu.uci.ics.jung.algorithms.scoring.util.UniformDegreeWeight
- */
- public void setEdgeWeights(Transformer<E, ? extends Number> edge_weights)
- {
- this.edge_weights = new DelegateToEdgeTransformer<V,E>(edge_weights);
- }
-
- /**
- * Gets the edge weight for <code>e</code> in the context of its (incident) vertex <code>v</code>.
- * @param v the vertex incident to e as a context in which the edge weight is to be calculated
- * @param e the edge whose weight is to be returned
- * @return the edge weight for <code>e</code> in the context of its (incident) vertex <code>v</code>
- */
- protected Number getEdgeWeight(V v, E e)
- {
- return edge_weights.transform(new VEPair<V,E>(v,e));
- }
-
- /**
- * Collects the 'potential' from v (its current value) if it has no outgoing edges; this
- * can then be redistributed among the other vertices as a means of normalization.
- * @param v
- */
- protected void collectDisappearingPotential(V v) {}
-
- /**
- * Specifies whether this instance should accept vertices with no outgoing edges.
- * @param accept true if this instance should accept vertices with no outgoing edges, false otherwise
- */
- public void acceptDisconnectedGraph(boolean accept)
- {
- this.accept_disconnected_graph = accept;
- }
-
- /**
- * Returns true if this instance accepts vertices with no outgoing edges, and false otherwise.
- * @return true if this instance accepts vertices with no outgoing edges, otherwise false
- */
- public boolean isDisconnectedGraphOK()
- {
- return this.accept_disconnected_graph;
- }
-
- /**
- * Specifies whether hyperedges are to be treated as self-loops. If they
- * are, then potential will flow along a hyperedge a vertex to itself,
- * just as it does to all other vertices incident to that hyperedge.
- * @param arg if {@code true}, hyperedges are treated as self-loops
- */
- public void setHyperedgesAreSelfLoops(boolean arg)
- {
- this.hyperedges_are_self_loops = arg;
- }
-
- /**
- * Returns the effective number of vertices incident to this edge. If
- * the graph is a binary relation or if hyperedges are treated as self-loops,
- * the value returned is {@code graph.getIncidentCount(e)}; otherwise it is
- * {@code graph.getIncidentCount(e) - 1}.
- */
- protected int getAdjustedIncidentCount(E e)
- {
- return graph.getIncidentCount(e) - (hyperedges_are_self_loops ? 0 : 1);
- }
-}
+++ /dev/null
-/*
- * Created on Jul 14, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * An abstract class for iterative random-walk-based vertex scoring algorithms
- * that have a
- * fixed probability, for each vertex, of 'jumping' to that vertex at each
- * step in the algorithm (rather than following a link out of that vertex).
- *
- * @param <V> the vertex type
- * @param <E> the edge type
- * @param <S> the score type
- */
-public abstract class AbstractIterativeScorerWithPriors<V,E,S> extends
- AbstractIterativeScorer<V,E,S> implements VertexScorer<V,S>
-{
- /**
- * The prior probability of each vertex being visited on a given
- * 'jump' (non-link-following) step.
- */
- protected Transformer<V,? extends S> vertex_priors;
-
- /**
- * The probability of making a 'jump' at each step.
- */
- protected double alpha;
-
- /**
- * Creates an instance for the specified graph, edge weights, vertex
- * priors, and jump probability.
- * @param g the graph whose vertices are to be assigned scores
- * @param edge_weights the edge weights to use in the score assignment
- * @param vertex_priors the prior probabilities of each vertex being 'jumped' to
- * @param alpha the probability of making a 'jump' at each step
- */
- public AbstractIterativeScorerWithPriors(Hypergraph<V,E> g,
- Transformer<E,? extends Number> edge_weights,
- Transformer<V,? extends S> vertex_priors, double alpha)
- {
- super(g, edge_weights);
- this.vertex_priors = vertex_priors;
- this.alpha = alpha;
- initialize();
- }
-
- /**
- * Creates an instance for the specified graph, vertex priors, and jump
- * probability, with edge weights specified by the subclass.
- * @param g the graph whose vertices are to be assigned scores
- * @param vertex_priors the prior probabilities of each vertex being 'jumped' to
- * @param alpha the probability of making a 'jump' at each step
- */
- public AbstractIterativeScorerWithPriors(Hypergraph<V,E> g,
- Transformer<V,? extends S> vertex_priors, double alpha)
- {
- super(g);
- this.vertex_priors = vertex_priors;
- this.alpha = alpha;
- initialize();
- }
-
- /**
- * Initializes the state of this instance.
- */
- @Override
- public void initialize()
- {
- super.initialize();
- // initialize output values to priors
- // (output and current are swapped before each step(), so current will
- // have priors when update()s start happening)
- for (V v : graph.getVertices())
- setOutputValue(v, getVertexPrior(v));
- }
-
- /**
- * Returns the prior probability for <code>v</code>.
- * @param v the vertex whose prior probability is being queried
- * @return the prior probability for <code>v</code>
- */
- protected S getVertexPrior(V v)
- {
- return vertex_priors.transform(v);
- }
-
- /**
- * Returns a Transformer which maps each vertex to its prior probability.
- * @return a Transformer which maps each vertex to its prior probability
- */
- public Transformer<V, ? extends S> getVertexPriors()
- {
- return vertex_priors;
- }
-
- /**
- * Returns the probability of making a 'jump' (non-link-following step).
- * @return the probability of making a 'jump' (non-link-following step)
- */
- public double getAlpha()
- {
- return alpha;
- }
-}
+++ /dev/null
-/*
- * Created on Jul 12, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.shortestpath.Distance;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Assigns scores to each vertex according to the sum of its distances to all other vertices.
- */
-public class BarycenterScorer<V,E> extends DistanceCentralityScorer<V, E>
-{
- /**
- * Creates an instance with the specified graph and distance metric.
- * @param graph the input graph
- * @param distance the distance metric to use
- */
- public BarycenterScorer(Hypergraph<V,E> graph, Distance<V> distance)
- {
- super(graph, distance, false);
- }
-
- /**
- * Creates an instance with the specified graph and edge weights.
- * Will generate a <code>Distance</code> metric internally based on the edge weights.
- * @param graph the input graph
- * @param edge_weights the edge weights to use to calculate vertex/vertex distances
- */
- public BarycenterScorer(Hypergraph<V,E> graph, Transformer<E, ? extends Number> edge_weights)
- {
- super(graph, edge_weights, false);
- }
-
- /**
- * Creates an instance with the specified graph.
- * Will generate a <code>Distance</code> metric internally assuming that the
- * graph is unweighted.
- * @param graph the input graph
- */
- public BarycenterScorer(Hypergraph<V,E> graph)
- {
- super(graph, false);
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- * Created on Sep 16, 2008
- *
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Stack;
-
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-
-import edu.uci.ics.jung.algorithms.util.MapBinaryHeap;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.UndirectedGraph;
-
-/**
- * Computes betweenness centrality for each vertex and edge in the graph.
- *
- * @see "Ulrik Brandes: A Faster Algorithm for Betweenness Centrality. Journal of Mathematical Sociology 25(2):163-177, 2001."
- */
-public class BetweennessCentrality<V, E>
- implements VertexScorer<V, Double>, EdgeScorer<E, Double>
-{
- protected Graph<V,E> graph;
- protected Map<V, Double> vertex_scores;
- protected Map<E, Double> edge_scores;
- protected Map<V, BetweennessData> vertex_data;
-
- /**
- * Calculates betweenness scores based on the all-pairs unweighted shortest paths
- * in the graph.
- * @param graph the graph for which the scores are to be calculated
- */
- @SuppressWarnings("unchecked")
- public BetweennessCentrality(Graph<V, E> graph)
- {
- initialize(graph);
- computeBetweenness(new LinkedList<V>(), new ConstantTransformer(1));
- }
-
- /**
- * Calculates betweenness scores based on the all-pairs weighted shortest paths in the
- * graph.
- *
- * <p>NOTE: This version of the algorithm may not work correctly on all graphs; we're still
- * working out the bugs. Use at your own risk.
- * @param graph the graph for which the scores are to be calculated
- * @param edge_weights the edge weights to be used in the path length calculations
- */
- public BetweennessCentrality(Graph<V, E> graph,
- Transformer<E, ? extends Number> edge_weights)
- {
- // reject negative-weight edges up front
- for (E e : graph.getEdges())
- {
- double e_weight = edge_weights.transform(e).doubleValue();
- if (e_weight < 0)
- throw new IllegalArgumentException(String.format(
- "Weight for edge '%s' is < 0: %d", e, e_weight));
- }
-
- initialize(graph);
- computeBetweenness(new MapBinaryHeap<V>(new BetweennessComparator()),
- edge_weights);
- }
-
- protected void initialize(Graph<V,E> graph)
- {
- this.graph = graph;
- this.vertex_scores = new HashMap<V, Double>();
- this.edge_scores = new HashMap<E, Double>();
- this.vertex_data = new HashMap<V, BetweennessData>();
-
- for (V v : graph.getVertices())
- this.vertex_scores.put(v, 0.0);
-
- for (E e : graph.getEdges())
- this.edge_scores.put(e, 0.0);
- }
-
- protected void computeBetweenness(Queue<V> queue,
- Transformer<E, ? extends Number> edge_weights)
- {
- for (V v : graph.getVertices())
- {
- // initialize the betweenness data for this new vertex
- for (V s : graph.getVertices())
- this.vertex_data.put(s, new BetweennessData());
-
-// if (v.equals(new Integer(0)))
-// System.out.println("pause");
-
- vertex_data.get(v).numSPs = 1;
- vertex_data.get(v).distance = 0;
-
- Stack<V> stack = new Stack<V>();
-// Buffer<V> queue = new UnboundedFifoBuffer<V>();
-// queue.add(v);
- queue.offer(v);
-
- while (!queue.isEmpty())
- {
-// V w = queue.remove();
- V w = queue.poll();
- stack.push(w);
- BetweennessData w_data = vertex_data.get(w);
-
- for (E e : graph.getOutEdges(w))
- {
- // TODO (jrtom): change this to getOtherVertices(w, e)
- V x = graph.getOpposite(w, e);
- if (x.equals(w))
- continue;
- double wx_weight = edge_weights.transform(e).doubleValue();
-
-
-// for(V x : graph.getSuccessors(w))
-// {
-// if (x.equals(w))
-// continue;
-
- // FIXME: the other problem is that I need to
- // keep putting the neighbors of things we've just
- // discovered in the queue, if they're undiscovered or
- // at greater distance.
-
- // FIXME: this is the problem, right here, I think:
- // need to update position in queue if distance changes
- // (which can only happen with weighted edges).
- // for each outgoing edge e from w, get other end x
- // if x not already visited (dist x < 0)
- // set x's distance to w's dist + edge weight
- // add x to queue; pri in queue is x's dist
- // if w's dist + edge weight < x's dist
- // update x's dist
- // update x in queue (MapBinaryHeap)
- // clear x's incoming edge list
- // if w's dist + edge weight = x's dist
- // add e to x's incoming edge list
-
- BetweennessData x_data = vertex_data.get(x);
- double x_potential_dist = w_data.distance + wx_weight;
-
- if (x_data.distance < 0)
- {
-// queue.add(x);
-// vertex_data.get(x).distance = vertex_data.get(w).distance + 1;
- x_data.distance = x_potential_dist;
- queue.offer(x);
- }
-
- // note:
- // (1) this can only happen with weighted edges
- // (2) x's SP count and incoming edges are updated below
- if (x_data.distance > x_potential_dist)
- {
- x_data.distance = x_potential_dist;
- // invalidate previously identified incoming edges
- // (we have a new shortest path distance to x)
- x_data.incomingEdges.clear();
- // update x's position in queue
- ((MapBinaryHeap<V>)queue).update(x);
- }
-// if (vertex_data.get(x).distance == vertex_data.get(w).distance + 1)
- //
-// if (x_data.distance == x_potential_dist)
-// {
-// x_data.numSPs += w_data.numSPs;
-//// vertex_data.get(x).predecessors.add(w);
-// x_data.incomingEdges.add(e);
-// }
- }
- for (E e: graph.getOutEdges(w))
- {
- V x = graph.getOpposite(w, e);
- if (x.equals(w))
- continue;
- double e_weight = edge_weights.transform(e).doubleValue();
- BetweennessData x_data = vertex_data.get(x);
- double x_potential_dist = w_data.distance + e_weight;
- if (x_data.distance == x_potential_dist)
- {
- x_data.numSPs += w_data.numSPs;
-// vertex_data.get(x).predecessors.add(w);
- x_data.incomingEdges.add(e);
- }
- }
- }
- while (!stack.isEmpty())
- {
- V x = stack.pop();
-
-// for (V w : vertex_data.get(x).predecessors)
- for (E e : vertex_data.get(x).incomingEdges)
- {
- V w = graph.getOpposite(x, e);
- double partialDependency =
- vertex_data.get(w).numSPs / vertex_data.get(x).numSPs *
- (1.0 + vertex_data.get(x).dependency);
- vertex_data.get(w).dependency += partialDependency;
-// E w_x = graph.findEdge(w, x);
-// double w_x_score = edge_scores.get(w_x).doubleValue();
-// w_x_score += partialDependency;
-// edge_scores.put(w_x, w_x_score);
- double e_score = edge_scores.get(e).doubleValue();
- edge_scores.put(e, e_score + partialDependency);
- }
- if (!x.equals(v))
- {
- double x_score = vertex_scores.get(x).doubleValue();
- x_score += vertex_data.get(x).dependency;
- vertex_scores.put(x, x_score);
- }
- }
- }
-
- if(graph instanceof UndirectedGraph)
- {
- for (V v : graph.getVertices()) {
- double v_score = vertex_scores.get(v).doubleValue();
- v_score /= 2.0;
- vertex_scores.put(v, v_score);
- }
- for (E e : graph.getEdges()) {
- double e_score = edge_scores.get(e).doubleValue();
- e_score /= 2.0;
- edge_scores.put(e, e_score);
- }
- }
-
- vertex_data.clear();
- }
-
-// protected void computeWeightedBetweenness(Transformer<E, ? extends Number> edge_weights)
-// {
-// for (V v : graph.getVertices())
-// {
-// // initialize the betweenness data for this new vertex
-// for (V s : graph.getVertices())
-// this.vertex_data.put(s, new BetweennessData());
-// vertex_data.get(v).numSPs = 1;
-// vertex_data.get(v).distance = 0;
-//
-// Stack<V> stack = new Stack<V>();
-//// Buffer<V> queue = new UnboundedFifoBuffer<V>();
-// SortedSet<V> pqueue = new TreeSet<V>(new BetweennessComparator());
-//// queue.add(v);
-// pqueue.add(v);
-//
-//// while (!queue.isEmpty())
-// while (!pqueue.isEmpty())
-// {
-//// V w = queue.remove();
-// V w = pqueue.first();
-// pqueue.remove(w);
-// stack.push(w);
-//
-//// for(V x : graph.getSuccessors(w))
-// for (E e : graph.getOutEdges(w))
-// {
-// // TODO (jrtom): change this to getOtherVertices(w, e)
-// V x = graph.getOpposite(w, e);
-// if (x.equals(w))
-// continue;
-// double e_weight = edge_weights.transform(e).doubleValue();
-//
-// if (vertex_data.get(x).distance < 0)
-// {
-//// queue.add(x);
-// pqueue.add(v);
-//// vertex_data.get(x).distance = vertex_data.get(w).distance + 1;
-// vertex_data.get(x).distance =
-// vertex_data.get(w).distance + e_weight;
-// }
-//
-//// if (vertex_data.get(x).distance == vertex_data.get(w).distance + 1)
-// if (vertex_data.get(x).distance ==
-// vertex_data.get(w).distance + e_weight)
-// {
-// vertex_data.get(x).numSPs += vertex_data.get(w).numSPs;
-// vertex_data.get(x).predecessors.add(w);
-// }
-// }
-// }
-// updateScores(v, stack);
-// }
-//
-// if(graph instanceof UndirectedGraph)
-// adjustUndirectedScores();
-//
-// vertex_data.clear();
-// }
-
- public Double getVertexScore(V v)
- {
- return vertex_scores.get(v);
- }
-
- public Double getEdgeScore(E e)
- {
- return edge_scores.get(e);
- }
-
- private class BetweennessData
- {
- double distance;
- double numSPs;
-// List<V> predecessors;
- List<E> incomingEdges;
- double dependency;
-
- BetweennessData()
- {
- distance = -1;
- numSPs = 0;
-// predecessors = new ArrayList<V>();
- incomingEdges = new ArrayList<E>();
- dependency = 0;
- }
-
- @Override
- public String toString()
- {
- return "[d:" + distance + ", sp:" + numSPs +
- ", p:" + incomingEdges + ", d:" + dependency + "]\n";
-// ", p:" + predecessors + ", d:" + dependency + "]\n";
- }
- }
-
- private class BetweennessComparator implements Comparator<V>
- {
- public int compare(V v1, V v2)
- {
- return vertex_data.get(v1).distance > vertex_data.get(v2).distance ? 1 : -1;
- }
- }
-}
+++ /dev/null
-/*
- * Created on Jul 12, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.shortestpath.Distance;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Assigns scores to each vertex based on the mean distance to each other vertex.
- *
- * @author Joshua O'Madadhain
- */
-public class ClosenessCentrality<V,E> extends DistanceCentralityScorer<V,E>
-{
-
- /**
- * Creates an instance using the specified vertex/vertex distance metric.
- * @param graph the input
- * @param distance the vertex/vertex distance metric.
- */
- public ClosenessCentrality(Hypergraph<V,E> graph, Distance<V> distance)
- {
- super(graph, distance, true);
- }
-
- /**
- * Creates an instance which measures distance using the specified edge weights.
- * @param graph the input graph
- * @param edge_weights the edge weights to be used to determine vertex/vertex distances
- */
- public ClosenessCentrality(Hypergraph<V,E> graph, Transformer<E, ? extends Number> edge_weights)
- {
- super(graph, edge_weights, true);
- }
-
- /**
- * Creates an instance which measures distance on the graph without edge weights.
- * @param graph
- */
- public ClosenessCentrality(Hypergraph<V,E> graph)
- {
- super(graph, true);
- }
-}
+++ /dev/null
-/*
- * Created on Jul 6, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Assigns a score to each vertex equal to its degree.
- *
- * @param <V> the vertex type
- */
-public class DegreeScorer<V> implements VertexScorer<V,Integer>
-{
- /**
- * The graph for which scores are to be generated.
- */
- protected Hypergraph<V,?> graph;
-
- /**
- * Creates an instance for the specified graph.
- * @param graph the input graph
- */
- public DegreeScorer(Hypergraph<V,?> graph)
- {
- this.graph = graph;
- }
-
- /**
- * Returns the degree of the vertex.
- * @return the degree of the vertex
- */
- public Integer getVertexScore(V v)
- {
- return graph.degree(v);
- }
-}
+++ /dev/null
-/*
- * Created on Jul 10, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.shortestpath.DijkstraDistance;
-import edu.uci.ics.jung.algorithms.shortestpath.Distance;
-import edu.uci.ics.jung.algorithms.shortestpath.UnweightedShortestPath;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Assigns scores to vertices based on their distances to each other vertex
- * in the graph.
- *
- * This class optionally normalizes its results based on the value of its
- * 'averaging' constructor parameter. If it is <code>true</code>,
- * then the value returned for vertex v is 1 / (_average_ distance from v to all other vertices);
- * this is sometimes called <i>closeness centrality</i>.
- * If it is <code>false</code>, then the value returned is 1 / (_total_ distance from
- * v to all other vertices); this is sometimes referred to as <i>barycenter centrality</i>.
- * (If the average/total distance is 0, the value returned is {@code Double.POSITIVE_INFINITY}.)
- *
- * @see BarycenterScorer
- * @see ClosenessCentrality
- */
-public class DistanceCentralityScorer<V,E> implements VertexScorer<V, Double>
-{
- /**
- * The graph on which the vertex scores are to be calculated.
- */
- protected Hypergraph<V, E> graph;
-
- /**
- * The metric to use for specifying the distance between pairs of vertices.
- */
- protected Distance<V> distance;
-
- /**
- * The cache for the output results. Null encodes "not yet calculated",
- * < 0 encodes "no such distance exists".
- */
- protected Map<V, Double> output;
-
- /**
- * Specifies whether the values returned are the sum of the v-distances
- * or the mean v-distance.
- */
- protected boolean averaging;
-
- /**
- * Specifies whether, for a vertex <code>v</code> with missing (null) distances,
- * <code>v</code>'s score should ignore the missing values or be set to 'null'.
- * Defaults to 'true'.
- */
- protected boolean ignore_missing;
-
- /**
- * Specifies whether the values returned should ignore self-distances
- * (distances from <code>v</code> to itself).
- * Defaults to 'true'.
- */
- protected boolean ignore_self_distances;
-
- /**
- * Creates an instance with the specified graph, distance metric, and
- * averaging behavior.
- *
- * @param graph The graph on which the vertex scores are to be calculated.
- * @param distance The metric to use for specifying the distance between
- * pairs of vertices.
- * @param averaging Specifies whether the values returned is the sum of all
- * v-distances or the mean v-distance.
- * @param ignore_missing Specifies whether scores for missing distances
- * are to ignore missing distances or be set to null.
- * @param ignore_self_distances Specifies whether distances from a vertex
- * to itself should be included in its score.
- */
- public DistanceCentralityScorer(Hypergraph<V,E> graph, Distance<V> distance,
- boolean averaging, boolean ignore_missing,
- boolean ignore_self_distances)
- {
- this.graph = graph;
- this.distance = distance;
- this.averaging = averaging;
- this.ignore_missing = ignore_missing;
- this.ignore_self_distances = ignore_self_distances;
- this.output = new HashMap<V, Double>();
- }
-
- /**
- * Equivalent to <code>this(graph, distance, averaging, true, true)</code>.
- *
- * @param graph The graph on which the vertex scores are to be calculated.
- * @param distance The metric to use for specifying the distance between
- * pairs of vertices.
- * @param averaging Specifies whether the values returned is the sum of all
- * v-distances or the mean v-distance.
- */
- public DistanceCentralityScorer(Hypergraph<V,E> graph, Distance<V> distance,
- boolean averaging)
- {
- this(graph, distance, averaging, true, true);
- }
-
- /**
- * Creates an instance with the specified graph and averaging behavior
- * whose vertex distances are calculated based on the specified edge
- * weights.
- *
- * @param graph The graph on which the vertex scores are to be
- * calculated.
- * @param edge_weights The edge weights to use for specifying the distance
- * between pairs of vertices.
- * @param averaging Specifies whether the values returned is the sum of
- * all v-distances or the mean v-distance.
- * @param ignore_missing Specifies whether scores for missing distances
- * are to ignore missing distances or be set to null.
- * @param ignore_self_distances Specifies whether distances from a vertex
- * to itself should be included in its score.
- */
- public DistanceCentralityScorer(Hypergraph<V,E> graph,
- Transformer<E, ? extends Number> edge_weights, boolean averaging,
- boolean ignore_missing, boolean ignore_self_distances)
- {
- this(graph, new DijkstraDistance<V,E>(graph, edge_weights), averaging,
- ignore_missing, ignore_self_distances);
- }
-
- /**
- * Equivalent to <code>this(graph, edge_weights, averaging, true, true)</code>.
- * @param graph The graph on which the vertex scores are to be
- * calculated.
- * @param edge_weights The edge weights to use for specifying the distance
- * between pairs of vertices.
- * @param averaging Specifies whether the values returned is the sum of
- * all v-distances or the mean v-distance.
- */
- public DistanceCentralityScorer(Hypergraph<V,E> graph,
- Transformer<E, ? extends Number> edge_weights, boolean averaging)
- {
- this(graph, new DijkstraDistance<V,E>(graph, edge_weights), averaging,
- true, true);
- }
-
- /**
- * Creates an instance with the specified graph and averaging behavior
- * whose vertex distances are calculated on the unweighted graph.
- *
- * @param graph The graph on which the vertex scores are to be
- * calculated.
- * @param averaging Specifies whether the values returned is the sum of
- * all v-distances or the mean v-distance.
- * @param ignore_missing Specifies whether scores for missing distances
- * are to ignore missing distances or be set to null.
- * @param ignore_self_distances Specifies whether distances from a vertex
- * to itself should be included in its score.
- */
- public DistanceCentralityScorer(Hypergraph<V,E> graph, boolean averaging,
- boolean ignore_missing, boolean ignore_self_distances)
- {
- this(graph, new UnweightedShortestPath<V,E>(graph), averaging,
- ignore_missing, ignore_self_distances);
- }
-
- /**
- * Equivalent to <code>this(graph, averaging, true, true)</code>.
- * @param graph The graph on which the vertex scores are to be
- * calculated.
- * @param averaging Specifies whether the values returned is the sum of
- * all v-distances or the mean v-distance.
- */
- public DistanceCentralityScorer(Hypergraph<V,E> graph, boolean averaging)
- {
- this(graph, new UnweightedShortestPath<V,E>(graph), averaging, true, true);
- }
-
- /**
- * Calculates the score for the specified vertex. Returns {@code null} if
- * there are missing distances and such are not ignored by this instance.
- */
- public Double getVertexScore(V v)
- {
- Double value = output.get(v);
- if (value != null)
- {
- if (value < 0)
- return null;
- return value;
- }
-
- Map<V, Number> v_distances = new HashMap<V, Number>(distance.getDistanceMap(v));
- if (ignore_self_distances)
- v_distances.remove(v);
-
- // if we don't ignore missing distances and there aren't enough
- // distances, output null (shortcut)
- if (!ignore_missing)
- {
- int num_dests = graph.getVertexCount() -
- (ignore_self_distances ? 1 : 0);
- if (v_distances.size() != num_dests)
- {
- output.put(v, -1.0);
- return null;
- }
- }
-
- Double sum = 0.0;
- for (V w : graph.getVertices())
- {
- if (w.equals(v) && ignore_self_distances)
- continue;
- Number w_distance = v_distances.get(w);
- if (w_distance == null)
- if (ignore_missing)
- continue;
- else
- {
- output.put(v, -1.0);
- return null;
- }
- else
- sum += w_distance.doubleValue();
- }
- value = sum;
- if (averaging)
- value /= v_distances.size();
-
- double score = value == 0 ?
- Double.POSITIVE_INFINITY :
- 1.0 / value;
- output.put(v, score);
-
- return score;
- }
-}
+++ /dev/null
-/*
- * Created on Jul 6, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-
-/**
- * An interface for algorithms that assign scores to edges.
- *
- * @param <E> the edge type
- * @param <S> the score type
- */
-public interface EdgeScorer<E, S>
-{
- /**
- * Returns the algorithm's score for this edge.
- * @return the algorithm's score for this edge
- */
- public S getEdgeScore(E e);
-}
+++ /dev/null
-/*
- * Created on Jul 12, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Calculates eigenvector centrality for each vertex in the graph.
- * The 'eigenvector centrality' for a vertex is defined as the fraction of
- * time that a random walk(er) will spend at that vertex over an infinite
- * time horizon.
- * Assumes that the graph is strongly connected.
- */
-public class EigenvectorCentrality<V,E> extends PageRank<V,E>
-{
- /**
- * Creates an instance with the specified graph and edge weights.
- * The outgoing edge weights for each edge must sum to 1.
- * (See <code>UniformDegreeWeight</code> for one way to handle this for
- * undirected graphs.)
- * @param graph the graph for which the centrality is to be calculated
- * @param edge_weights the edge weights
- */
- public EigenvectorCentrality(Hypergraph<V,E> graph,
- Transformer<E, ? extends Number> edge_weights)
- {
- super(graph, edge_weights, 0);
- acceptDisconnectedGraph(false);
- }
-
- /**
- * Creates an instance with the specified graph and default edge weights.
- * (Default edge weights: <code>UniformDegreeWeight</code>.)
- * @param graph the graph for which the centrality is to be calculated.
- */
- public EigenvectorCentrality(Hypergraph<V,E> graph)
- {
- super(graph, 0);
- acceptDisconnectedGraph(false);
- }
-}
+++ /dev/null
-/*
- * Created on Jul 15, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import edu.uci.ics.jung.algorithms.scoring.util.ScoringUtils;
-import edu.uci.ics.jung.graph.Graph;
-
-import org.apache.commons.collections15.Transformer;
-
-/**
- * Assigns hub and authority scores to each vertex depending on the topology of
- * the network. The essential idea is that a vertex is a hub to the extent
- * that it links to authoritative vertices, and is an authority to the extent
- * that it links to 'hub' vertices.
- *
- * <p>The classic HITS algorithm essentially proceeds as follows:
- * <pre>
- * assign equal initial hub and authority values to each vertex
- * repeat
- * for each vertex w:
- * w.hub = sum over successors x of x.authority
- * w.authority = sum over predecessors v of v.hub
- * normalize hub and authority scores so that the sum of the squares of each = 1
- * until scores converge
- * </pre>
- *
- * HITS is somewhat different from random walk/eigenvector-based algorithms
- * such as PageRank in that:
- * <ul>
- * <li/>there are two mutually recursive scores being calculated, rather than
- * a single value
- * <li/>the edge weights are effectively all 1, i.e., they can't be interpreted
- * as transition probabilities. This means that the more inlinks and outlinks
- * that a vertex has, the better, since adding an inlink (or outlink) does
- * not dilute the influence of the other inlinks (or outlinks) as in
- * random walk-based algorithms.
- * <li/>the scores cannot be interpreted as posterior probabilities (due to the different
- * normalization)
- * </ul>
- *
- * This implementation has the classic behavior by default. However, it has
- * been generalized somewhat so that it can act in a more "PageRank-like" fashion:
- * <ul>
- * <li/>this implementation has an optional 'random jump probability' parameter analogous
- * to the 'alpha' parameter used by PageRank. Varying this value between 0 and 1
- * allows the user to vary between the classic HITS behavior and one in which the
- * scores are smoothed to a uniform distribution.
- * The default value for this parameter is 0 (no random jumps possible).
- * <li/>the edge weights can be set to anything the user likes, and in
- * particular they can be set up (e.g. using <code>UniformDegreeWeight</code>)
- * so that the weights of the relevant edges incident to a vertex sum to 1.
- * <li/>The vertex score normalization has been factored into its own method
- * so that it can be overridden by a subclass. Thus, for example,
- * since the vertices' values are set to sum to 1 initially, if the weights of the
- * relevant edges incident to a vertex sum to 1, then the vertices' values
- * will continue to sum to 1 if the "sum-of-squares" normalization code
- * is overridden to a no-op. (Other normalization methods may also be employed.)
- * </ul>
- *
- * @param <V> the vertex type
- * @param <E> the edge type
- *
- * @see "'Authoritative sources in a hyperlinked environment' by Jon Kleinberg, 1997"
- */
-public class HITS<V,E> extends HITSWithPriors<V,E>
-{
-
- /**
- * Creates an instance for the specified graph, edge weights, and alpha
- * (random jump probability) parameter.
- * @param g the input graph
- * @param edge_weights the weights to use for each edge
- * @param alpha the probability of a hub giving some authority to all vertices,
- * and of an authority increasing the score of all hubs (not just those connected
- * via links)
- */
- public HITS(Graph<V,E> g, Transformer<E, Double> edge_weights, double alpha)
- {
- super(g, edge_weights, ScoringUtils.getHITSUniformRootPrior(g.getVertices()), alpha);
- }
-
- /**
- * Creates an instance for the specified graph and alpha (random jump probability)
- * parameter. The edge weights are all set to 1.
- * @param g the input graph
- * @param alpha the probability of a hub giving some authority to all vertices,
- * and of an authority increasing the score of all hubs (not just those connected
- * via links)
- */
- public HITS(Graph<V,E> g, double alpha)
- {
- super(g, ScoringUtils.getHITSUniformRootPrior(g.getVertices()), alpha);
- }
-
- /**
- * Creates an instance for the specified graph. The edge weights are all set to 1
- * and alpha is set to 0.
- * @param g the input graph
- */
- public HITS(Graph<V,E> g)
- {
- this(g, 0.0);
- }
-
-
- /**
- * Maintains hub and authority score information for a vertex.
- */
- public static class Scores
- {
- /**
- * The hub score for a vertex.
- */
- public double hub;
-
- /**
- * The authority score for a vertex.
- */
- public double authority;
-
- /**
- * Creates an instance with the specified hub and authority score.
- */
- public Scores(double hub, double authority)
- {
- this.hub = hub;
- this.authority = authority;
- }
-
- @Override
- public String toString()
- {
- return String.format("[h:%.4f,a:%.4f]", this.hub, this.authority);
- }
- }
-}
+++ /dev/null
-/*
- * Created on Jul 14, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * A generalization of HITS that permits non-uniformly-distributed random jumps.
- * The 'vertex_priors' (that is, prior probabilities for each vertex) may be
- * thought of as the fraction of the total 'potential' (hub or authority score)
- * that is assigned to that vertex out of the portion that is assigned according
- * to random jumps.
- *
- * @see "Algorithms for Estimating Relative Importance in Graphs by Scott White and Padhraic Smyth, 2003"
- */
-public class HITSWithPriors<V, E>
- extends AbstractIterativeScorerWithPriors<V,E,HITS.Scores>
-{
- /**
- * The sum of the potential, at each step, associated with vertices with no outedges (authority)
- * or no inedges (hub).
- */
- protected HITS.Scores disappearing_potential;
-
- /**
- * Creates an instance for the specified graph, edge weights, vertex prior probabilities,
- * and random jump probability (alpha).
- * @param g the input graph
- * @param edge_weights the edge weights
- * @param vertex_priors the prior probability for each vertex
- * @param alpha the probability of a random jump at each step
- */
- public HITSWithPriors(Hypergraph<V,E> g,
- Transformer<E, ? extends Number> edge_weights,
- Transformer<V, HITS.Scores> vertex_priors, double alpha)
- {
- super(g, edge_weights, vertex_priors, alpha);
- disappearing_potential = new HITS.Scores(0,0);
- }
-
- /**
- * Creates an instance for the specified graph, vertex priors, and random
- * jump probability (alpha). The edge weights default to 1.0.
- * @param g the input graph
- * @param vertex_priors the prior probability for each vertex
- * @param alpha the probability of a random jump at each step
- */
- @SuppressWarnings("unchecked")
- public HITSWithPriors(Hypergraph<V,E> g,
- Transformer<V, HITS.Scores> vertex_priors, double alpha)
- {
- super(g, new ConstantTransformer(1.0), vertex_priors, alpha);
- disappearing_potential = new HITS.Scores(0,0);
- }
-
- /**
- * Updates the value for this vertex.
- */
- @Override
- protected double update(V v)
- {
- collectDisappearingPotential(v);
-
- double v_auth = 0;
- for (E e : graph.getInEdges(v))
- {
- int incident_count = getAdjustedIncidentCount(e);
- for (V w : graph.getIncidentVertices(e))
- {
- if (!w.equals(v) || hyperedges_are_self_loops)
- v_auth += (getCurrentValue(w).hub *
- getEdgeWeight(w,e).doubleValue() / incident_count);
- }
-// V w = graph.getOpposite(v, e);
-// auth += (getCurrentValue(w).hub * getEdgeWeight(w, e).doubleValue());
- }
-
- double v_hub = 0;
- for (E e : graph.getOutEdges(v))
- {
- int incident_count = getAdjustedIncidentCount(e);
- for (V w : graph.getIncidentVertices(e))
- {
- if (!w.equals(v) || hyperedges_are_self_loops)
- v_hub += (getCurrentValue(w).authority *
- getEdgeWeight(w,e).doubleValue() / incident_count);
- }
-// V x = graph.getOpposite(v,e);
-// hub += (getCurrentValue(x).authority * getEdgeWeight(x, e).doubleValue());
- }
-
- // modify total_input according to alpha
- if (alpha > 0)
- {
- v_auth = v_auth * (1 - alpha) + getVertexPrior(v).authority * alpha;
- v_hub = v_hub * (1 - alpha) + getVertexPrior(v).hub * alpha;
- }
- setOutputValue(v, new HITS.Scores(v_hub, v_auth));
-
- return Math.max(Math.abs(getCurrentValue(v).hub - v_hub),
- Math.abs(getCurrentValue(v).authority - v_auth));
- }
-
- /**
- * Code which is executed after each step. In this case, deals with the
- * 'disappearing potential', normalizes the scores, and then calls
- * <code>super.afterStep()</code>.
- * @see #collectDisappearingPotential(Object)
- */
- @Override
- protected void afterStep()
- {
- if (disappearing_potential.hub > 0 || disappearing_potential.authority > 0)
- {
- for (V v : graph.getVertices())
- {
- double new_hub = getOutputValue(v).hub +
- (1 - alpha) * (disappearing_potential.hub * getVertexPrior(v).hub);
- double new_auth = getOutputValue(v).authority +
- (1 - alpha) * (disappearing_potential.authority * getVertexPrior(v).authority);
- setOutputValue(v, new HITS.Scores(new_hub, new_auth));
- }
- disappearing_potential.hub = 0;
- disappearing_potential.authority = 0;
- }
-
- normalizeScores();
-
- super.afterStep();
- }
-
- /**
- * Normalizes scores so that sum of their squares = 1.
- * This method may be overridden so as to yield different
- * normalizations.
- */
- protected void normalizeScores() {
- double hub_ssum = 0;
- double auth_ssum = 0;
- for (V v : graph.getVertices())
- {
- double hub_val = getOutputValue(v).hub;
- double auth_val = getOutputValue(v).authority;
- hub_ssum += (hub_val * hub_val);
- auth_ssum += (auth_val * auth_val);
- }
-
- hub_ssum = Math.sqrt(hub_ssum);
- auth_ssum = Math.sqrt(auth_ssum);
-
- for (V v : graph.getVertices())
- {
- HITS.Scores values = getOutputValue(v);
- setOutputValue(v, new HITS.Scores(
- values.hub / hub_ssum,
- values.authority / auth_ssum));
- }
- }
-
- /**
- * Collects the "disappearing potential" associated with vertices that have either
- * no incoming edges, no outgoing edges, or both. Vertices that have no incoming edges
- * do not directly contribute to the hub scores of other vertices; similarly, vertices
- * that have no outgoing edges do not directly contribute to the authority scores of
- * other vertices. These values are collected at each step and then distributed across all vertices
- * as a part of the normalization process. (This process is not required for, and does
- * not affect, the 'sum-of-squares'-style normalization.)
- */
- @Override
- protected void collectDisappearingPotential(V v)
- {
- if (graph.outDegree(v) == 0)
- {
- if (isDisconnectedGraphOK())
- disappearing_potential.hub += getCurrentValue(v).authority;
- else
- throw new IllegalArgumentException("Outdegree of " + v + " must be > 0");
- }
- if (graph.inDegree(v) == 0)
- {
- if (isDisconnectedGraphOK())
- disappearing_potential.authority += getCurrentValue(v).hub;
- else
- throw new IllegalArgumentException("Indegree of " + v + " must be > 0");
- }
- }
-
-}
+++ /dev/null
-/**
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- * Created on Aug 22, 2008
- *
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.util.ScoringUtils;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * A special case of {@code PageRankWithPriors} in which the final scores
- * represent a probability distribution over position assuming a random (Markovian)
- * walk of exactly k steps, based on the initial distribution specified by the priors.
- *
- * <p><b>NOTE</b>: The version of {@code KStepMarkov} in {@code algorithms.importance}
- * (and in JUNG 1.x) is believed to be incorrect: rather than returning
- * a score which represents a probability distribution over position assuming
- * a k-step random walk, it returns a score which represents the sum over all steps
- * of the probability for each step. If you want that behavior, set the
- * 'cumulative' flag as follows <i>before calling {@code evaluate()}</i>:
- * <pre>
- * KStepMarkov ksm = new KStepMarkov(...);
- * ksm.setCumulative(true);
- * ksm.evaluate();
- * </pre>
- *
- * By default, the 'cumulative' flag is set to false.
- *
- * NOTE: THIS CLASS IS NOT YET COMPLETE. USE AT YOUR OWN RISK. (The original behavior
- * is captured by the version still available in {@code algorithms.importance}.)
- *
- * @see "Algorithms for Estimating Relative Importance in Graphs by Scott White and Padhraic Smyth, 2003"
- * @see PageRank
- * @see PageRankWithPriors
- */
-public class KStepMarkov<V,E> extends PageRankWithPriors<V,E>
-{
- private boolean cumulative;
-
- /**
- * Creates an instance based on the specified graph, edge weights, vertex
- * priors (initial scores), and number of steps to take.
- * @param graph the input graph
- * @param edge_weights the edge weights (transition probabilities)
- * @param vertex_priors the initial probability distribution (score assignment)
- * @param steps the number of times that {@code step()} will be called by {@code evaluate}
- */
- public KStepMarkov(Hypergraph<V,E> graph, Transformer<E, ? extends Number> edge_weights,
- Transformer<V, Double> vertex_priors, int steps)
- {
- super(graph, edge_weights, vertex_priors, 0);
- initialize(steps);
- }
-
- /**
- * Creates an instance based on the specified graph, vertex
- * priors (initial scores), and number of steps to take. The edge
- * weights (transition probabilities) are set to default values (a uniform
- * distribution over all outgoing edges).
- * @param graph the input graph
- * @param vertex_priors the initial probability distribution (score assignment)
- * @param steps the number of times that {@code step()} will be called by {@code evaluate}
- */
- public KStepMarkov(Hypergraph<V,E> graph, Transformer<V, Double> vertex_priors, int steps)
- {
- super(graph, vertex_priors, 0);
- initialize(steps);
- }
-
- /**
- * Creates an instance based on the specified graph and number of steps to
- * take. The edge weights (transition probabilities) and vertex initial scores
- * (prior probabilities) are set to default values (a uniform
- * distribution over all outgoing edges, and a uniform distribution over
- * all vertices, respectively).
- * @param graph the input graph
- * @param steps the number of times that {@code step()} will be called by {@code evaluate}
- */
- public KStepMarkov(Hypergraph<V,E> graph, int steps)
- {
- super(graph, ScoringUtils.getUniformRootPrior(graph.getVertices()), 0);
- initialize(steps);
- }
-
- private void initialize(int steps)
- {
- this.acceptDisconnectedGraph(false);
-
- if (steps <= 0)
- throw new IllegalArgumentException("Number of steps must be > 0");
-
- this.max_iterations = steps;
- this.tolerance = -1.0;
-
- this.cumulative = false;
- }
-
- /**
- * Specifies whether this instance should assign a score to each vertex
- * based on the
- * @param cumulative
- */
- public void setCumulative(boolean cumulative)
- {
- this.cumulative = cumulative;
- }
-
- /**
- * Updates the value for this vertex. Called by <code>step()</code>.
- */
- @Override
- public double update(V v)
- {
- if (!cumulative)
- return super.update(v);
-
- collectDisappearingPotential(v);
-
- double v_input = 0;
- for (E e : graph.getInEdges(v))
- {
- // For graphs, the code below is equivalent to
-// V w = graph.getOpposite(v, e);
-// total_input += (getCurrentValue(w) * getEdgeWeight(w,e).doubleValue());
- // For hypergraphs, this divides the potential coming from w
- // by the number of vertices in the connecting edge e.
- int incident_count = getAdjustedIncidentCount(e);
- for (V w : graph.getIncidentVertices(e))
- {
- if (!w.equals(v) || hyperedges_are_self_loops)
- v_input += (getCurrentValue(w) *
- getEdgeWeight(w,e).doubleValue() / incident_count);
- }
- }
-
- // modify total_input according to alpha
- double new_value = alpha > 0 ?
- v_input * (1 - alpha) + getVertexPrior(v) * alpha :
- v_input;
- setOutputValue(v, new_value + getCurrentValue(v));
-
- // FIXME: DO WE NEED TO CHANGE HOW DISAPPEARING IS COUNTED? NORMALIZE?
-
- return Math.abs(getCurrentValue(v) - new_value);
- }
-
-}
+++ /dev/null
-/*
- * Created on Jul 12, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.util.ScoringUtils;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Assigns scores to each vertex according to the PageRank algorithm.
- *
- * <p>PageRank is an eigenvector-based algorithm. The score for a given vertex may be thought of
- * as the fraction of time spent 'visiting' that vertex (measured over all time)
- * in a random walk over the vertices (following outgoing edges from each vertex).
- * PageRank modifies this random walk by adding to the model a probability (specified as 'alpha'
- * in the constructor) of jumping to any vertex. If alpha is 0, this is equivalent to the
- * eigenvector centrality algorithm; if alpha is 1, all vertices will receive the same score
- * (1/|V|). Thus, alpha acts as a sort of score smoothing parameter.
- *
- * <p>The original algorithm assumed that, for a given vertex, the probability of following any
- * outgoing edge was the same; this is the default if edge weights are not specified.
- * This implementation generalizes the original by permitting
- * the user to specify edge weights; in order to maintain the original semantics, however,
- * the weights on the outgoing edges for a given vertex must represent transition probabilities;
- * that is, they must sum to 1.
- *
- * <p>If a vertex has no outgoing edges, then the probability of taking a random jump from that
- * vertex is (by default) effectively 1. If the user wishes to instead throw an exception when this happens,
- * call <code>acceptDisconnectedGraph(false)</code> on this instance.
- *
- * <p>Typical values for alpha (according to the original paper) are in the range [0.1, 0.2]
- * but may be any value between 0 and 1 inclusive.
- *
- * @see "The Anatomy of a Large-Scale Hypertextual Web Search Engine by L. Page and S. Brin, 1999"
- */
-public class PageRank<V,E> extends PageRankWithPriors<V,E>
-{
-
- /**
- * Creates an instance for the specified graph, edge weights, and random jump probability.
- * @param graph the input graph
- * @param edge_weight the edge weights (transition probabilities)
- * @param alpha the probability of taking a random jump to an arbitrary vertex
- */
- public PageRank(Hypergraph<V,E> graph, Transformer<E, ? extends Number> edge_weight, double alpha)
- {
- super(graph, edge_weight, ScoringUtils.getUniformRootPrior(graph.getVertices()), alpha);
- }
-
- /**
- * Creates an instance for the specified graph and random jump probability; the probability
- * of following any outgoing edge from a given vertex is the same.
- * @param graph the input graph
- * @param alpha the probability of taking a random jump to an arbitrary vertex
- */
- public PageRank(Hypergraph<V,E> graph, double alpha)
- {
- super(graph, ScoringUtils.getUniformRootPrior(graph.getVertices()), alpha);
- }
-}
+++ /dev/null
-/*
- * Created on Jul 6, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.util.UniformDegreeWeight;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * A generalization of PageRank that permits non-uniformly-distributed random jumps.
- * The 'vertex_priors' (that is, prior probabilities for each vertex) may be
- * thought of as the fraction of the total 'potential' that is assigned to that
- * vertex at each step out of the portion that is assigned according
- * to random jumps (this portion is specified by 'alpha').
- *
- * @see "Algorithms for Estimating Relative Importance in Graphs by Scott White and Padhraic Smyth, 2003"
- * @see PageRank
- */
-public class PageRankWithPriors<V, E>
- extends AbstractIterativeScorerWithPriors<V,E,Double>
-{
- /**
- * Maintains the amount of potential associated with vertices with no out-edges.
- */
- protected double disappearing_potential = 0.0;
-
- /**
- * Creates an instance with the specified graph, edge weights, vertex priors, and
- * 'random jump' probability (alpha).
- * @param graph the input graph
- * @param edge_weights the edge weights, denoting transition probabilities from source to destination
- * @param vertex_priors the prior probabilities for each vertex
- * @param alpha the probability of executing a 'random jump' at each step
- */
- public PageRankWithPriors(Hypergraph<V,E> graph,
- Transformer<E, ? extends Number> edge_weights,
- Transformer<V, Double> vertex_priors, double alpha)
- {
- super(graph, edge_weights, vertex_priors, alpha);
- }
-
- /**
- * Creates an instance with the specified graph, vertex priors, and
- * 'random jump' probability (alpha). The outgoing edge weights for each
- * vertex will be equal and sum to 1.
- * @param graph the input graph
- * @param vertex_priors the prior probabilities for each vertex
- * @param alpha the probability of executing a 'random jump' at each step
- */
- public PageRankWithPriors(Hypergraph<V,E> graph,
- Transformer<V, Double> vertex_priors, double alpha)
- {
- super(graph, vertex_priors, alpha);
- this.edge_weights = new UniformDegreeWeight<V,E>(graph);
- }
-
- /**
- * Updates the value for this vertex. Called by <code>step()</code>.
- */
- @Override
- public double update(V v)
- {
- collectDisappearingPotential(v);
-
- double v_input = 0;
- for (E e : graph.getInEdges(v))
- {
- // For graphs, the code below is equivalent to
-// V w = graph.getOpposite(v, e);
-// total_input += (getCurrentValue(w) * getEdgeWeight(w,e).doubleValue());
- // For hypergraphs, this divides the potential coming from w
- // by the number of vertices in the connecting edge e.
- int incident_count = getAdjustedIncidentCount(e);
- for (V w : graph.getIncidentVertices(e))
- {
- if (!w.equals(v) || hyperedges_are_self_loops)
- v_input += (getCurrentValue(w) *
- getEdgeWeight(w,e).doubleValue() / incident_count);
- }
- }
-
- // modify total_input according to alpha
- double new_value = alpha > 0 ?
- v_input * (1 - alpha) + getVertexPrior(v) * alpha :
- v_input;
- setOutputValue(v, new_value);
-
- return Math.abs(getCurrentValue(v) - new_value);
- }
-
- /**
- * Cleans up after each step. In this case that involves allocating the disappearing
- * potential (thus maintaining normalization of the scores) according to the vertex
- * probability priors, and then calling
- * <code>super.afterStep</code>.
- */
- @Override
- protected void afterStep()
- {
- // distribute disappearing potential according to priors
- if (disappearing_potential > 0)
- {
- for (V v : graph.getVertices())
- {
- setOutputValue(v, getOutputValue(v) +
- (1 - alpha) * (disappearing_potential * getVertexPrior(v)));
- }
- disappearing_potential = 0;
- }
-
- super.afterStep();
- }
-
- /**
- * Collects the "disappearing potential" associated with vertices that have
- * no outgoing edges. Vertices that have no outgoing edges do not directly
- * contribute to the scores of other vertices. These values are collected
- * at each step and then distributed across all vertices
- * as a part of the normalization process.
- */
- @Override
- protected void collectDisappearingPotential(V v)
- {
- if (graph.outDegree(v) == 0)
- {
- if (isDisconnectedGraphOK())
- disappearing_potential += getCurrentValue(v);
- else
- throw new IllegalArgumentException("Outdegree of " + v + " must be > 0");
- }
- }
-}
+++ /dev/null
-/*
- * Created on Jul 6, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-
-/**
- * An interface for algorithms that assign scores to vertices.
- *
- * @param <V> the vertex type
- * @param <S> the score type
- */
-public interface VertexScorer<V, S>
-{
- /**
- * Returns the algorithm's score for this vertex.
- * @return the algorithm's score for this vertex
- */
- public S getVertexScore(V v);
-}
+++ /dev/null
-/*
- * Created on Jul 15, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.util.UniformDegreeWeight;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Assigns scores to vertices according to their 'voltage' in an approximate
- * solution to the Kirchoff equations. This is accomplished by tying "source"
- * vertices to specified positive voltages, "sink" vertices to 0 V, and
- * iteratively updating the voltage of each other vertex to the (weighted)
- * average of the voltages of its neighbors.
- *
- * <p>The resultant voltages will all be in the range <code>[0, max]</code>
- * where <code>max</code> is the largest voltage of any source vertex (in the
- * absence of negative source voltages; see below).
- *
- * <p>A few notes about this algorithm's interpretation of the graph data:
- * <ul>
- * <li/>Higher edge weights are interpreted as indicative of greater
- * influence/effect than lower edge weights.
- * <li/>Negative edge weights (and negative "source" voltages) invalidate
- * the interpretation of the resultant values as voltages. However, this
- * algorithm will not reject graphs with negative edge weights or source voltages.
- * <li/>Parallel edges are equivalent to a single edge whose weight is the
- * sum of the weights on the parallel edges.
- * <li/>Current flows along undirected edges in both directions,
- * but only flows along directed edges in the direction of the edge.
- * </ul>
- * </p>
- */
-public class VoltageScorer<V, E> extends AbstractIterativeScorer<V, E, Double>
- implements VertexScorer<V, Double>
-{
- protected Map<V, ? extends Number> source_voltages;
- protected Collection<V> sinks;
-
- /**
- * Creates an instance with the specified graph, edge weights, source voltages,
- * and sinks.
- * @param g the input graph
- * @param edge_weights the edge weights, representing conductivity
- * @param source_voltages the (fixed) voltage for each source
- * @param sinks the vertices whose voltages are tied to 0
- */
- public VoltageScorer(Hypergraph<V, E> g, Transformer<E, ? extends Number> edge_weights,
- Map<V, ? extends Number> source_voltages, Collection<V> sinks)
- {
- super(g, edge_weights);
- this.source_voltages = source_voltages;
- this.sinks = sinks;
- initialize();
- }
-
- /**
- * Creates an instance with the specified graph, edge weights, source vertices
- * (each of whose 'voltages' are tied to 1), and sinks.
- * @param g the input graph
- * @param edge_weights the edge weights, representing conductivity
- * @param sources the vertices whose voltages are tied to 1
- * @param sinks the vertices whose voltages are tied to 0
- */
- public VoltageScorer(Hypergraph<V, E> g, Transformer<E, ? extends Number> edge_weights,
- Collection<V> sources, Collection<V> sinks)
- {
- super(g, edge_weights);
-
- Map<V, Double> unit_voltages = new HashMap<V, Double>();
- for(V v : sources)
- unit_voltages.put(v, new Double(1.0));
- this.source_voltages = unit_voltages;
- this.sinks = sinks;
- initialize();
- }
-
- /**
- * Creates an instance with the specified graph, source vertices
- * (each of whose 'voltages' are tied to 1), and sinks.
- * The outgoing edges for each vertex are assigned
- * weights that sum to 1.
- * @param g the input graph
- * @param sources the vertices whose voltages are tied to 1
- * @param sinks the vertices whose voltages are tied to 0
- */
- public VoltageScorer(Hypergraph<V, E> g, Collection<V> sources, Collection<V> sinks)
- {
- super(g);
-
- Map<V, Double> unit_voltages = new HashMap<V, Double>();
- for(V v : sources)
- unit_voltages.put(v, new Double(1.0));
- this.source_voltages = unit_voltages;
- this.sinks = sinks;
- initialize();
- }
-
- /**
- * Creates an instance with the specified graph, source voltages,
- * and sinks. The outgoing edges for each vertex are assigned
- * weights that sum to 1.
- * @param g the input graph
- * @param source_voltages the (fixed) voltage for each source
- * @param sinks the vertices whose voltages are tied to 0
- */
- public VoltageScorer(Hypergraph<V, E> g, Map<V, ? extends Number> source_voltages,
- Collection<V> sinks)
- {
- super(g);
- this.source_voltages = source_voltages;
- this.sinks = sinks;
- this.edge_weights = new UniformDegreeWeight<V,E>(g);
- initialize();
- }
-
- /**
- * Creates an instance with the specified graph, edge weights, source, and
- * sink. The source vertex voltage is tied to 1.
- * @param g the input graph
- * @param edge_weights the edge weights, representing conductivity
- * @param source the vertex whose voltage is tied to 1
- * @param sink the vertex whose voltage is tied to 0
- */
- public VoltageScorer(Hypergraph<V,E> g, Transformer<E, ? extends Number> edge_weights,
- V source, V sink)
- {
- this(g, edge_weights, Collections.singletonMap(source, 1.0), Collections.singletonList(sink));
- initialize();
- }
-
- /**
- * Creates an instance with the specified graph, edge weights, source, and
- * sink. The source vertex voltage is tied to 1.
- * The outgoing edges for each vertex are assigned
- * weights that sum to 1.
- * @param g the input graph
- * @param source the vertex whose voltage is tied to 1
- * @param sink the vertex whose voltage is tied to 0
- */
- public VoltageScorer(Hypergraph<V,E> g, V source, V sink)
- {
- this(g, Collections.singletonMap(source, 1.0), Collections.singletonList(sink));
- initialize();
- }
-
-
- /**
- * Initializes the state of this instance.
- */
- @Override
- public void initialize()
- {
- super.initialize();
-
- // sanity check
- if (source_voltages.isEmpty() || sinks.isEmpty())
- throw new IllegalArgumentException("Both sources and sinks (grounds) must be defined");
-
- if (source_voltages.size() + sinks.size() > graph.getVertexCount())
- throw new IllegalArgumentException("Source/sink sets overlap, or contain vertices not in graph");
-
- for (Map.Entry<V, ? extends Number> entry : source_voltages.entrySet())
- {
- V v = entry.getKey();
- if (sinks.contains(v))
- throw new IllegalArgumentException("Vertex " + v + " is incorrectly specified as both source and sink");
- double value = entry.getValue().doubleValue();
- if (value <= 0)
- throw new IllegalArgumentException("Source vertex " + v + " has negative voltage");
- }
-
- // set up initial voltages
- for (V v : graph.getVertices())
- {
- if (source_voltages.containsKey(v))
- setOutputValue(v, source_voltages.get(v).doubleValue());
- else
- setOutputValue(v, 0.0);
- }
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.scoring.AbstractIterativeScorer#update(Object)
- */
- @Override
- public double update(V v)
- {
- // if it's a voltage source or sink, we're done
- Number source_volts = source_voltages.get(v);
- if (source_volts != null)
- {
- setOutputValue(v, source_volts.doubleValue());
- return 0.0;
- }
- if (sinks.contains(v))
- {
- setOutputValue(v, 0.0);
- return 0.0;
- }
-
- Collection<E> edges = graph.getInEdges(v);
- double voltage_sum = 0;
- double weight_sum = 0;
- for (E e: edges)
- {
- int incident_count = getAdjustedIncidentCount(e);
- for (V w : graph.getIncidentVertices(e))
- {
- if (!w.equals(v) || hyperedges_are_self_loops)
- {
- double weight = getEdgeWeight(w,e).doubleValue() / incident_count;
- voltage_sum += getCurrentValue(w).doubleValue() * weight;
- weight_sum += weight;
- }
- }
-// V w = graph.getOpposite(v, e);
-// double weight = getEdgeWeight(w,e).doubleValue();
-// voltage_sum += getCurrentValue(w).doubleValue() * weight;
-// weight_sum += weight;
- }
-
- // if either is 0, new value is 0
- if (voltage_sum == 0 || weight_sum == 0)
- {
- setOutputValue(v, 0.0);
- return getCurrentValue(v).doubleValue();
- }
-
- setOutputValue(v, voltage_sum / weight_sum);
- return Math.abs(getCurrentValue(v).doubleValue() - voltage_sum / weight_sum);
- }
-
-}
-
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Mechanisms for assigning values (denoting significance, influence, centrality, etc.)
-to graph elements based on topological properties. These include:
-
-<ul>
-<li/><code>BarycenterScorer</code>: assigns a score to each vertex according to
-the sum of the distances to all other vertices
-<li/><code>ClosenessCentrality</code>: assigns a score to each vertex based on
-the mean distance to each other vertex
-<li/><code>DegreeScorer</code>: assigns a score to each vertex based on its degree
-<li/><code>EigenvectorCentrality</code>: assigns vertex scores based on
-long-term probabilities of random walks passing through the vertex at time t
-<li/><code>PageRank</code>: like <code>EigenvectorCentrality</code>, but with
-a constant probability of the
-random walk restarting at a uniform-randomly chosen vertex
-<li/><code>PageRankWithPriors</code>: like <code>PageRank</code>, but with a
-constant probability of the random
-walk restarting at a vertex drawn from an arbitrary distribution
-<li/><code>HITS</code>: assigns hubs-and-authorities scores to vertices based on
-complementary random walk processes
-<li/><code>HITSWithPriors</code>: analogous to <code>HITS</code>
-(see <code>PageRankWithPriors</code>)
-<li/><code>VoltageScorer</code>: assigns scores to vertices based on simulated
-current flow along edges
-</ul>
-
-</body>
-</html>
+++ /dev/null
-/*
- * Created on Jul 11, 2008
- *
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring.util;
-
-import org.apache.commons.collections15.Transformer;
-
-/**
- * A <code>Transformer<VEPair,Number></code> that delegates its operation to a
- * <code>Transformer<E,Number></code>. Mainly useful for technical reasons inside
- * AbstractIterativeScorer; in essence it allows the edge weight instance
- * variable to be of type <code>VEPair,W</code> even if the edge weight
- * <code>Transformer</code> only operates on edges.
- */
-public class DelegateToEdgeTransformer<V,E> implements
- Transformer<VEPair<V,E>,Number>
-{
- /**
- * The transformer to which this instance delegates its function.
- */
- protected Transformer<E,? extends Number> delegate;
-
- /**
- * Creates an instance with the specified delegate transformer.
- * @param delegate the Transformer to which this instance will delegate
- */
- public DelegateToEdgeTransformer(Transformer<E,? extends Number> delegate)
- {
- this.delegate = delegate;
- }
-
- /**
- * @see Transformer#transform(Object)
- */
- public Number transform(VEPair<V,E> arg0)
- {
- return delegate.transform(arg0.getE());
- }
-
-}
+++ /dev/null
-/*
- * Created on Jul 12, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring.util;
-
-import java.util.Collection;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.HITS;
-
-/**
- * Methods for assigning values (to be interpreted as prior probabilities) to vertices in the context
- * of random-walk-based scoring algorithms.
- */
-public class ScoringUtils
-{
- /**
- * Assigns a probability of 1/<code>roots.size()</code> to each of the elements of <code>roots</code>.
- * @param <V> the vertex type
- * @param roots the vertices to be assigned nonzero prior probabilities
- * @return
- */
- public static <V> Transformer<V, Double> getUniformRootPrior(Collection<V> roots)
- {
- final Collection<V> inner_roots = roots;
- Transformer<V, Double> distribution = new Transformer<V, Double>()
- {
- public Double transform(V input)
- {
- if (inner_roots.contains(input))
- return new Double(1.0 / inner_roots.size());
- else
- return 0.0;
- }
- };
-
- return distribution;
- }
-
- /**
- * Returns a Transformer that hub and authority values of 1/<code>roots.size()</code> to each
- * element of <code>roots</code>.
- * @param <V> the vertex type
- * @param roots the vertices to be assigned nonzero scores
- * @return a Transformer that assigns uniform prior hub/authority probabilities to each root
- */
- public static <V> Transformer<V, HITS.Scores> getHITSUniformRootPrior(Collection<V> roots)
- {
- final Collection<V> inner_roots = roots;
- Transformer<V, HITS.Scores> distribution =
- new Transformer<V, HITS.Scores>()
- {
- public HITS.Scores transform(V input)
- {
- if (inner_roots.contains(input))
- return new HITS.Scores(1.0 / inner_roots.size(), 1.0 / inner_roots.size());
- else
- return new HITS.Scores(0.0, 0.0);
- }
- };
- return distribution;
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- * Created on Jul 14, 2008
- *
- */
-package edu.uci.ics.jung.algorithms.scoring.util;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-
-/**
- * An edge weight function that assigns weights as uniform
- * transition probabilities.
- * For undirected edges, returns 1/degree(v) (where 'v' is the
- * vertex in the VEPair.
- * For directed edges, returns 1/outdegree(source(e)) (where 'e'
- * is the edge in the VEPair).
- * Throws an <code>IllegalArgumentException</code> if the input
- * edge is neither EdgeType.UNDIRECTED nor EdgeType.DIRECTED.
- *
- */
-public class UniformDegreeWeight<V, E> implements
- Transformer<VEPair<V, E>, Double>
-{
- private Hypergraph<V, E> graph;
-
- /**
- * Creates an instance for the specified graph.
- */
- public UniformDegreeWeight(Hypergraph<V, E> graph)
- {
- this.graph = graph;
- }
-
- /**
- * @see org.apache.commons.collections15.Transformer#transform(java.lang.Object)
- */
- public Double transform(VEPair<V, E> ve_pair)
- {
- E e = ve_pair.getE();
- V v = ve_pair.getV();
- EdgeType edge_type = graph.getEdgeType(e);
- if (edge_type == EdgeType.UNDIRECTED)
- return 1.0 / graph.degree(v);
- if (edge_type == EdgeType.DIRECTED)
- return 1.0 / graph.outDegree(graph.getSource(e));
- throw new IllegalArgumentException("can't handle edge type: " + edge_type);
- }
-
-}
+++ /dev/null
-/*
- * Created on Jul 11, 2008
- *
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring.util;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-
-/**
- * Assigns weights to directed edges (the edge of the vertex/edge pair) depending on
- * whether the vertex is the edge's source or its destination.
- * If the vertex v is the edge's source, assigns 1/outdegree(v).
- * Otherwise, assigns 1/indegree(w).
- * Throws <code>IllegalArgumentException</code> if the edge is not directed.
- */
-public class UniformInOut<V,E> implements Transformer<VEPair<V,E>, Double>
-{
- /**
- * The graph for which the edge weights are defined.
- */
- protected Graph<V,E> graph;
-
- /**
- * Creates an instance for the specified graph.
- * @param graph the graph for which the edge weights will be defined
- */
- public UniformInOut(Graph<V,E> graph)
- {
- this.graph = graph;
- }
-
- /**
- * @see org.apache.commons.collections15.Transformer#transform(Object)
- * @throws IllegalArgumentException
- */
- public Double transform(VEPair<V,E> ve_pair)
- {
- V v = ve_pair.getV();
- E e = ve_pair.getE();
- if (graph.getEdgeType(e) != EdgeType.DIRECTED)
- throw new IllegalArgumentException("This transformer only" +
- " operates on directed edges");
- return 1.0 / (graph.isSource(v, e) ?
- graph.outDegree(v) :
- graph.inDegree(v));
- }
-}
+++ /dev/null
-/*
- * Created on Jul 8, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring.util;
-
-/**
- * Convenience class for associating a vertex and an edge. Used, for example,
- * in contexts in which it is necessary to know the origin for an edge traversal
- * (that is, the direction in which an (undirected) edge is being traversed).
- *
- * @param <V> the vertex type
- * @param <E> the edge type
- */
-public class VEPair<V, E>
-{
- private V v;
- private E e;
-
- /**
- * Creates an instance with the specified vertex and edge
- * @param v the vertex to add
- * @param e the edge to add
- */
- public VEPair(V v, E e)
- {
- if (v == null || e == null)
- throw new IllegalArgumentException("elements must be non-null");
-
- this.v = v;
- this.e = e;
- }
-
- /**
- * Returns the vertex of this pair.
- */
- public V getV()
- {
- return v;
- }
-
- /**
- * Returns the edge of this pair.
- */
- public E getE()
- {
- return e;
- }
-}
+++ /dev/null
-/*
- * Created on Jul 18, 2008
- *
- * Copyright (c) 2008, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.scoring.util;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.VertexScorer;
-
-/**
- * A Transformer convenience wrapper around VertexScorer.
- */
-public class VertexScoreTransformer<V, S> implements Transformer<V, S>
-{
- /**
- * The VertexScorer instance that provides the values returned by <code>transform</code>.
- */
- protected VertexScorer<V,S> vs;
-
- /**
- * Creates an instance based on the specified VertexScorer.
- */
- public VertexScoreTransformer(VertexScorer<V,S> vs)
- {
- this.vs = vs;
- }
-
- /**
- * Returns the score for this vertex.
- */
- public S transform(V v)
- {
- return vs.getVertexScore(v);
- }
-
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Utility functions for assigning scores to graph elements. These include:
-<ul>
-<li/><code>EdgeWeight</code>: interface for classes that associate numeric values
-with edges
-<li/><code>ScoringUtils</code>: methods for calculating transition probabilities
-for random-walk-based algorithms.
-<li/><code>UniformOut</code>: an edge weight function that assigns weights as uniform
-transition probabilities to all outgoing edges of a vertex.
-<li/><code>UniformIncident</code>: an edge weight function that assigns
-weights as uniform transition probabilities to all incident edges of a
-vertex (useful for undirected graphs).
-<li/><code>VEPair</code>: analogous to <code>Pair</code> but specifically
-containing an associated vertex and edge.
-<li/><code>VertexEdgeWeight</code>: a subtype of <code>EdgeWeight</code> that
-assigns edge weights with respect to a specified 'source' vertex.
-</ul>
-
-</body>
-</html>
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Labels each node in the graph according to the BFS distance from the start node(s). If nodes are unreachable, then
- * they are assigned a distance of -1.
- * All nodes traversed at step k are marked as predecessors of their successors traversed at step k+1.
- * <p>
- * Running time is: O(m)
- * @author Scott White
- */
-public class BFSDistanceLabeler<V, E> {
-
- private Map<V, Number> distanceDecorator = new HashMap<V,Number>();
- private List<V> mCurrentList;
- private Set<V> mUnvisitedVertices;
- private List<V> mVerticesInOrderVisited;
- private Map<V,HashSet<V>> mPredecessorMap;
-
- /**
- * Creates a new BFS labeler for the specified graph and root set
- * The distances are stored in the corresponding Vertex objects and are of type MutableInteger
- */
- public BFSDistanceLabeler() {
- mPredecessorMap = new HashMap<V,HashSet<V>>();
- }
-
- /**
- * Returns the list of vertices visited in order of traversal
- * @return the list of vertices
- */
- public List<V> getVerticesInOrderVisited() {
- return mVerticesInOrderVisited;
- }
-
- /**
- * Returns the set of all vertices that were not visited
- * @return the list of unvisited vertices
- */
- public Set<V> getUnvisitedVertices() {
- return mUnvisitedVertices;
- }
-
- /**
- * Given a vertex, returns the shortest distance from any node in the root set to v
- * @param v the vertex whose distance is to be retrieved
- * @return the shortest distance from any node in the root set to v
- */
- public int getDistance(Hypergraph<V,E> g, V v) {
- if (!g.getVertices().contains(v)) {
- throw new IllegalArgumentException("Vertex is not contained in the graph.");
- }
-
- return distanceDecorator.get(v).intValue();
- }
-
- /**
- * Returns set of predecessors of the given vertex
- * @param v the vertex whose predecessors are to be retrieved
- * @return the set of predecessors
- */
- public Set<V> getPredecessors(V v) {
- return mPredecessorMap.get(v);
- }
-
- protected void initialize(Hypergraph<V,E> g, Set<V> rootSet) {
- mVerticesInOrderVisited = new ArrayList<V>();
- mUnvisitedVertices = new HashSet<V>();
- for(V currentVertex : g.getVertices()) {
- mUnvisitedVertices.add(currentVertex);
- mPredecessorMap.put(currentVertex,new HashSet<V>());
- }
-
- mCurrentList = new ArrayList<V>();
- for(V v : rootSet) {
- distanceDecorator.put(v, new Integer(0));
- mCurrentList.add(v);
- mUnvisitedVertices.remove(v);
- mVerticesInOrderVisited.add(v);
- }
- }
-
- private void addPredecessor(V predecessor,V sucessor) {
- HashSet<V> predecessors = mPredecessorMap.get(sucessor);
- predecessors.add(predecessor);
- }
-
- /**
- * Computes the distances of all the node from the starting root nodes. If there is more than one root node
- * the minimum distance from each root node is used as the designated distance to a given node. Also keeps track
- * of the predecessors of each node traversed as well as the order of nodes traversed.
- * @param graph the graph to label
- * @param rootSet the set of starting vertices to traverse from
- */
- public void labelDistances(Hypergraph<V,E> graph, Set<V> rootSet) {
-
- initialize(graph,rootSet);
-
- int distance = 1;
- while (true) {
- List<V> newList = new ArrayList<V>();
- for(V currentVertex : mCurrentList) {
- if(graph.containsVertex(currentVertex)) {
- for(V next : graph.getSuccessors(currentVertex)) {
- visitNewVertex(currentVertex,next, distance, newList);
- }
- }
- }
- if (newList.size() == 0) break;
- mCurrentList = newList;
- distance++;
- }
-
- for(V v : mUnvisitedVertices) {
- distanceDecorator.put(v,new Integer(-1));
- }
- }
-
- /**
- * Computes the distances of all the node from the specified root node. Also keeps track
- * of the predecessors of each node traversed as well as the order of nodes traversed.
- * @param graph the graph to label
- * @param root the single starting vertex to traverse from
- */
- public void labelDistances(Hypergraph<V,E> graph, V root) {
- labelDistances(graph, Collections.singleton(root));
- }
-
- private void visitNewVertex(V predecessor, V neighbor, int distance, List<V> newList) {
- if (mUnvisitedVertices.contains(neighbor)) {
- distanceDecorator.put(neighbor, new Integer(distance));
- newList.add(neighbor);
- mVerticesInOrderVisited.add(neighbor);
- mUnvisitedVertices.remove(neighbor);
- }
- int predecessorDistance = distanceDecorator.get(predecessor).intValue();
- int successorDistance = distanceDecorator.get(neighbor).intValue();
- if (predecessorDistance < successorDistance) {
- addPredecessor(predecessor,neighbor);
- }
- }
-
- /**
- * Returns a map from vertices to minimum distances from the original source(s).
- * Must be called after {@code labelDistances} in order to contain valid data.
- */
- public Map<V, Number> getDistanceDecorator() {
- return distanceDecorator;
- }
-}
+++ /dev/null
-/*
- * Created on Jul 9, 2005
- *
- * Copyright (c) 2005, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-
-import edu.uci.ics.jung.algorithms.util.BasicMapEntry;
-import edu.uci.ics.jung.algorithms.util.MapBinaryHeap;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * <p>Calculates distances in a specified graph, using
- * Dijkstra's single-source-shortest-path algorithm. All edge weights
- * in the graph must be nonnegative; if any edge with negative weight is
- * found in the course of calculating distances, an
- * <code>IllegalArgumentException</code> will be thrown.
- * (Note: this exception will only be thrown when such an edge would be
- * used to update a given tentative distance;
- * the algorithm does not check for negative-weight edges "up front".)
- *
- * <p>Distances and partial results are optionally cached (by this instance)
- * for later reference. Thus, if the 10 closest vertices to a specified source
- * vertex are known, calculating the 20 closest vertices does not require
- * starting Dijkstra's algorithm over from scratch.</p>
- *
- * <p>Distances are stored as double-precision values.
- * If a vertex is not reachable from the specified source vertex, no
- * distance is stored. <b>This is new behavior with version 1.4</b>;
- * the previous behavior was to store a value of
- * <code>Double.POSITIVE_INFINITY</code>. This change gives the algorithm
- * an approximate complexity of O(kD log k), where k is either the number of
- * requested targets or the number of reachable vertices (whichever is smaller),
- * and D is the average degree of a vertex.</p>
- *
- * <p> The elements in the maps returned by <code>getDistanceMap</code>
- * are ordered (that is, returned
- * by the iterator) by nondecreasing distance from <code>source</code>.</p>
- *
- * <p>Users are cautioned that distances calculated should be assumed to
- * be invalidated by changes to the graph, and should invoke <code>reset()</code>
- * when appropriate so that the distances can be recalculated.</p>
- *
- * @author Joshua O'Madadhain
- * @author Tom Nelson converted to jung2
- */
-public class DijkstraDistance<V,E> implements Distance<V>
-{
- protected Hypergraph<V,E> g;
- protected Transformer<E,? extends Number> nev;
- protected Map<V,SourceData> sourceMap; // a map of source vertices to an instance of SourceData
- protected boolean cached;
- protected double max_distance;
- protected int max_targets;
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified graph and the specified method of extracting weights
- * from edges, which caches results locally if and only if
- * <code>cached</code> is <code>true</code>.
- *
- * @param g the graph on which distances will be calculated
- * @param nev the class responsible for returning weights for edges
- * @param cached specifies whether the results are to be cached
- */
- public DijkstraDistance(Hypergraph<V,E> g, Transformer<E,? extends Number> nev, boolean cached) {
- this.g = g;
- this.nev = nev;
- this.sourceMap = new HashMap<V,SourceData>();
- this.cached = cached;
- this.max_distance = Double.POSITIVE_INFINITY;
- this.max_targets = Integer.MAX_VALUE;
- }
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified graph and the specified method of extracting weights
- * from edges, which caches results locally.
- *
- * @param g the graph on which distances will be calculated
- * @param nev the class responsible for returning weights for edges
- */
- public DijkstraDistance(Hypergraph<V,E> g, Transformer<E,? extends Number> nev) {
- this(g, nev, true);
- }
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified unweighted graph (that is, all weights 1) which
- * caches results locally.
- *
- * @param g the graph on which distances will be calculated
- */
- @SuppressWarnings("unchecked")
- public DijkstraDistance(Hypergraph<V,E> g) {
- this(g, new ConstantTransformer(1), true);
- }
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified unweighted graph (that is, all weights 1) which
- * caches results locally.
- *
- * @param g the graph on which distances will be calculated
- * @param cached specifies whether the results are to be cached
- */
- @SuppressWarnings("unchecked")
- public DijkstraDistance(Hypergraph<V,E> g, boolean cached) {
- this(g, new ConstantTransformer(1), cached);
- }
-
- /**
- * Implements Dijkstra's single-source shortest-path algorithm for
- * weighted graphs. Uses a <code>MapBinaryHeap</code> as the priority queue,
- * which gives this algorithm a time complexity of O(m lg n) (m = # of edges, n =
- * # of vertices).
- * This algorithm will terminate when any of the following have occurred (in order
- * of priority):
- * <ul>
- * <li> the distance to the specified target (if any) has been found
- * <li> no more vertices are reachable
- * <li> the specified # of distances have been found, or the maximum distance
- * desired has been exceeded
- * <li> all distances have been found
- * </ul>
- *
- * @param source the vertex from which distances are to be measured
- * @param numDests the number of distances to measure
- * @param targets the set of vertices to which distances are to be measured
- * @param regular boolean is true if we want regular SP dijkstra. False for MT.
- */
- private LinkedHashMap<V,Number> singleSourceShortestPath(V source, Collection<V> targets, int numDests, boolean regular)
- {
- SourceData sd = getSourceData(source);
-
- Set<V> to_get = new HashSet<V>();
- if (targets != null) {
- to_get.addAll(targets);
- Set<V> existing_dists = sd.distances.keySet();
- for(V o : targets) {
- if (existing_dists.contains(o))
- to_get.remove(o);
- }
- }
-
- // if we've exceeded the max distance or max # of distances we're willing to calculate, or
- // if we already have all the distances we need,
- // terminate
- if (sd.reached_max ||
- (targets != null && to_get.isEmpty()) ||
- (sd.distances.size() >= numDests))
- {
- return sd.distances;
- }
-
- while (!sd.unknownVertices.isEmpty() && (sd.distances.size() < numDests || !to_get.isEmpty()))
- {
- Map.Entry<V,Number> p = sd.getNextVertex();
- V v = p.getKey();
- double v_dist = p.getValue().doubleValue();
- to_get.remove(v);
- if (v_dist > this.max_distance)
- {
- // we're done; put this vertex back in so that we're not including
- // a distance beyond what we specified
- sd.restoreVertex(v, v_dist);
- sd.reached_max = true;
- break;
- }
- sd.dist_reached = v_dist;
-
- if (sd.distances.size() >= this.max_targets)
- {
- sd.reached_max = true;
- break;
- }
-
- for (E e : getEdgesToCheck(v) )
- {
- for (V w : g.getIncidentVertices(e))
- {
- if (!sd.distances.containsKey(w))
- {
- double edge_weight = nev.transform(e).doubleValue();
- if (edge_weight < 0)
- throw new IllegalArgumentException("Edges weights must be non-negative");
- double new_dist;
- if (regular == true) {
- new_dist = v_dist + edge_weight;
- } else {
- if (v_dist <= edge_weight) {
- new_dist = edge_weight;
- } else {
- new_dist = v_dist;
- }
- }
- if (!sd.estimatedDistances.containsKey(w))
- {
- sd.createRecord(w, e, new_dist);
- }
- else
- {
- double w_dist = ((Double)sd.estimatedDistances.get(w)).doubleValue();
- if (new_dist < w_dist) // update tentative distance & path for w
- sd.update(w, e, new_dist);
- }
- }
- }
- }
- }
- return sd.distances;
- }
-
- /**
- * Implements Dijkstra's single-source shortest-path algorithm for
- * weighted graphs. Uses a <code>MapBinaryHeap</code> as the priority queue,
- * which gives this algorithm a time complexity of O(m lg n) (m = # of edges, n =
- * # of vertices).
- * This algorithm will terminate when any of the following have occurred (in order
- * of priority):
- * <ul>
- * <li> the distance to the specified target (if any) has been found
- * <li> no more vertices are reachable
- * <li> the specified # of distances have been found, or the maximum distance
- * desired has been exceeded
- * <li> all distances have been found
- * </ul>
- *
- * @param source the vertex from which distances are to be measured
- * @param numDests the number of distances to measure
- * @param targets the set of vertices to which distances are to be measured
- */
- protected LinkedHashMap<V,Number> singleSourceShortestPath(V source, Collection<V> targets, int numDests)
- {
- return singleSourceShortestPath(source, targets, numDests, true);
- }
-
- /**
- * Implements Dijkstra's single-source shortest-path algorithm for
- * weighted graphs. Uses a <code>MapBinaryHeap</code> as the priority queue,
- * which gives this algorithm a time complexity of O(m lg n) (m = # of edges, n =
- * # of vertices).
- * This algorithm will terminate when any of the following have occurred (in order
- * of priority):
- * <ul>
- * <li> the distance to the specified target (if any) has been found
- * <li> no more vertices are reachable
- * <li> the specified # of distances have been found, or the maximum distance
- * desired has been exceeded
- * <li> all distances have been found
- * </ul>
- *
- * @param source the vertex from which distances are to be measured
- * @param numDests the number of distances to measure
- * @param targets the set of vertices to which distances are to be measured
- */
- protected LinkedHashMap<V,Number> singleSourceMaxThroughputPath(V source, Collection<V> targets, int numDests)
- {
- return singleSourceShortestPath(source, targets, numDests, false);
- }
-
- protected SourceData getSourceData(V source)
- {
- SourceData sd = sourceMap.get(source);
- if (sd == null)
- sd = new SourceData(source);
- return sd;
- }
-
- /**
- * Returns the set of edges incident to <code>v</code> that should be tested.
- * By default, this is the set of outgoing edges for instances of <code>Graph</code>,
- * the set of incident edges for instances of <code>Hypergraph</code>,
- * and is otherwise undefined.
- */
- protected Collection<E> getEdgesToCheck(V v)
- {
- if (g instanceof Graph)
- return ((Graph<V,E>)g).getOutEdges(v);
- else
- return g.getIncidentEdges(v);
-
- }
-
-
- /**
- * Returns the length of a shortest path from the source to the target vertex,
- * or null if the target is not reachable from the source.
- * If either vertex is not in the graph for which this instance
- * was created, throws <code>IllegalArgumentException</code>.
- *
- * @see #getDistanceMap(Object)
- * @see #getDistanceMap(Object,int)
- */
- public Number getDistance(V source, V target)
- {
- if (g.containsVertex(target) == false)
- throw new IllegalArgumentException("Specified target vertex " +
- target + " is not part of graph " + g);
- if (g.containsVertex(source) == false)
- throw new IllegalArgumentException("Specified source vertex " +
- source + " is not part of graph " + g);
-
- Set<V> targets = new HashSet<V>();
- targets.add(target);
- Map<V,Number> distanceMap = getDistanceMap(source, targets);
- return distanceMap.get(target);
- }
-
-
- /**
- * Returns a {@code Map} from each element {@code t} of {@code targets} to the
- * shortest-path distance from {@code source} to {@code t}.
- */
- public Map<V,Number> getDistanceMap(V source, Collection<V> targets)
- {
- if (g.containsVertex(source) == false)
- throw new IllegalArgumentException("Specified source vertex " +
- source + " is not part of graph " + g);
- if (targets.size() > max_targets)
- throw new IllegalArgumentException("size of target set exceeds maximum " +
- "number of targets allowed: " + this.max_targets);
-
- Map<V,Number> distanceMap =
- singleSourceShortestPath(source, targets,
- Math.min(g.getVertexCount(), max_targets));
- if (!cached)
- reset(source);
-
- return distanceMap;
- }
-
- /**
- * <p>Returns a <code>LinkedHashMap</code> which maps each vertex
- * in the graph (including the <code>source</code> vertex)
- * to its distance from the <code>source</code> vertex.
- * The map's iterator will return the elements in order of
- * increasing distance from <code>source</code>.</p>
- *
- * <p>The size of the map returned will be the number of
- * vertices reachable from <code>source</code>.</p>
- *
- * @see #getDistanceMap(Object,int)
- * @see #getDistance(Object,Object)
- * @param source the vertex from which distances are measured
- */
- public Map<V,Number> getDistanceMap(V source)
- {
- return getDistanceMap(source, Math.min(g.getVertexCount(), max_targets));
- }
-
-
-
- /**
- * <p>Returns a <code>LinkedHashMap</code> which maps each of the closest
- * <code>numDist</code> vertices to the <code>source</code> vertex
- * in the graph (including the <code>source</code> vertex)
- * to its distance from the <code>source</code> vertex. Throws
- * an <code>IllegalArgumentException</code> if <code>source</code>
- * is not in this instance's graph, or if <code>numDests</code> is
- * either less than 1 or greater than the number of vertices in the
- * graph.</p>
- *
- * <p>The size of the map returned will be the smaller of
- * <code>numDests</code> and the number of vertices reachable from
- * <code>source</code>.
- *
- * @see #getDistanceMap(Object)
- * @see #getDistance(Object,Object)
- * @param source the vertex from which distances are measured
- * @param numDests the number of vertices for which to measure distances
- */
- public LinkedHashMap<V,Number> getDistanceMap(V source, int numDests)
- {
-
- if(g.getVertices().contains(source) == false) {
- throw new IllegalArgumentException("Specified source vertex " +
- source + " is not part of graph " + g);
-
- }
- if (numDests < 1 || numDests > g.getVertexCount())
- throw new IllegalArgumentException("numDests must be >= 1 " +
- "and <= g.numVertices()");
-
- if (numDests > max_targets)
- throw new IllegalArgumentException("numDests must be <= the maximum " +
- "number of targets allowed: " + this.max_targets);
-
- LinkedHashMap<V,Number> distanceMap =
- singleSourceShortestPath(source, null, numDests);
-
- if (!cached)
- reset(source);
-
- return distanceMap;
- }
-
- /**
- * Allows the user to specify the maximum distance that this instance will calculate.
- * Any vertices past this distance will effectively be unreachable from the source, in
- * the sense that the algorithm will not calculate the distance to any vertices which
- * are farther away than this distance. A negative value for <code>max_dist</code>
- * will ensure that no further distances are calculated.
- *
- * <p>This can be useful for limiting the amount of time and space used by this algorithm
- * if the graph is very large.</p>
- *
- * <p>Note: if this instance has already calculated distances greater than <code>max_dist</code>,
- * and the results are cached, those results will still be valid and available; this limit
- * applies only to subsequent distance calculations.</p>
- * @see #setMaxTargets(int)
- */
- public void setMaxDistance(double max_dist)
- {
- this.max_distance = max_dist;
- for (V v : sourceMap.keySet())
- {
- SourceData sd = sourceMap.get(v);
- sd.reached_max = (this.max_distance <= sd.dist_reached) || (sd.distances.size() >= max_targets);
- }
- }
-
- /**
- * Allows the user to specify the maximum number of target vertices per source vertex
- * for which this instance will calculate distances. Once this threshold is reached,
- * any further vertices will effectively be unreachable from the source, in
- * the sense that the algorithm will not calculate the distance to any more vertices.
- * A negative value for <code>max_targets</code> will ensure that no further distances are calculated.
- *
- * <p>This can be useful for limiting the amount of time and space used by this algorithm
- * if the graph is very large.</p>
- *
- * <p>Note: if this instance has already calculated distances to a greater number of
- * targets than <code>max_targets</code>, and the results are cached, those results
- * will still be valid and available; this limit applies only to subsequent distance
- * calculations.</p>
- * @see #setMaxDistance(double)
- */
- public void setMaxTargets(int max_targets)
- {
- this.max_targets = max_targets;
- for (V v : sourceMap.keySet())
- {
- SourceData sd = sourceMap.get(v);
- sd.reached_max = (this.max_distance <= sd.dist_reached) || (sd.distances.size() >= max_targets);
- }
- }
-
- /**
- * Clears all stored distances for this instance.
- * Should be called whenever the graph is modified (edge weights
- * changed or edges added/removed). If the user knows that
- * some currently calculated distances are unaffected by a
- * change, <code>reset(V)</code> may be appropriate instead.
- *
- * @see #reset(Object)
- */
- public void reset()
- {
- sourceMap = new HashMap<V,SourceData>();
- }
-
- /**
- * Specifies whether or not this instance of <code>DijkstraShortestPath</code>
- * should cache its results (final and partial) for future reference.
- *
- * @param enable <code>true</code> if the results are to be cached, and
- * <code>false</code> otherwise
- */
- public void enableCaching(boolean enable)
- {
- this.cached = enable;
- }
-
- /**
- * Clears all stored distances for the specified source vertex
- * <code>source</code>. Should be called whenever the stored distances
- * from this vertex are invalidated by changes to the graph.
- *
- * @see #reset()
- */
- public void reset(V source)
- {
- sourceMap.put(source, null);
- }
-
- /**
- * Compares according to distances, so that the BinaryHeap knows how to
- * order the tree.
- */
- protected static class VertexComparator<V> implements Comparator<V>
- {
- private Map<V,Number> distances;
-
- protected VertexComparator(Map<V,Number> distances)
- {
- this.distances = distances;
- }
-
- public int compare(V o1, V o2)
- {
- return ((Double) distances.get(o1)).compareTo((Double) distances.get(o2));
- }
- }
-
- /**
- * For a given source vertex, holds the estimated and final distances,
- * tentative and final assignments of incoming edges on the shortest path from
- * the source vertex, and a priority queue (ordered by estimated distance)
- * of the vertices for which distances are unknown.
- *
- * @author Joshua O'Madadhain
- */
- protected class SourceData
- {
- protected LinkedHashMap<V,Number> distances;
- protected Map<V,Number> estimatedDistances;
- protected MapBinaryHeap<V> unknownVertices;
- protected boolean reached_max = false;
- protected double dist_reached = 0;
-
- protected SourceData(V source)
- {
- distances = new LinkedHashMap<V,Number>();
- estimatedDistances = new HashMap<V,Number>();
- unknownVertices = new MapBinaryHeap<V>(new VertexComparator<V>(estimatedDistances));
-
- sourceMap.put(source, this);
-
- // initialize priority queue
- estimatedDistances.put(source, new Double(0)); // distance from source to itself is 0
- unknownVertices.add(source);
- reached_max = false;
- dist_reached = 0;
- }
-
- protected Map.Entry<V,Number> getNextVertex()
- {
- V v = unknownVertices.remove();
- Double dist = (Double)estimatedDistances.remove(v);
- distances.put(v, dist);
- return new BasicMapEntry<V,Number>(v, dist);
- }
-
- protected void update(V dest, E tentative_edge, double new_dist)
- {
- estimatedDistances.put(dest, new_dist);
- unknownVertices.update(dest);
- }
-
- protected void createRecord(V w, E e, double new_dist)
- {
- estimatedDistances.put(w, new_dist);
- unknownVertices.add(w);
- }
-
- protected void restoreVertex(V v, double dist)
- {
- estimatedDistances.put(v, dist);
- unknownVertices.add(v);
- distances.remove(v);
- }
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * <p>Calculates distances and shortest paths using Dijkstra's
- * single-source-shortest-path algorithm. This is a lightweight
- * extension of <code>DijkstraDistance</code> that also stores
- * path information, so that the shortest paths can be reconstructed.</p>
- *
- * <p> The elements in the maps returned by
- * <code>getIncomingEdgeMap</code> are ordered (that is, returned
- * by the iterator) by nondecreasing distance from <code>source</code>.</p>
- *
- * @author Joshua O'Madadhain
- * @author Tom Nelson converted to jung2
- * @see DijkstraDistance
- */
-public class DijkstraShortestPath<V,E> extends DijkstraDistance<V,E> implements ShortestPath<V,E>
-{
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified graph and the specified method of extracting weights
- * from edges, which caches results locally if and only if
- * <code>cached</code> is <code>true</code>.
- *
- * @param g the graph on which distances will be calculated
- * @param nev the class responsible for returning weights for edges
- * @param cached specifies whether the results are to be cached
- */
- public DijkstraShortestPath(Graph<V,E> g, Transformer<E, ? extends Number> nev, boolean cached)
- {
- super(g, nev, cached);
- }
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified graph and the specified method of extracting weights
- * from edges, which caches results locally.
- *
- * @param g the graph on which distances will be calculated
- * @param nev the class responsible for returning weights for edges
- */
- public DijkstraShortestPath(Graph<V,E> g, Transformer<E, ? extends Number> nev)
- {
- super(g, nev);
- }
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified unweighted graph (that is, all weights 1) which
- * caches results locally.
- *
- * @param g the graph on which distances will be calculated
- */
- public DijkstraShortestPath(Graph<V,E> g)
- {
- super(g);
- }
-
- /**
- * <p>Creates an instance of <code>DijkstraShortestPath</code> for
- * the specified unweighted graph (that is, all weights 1) which
- * caches results locally.
- *
- * @param g the graph on which distances will be calculated
- * @param cached specifies whether the results are to be cached
- */
- public DijkstraShortestPath(Graph<V,E> g, boolean cached)
- {
- super(g, cached);
- }
-
- @Override
- protected SourceData getSourceData(V source)
- {
- SourceData sd = sourceMap.get(source);
- if (sd == null)
- sd = new SourcePathData(source);
- return sd;
- }
-
- /**
- * <p>Returns the last edge on a shortest path from <code>source</code>
- * to <code>target</code>, or null if <code>target</code> is not
- * reachable from <code>source</code>.</p>
- *
- * <p>If either vertex is not in the graph for which this instance
- * was created, throws <code>IllegalArgumentException</code>.</p>
- */
- public E getIncomingEdge(V source, V target)
- {
- if (!g.containsVertex(source))
- throw new IllegalArgumentException("Specified source vertex " +
- source + " is not part of graph " + g);
-
- if (!g.containsVertex(target))
- throw new IllegalArgumentException("Specified target vertex " +
- target + " is not part of graph " + g);
-
- Set<V> targets = new HashSet<V>();
- targets.add(target);
- singleSourceShortestPath(source, targets, g.getVertexCount());
- Map<V,E> incomingEdgeMap =
- ((SourcePathData)sourceMap.get(source)).incomingEdges;
- E incomingEdge = incomingEdgeMap.get(target);
-
- if (!cached)
- reset(source);
-
- return incomingEdge;
- }
-
- /**
- * <p>Returns a <code>LinkedHashMap</code> which maps each vertex
- * in the graph (including the <code>source</code> vertex)
- * to the last edge on the shortest path from the
- * <code>source</code> vertex.
- * The map's iterator will return the elements in order of
- * increasing distance from <code>source</code>.</p>
- *
- * @see DijkstraDistance#getDistanceMap(Object,int)
- * @see DijkstraDistance#getDistance(Object,Object)
- * @param source the vertex from which distances are measured
- */
- public Map<V,E> getIncomingEdgeMap(V source)
- {
- return getIncomingEdgeMap(source, g.getVertexCount());
- }
-
- /**
- * Returns a <code>List</code> of the edges on the shortest path from
- * <code>source</code> to <code>target</code>, in order of their
- * occurrence on this path.
- * If either vertex is not in the graph for which this instance
- * was created, throws <code>IllegalArgumentException</code>.
- */
- private List<E> getPath(V source, V target, boolean spath)
- {
- if(!g.containsVertex(source))
- throw new IllegalArgumentException("Specified source vertex " +
- source + " is not part of graph " + g);
-
- if(!g.containsVertex(target))
- throw new IllegalArgumentException("Specified target vertex " +
- target + " is not part of graph " + g);
-
- LinkedList<E> path = new LinkedList<E>();
-
- // collect path data; must use internal method rather than
- // calling getIncomingEdge() because getIncomingEdge() may
- // wipe out results if results are not cached
- Set<V> targets = new HashSet<V>();
- targets.add(target);
- if (spath == true) {
- singleSourceShortestPath(source, targets, g.getVertexCount());
- } else {
- singleSourceMaxThroughputPath(source, targets, g.getVertexCount());
- }
- Map<V,E> incomingEdges =
- ((SourcePathData)sourceMap.get(source)).incomingEdges;
-
- if (incomingEdges.isEmpty() || incomingEdges.get(target) == null)
- return path;
- V current = target;
- while (!current.equals(source))
- {
- E incoming = incomingEdges.get(current);
- path.addFirst(incoming);
- current = ((Graph<V,E>)g).getOpposite(current, incoming);
- }
- return path;
- }
-
- /**
- * Returns a <code>List</code> of the edges on the shortest path from
- * <code>source</code> to <code>target</code>, in order of their
- * occurrence on this path.
- * If either vertex is not in the graph for which this instance
- * was created, throws <code>IllegalArgumentException</code>.
- */
- public List<E> getPath(V source, V target)
- {
-
- return getPath(source,target, true);
- }
-
- /**
- * Returns a <code>List</code> of the edges on the Max Througput Shortest
- * path from <code>source</code> to <code>target</code>, in order of their
- * their occurrence on this path.
- * Important - Transformer fn should return the appropriate edge weight
- * for this API to return the Path Correctly.
- * If either vertex is not in the graph for which this instance
- * was created, throws <code>IllegalArgumentException</code>.
- */
- public List<E> getMaxThroughputPath(V source, V target)
- {
-
- return getPath(source,target, false);
- }
-
-
- /**
- * <p>Returns a <code>LinkedHashMap</code> which maps each of the closest
- * <code>numDist</code> vertices to the <code>source</code> vertex
- * in the graph (including the <code>source</code> vertex)
- * to the incoming edge along the path from that vertex. Throws
- * an <code>IllegalArgumentException</code> if <code>source</code>
- * is not in this instance's graph, or if <code>numDests</code> is
- * either less than 1 or greater than the number of vertices in the
- * graph.
- *
- * @see #getIncomingEdgeMap(Object)
- * @see #getPath(Object,Object)
- * @param source the vertex from which distances are measured
- * @param numDests the number of vertices for which to measure distances
- */
- public LinkedHashMap<V,E> getIncomingEdgeMap(V source, int numDests)
- {
- if (g.getVertices().contains(source) == false)
- throw new IllegalArgumentException("Specified source vertex " +
- source + " is not part of graph " + g);
-
- if (numDests < 1 || numDests > g.getVertexCount())
- throw new IllegalArgumentException("numDests must be >= 1 " +
- "and <= g.numVertices()");
-
- singleSourceShortestPath(source, null, numDests);
-
- LinkedHashMap<V,E> incomingEdgeMap =
- ((SourcePathData)sourceMap.get(source)).incomingEdges;
-
- if (!cached)
- reset(source);
-
- return incomingEdgeMap;
- }
-
-
- /**
- * For a given source vertex, holds the estimated and final distances,
- * tentative and final assignments of incoming edges on the shortest path from
- * the source vertex, and a priority queue (ordered by estimaed distance)
- * of the vertices for which distances are unknown.
- *
- * @author Joshua O'Madadhain
- */
- protected class SourcePathData extends SourceData
- {
- protected Map<V,E> tentativeIncomingEdges;
- protected LinkedHashMap<V,E> incomingEdges;
-
- protected SourcePathData(V source)
- {
- super(source);
- incomingEdges = new LinkedHashMap<V,E>();
- tentativeIncomingEdges = new HashMap<V,E>();
- }
-
- @Override
- public void update(V dest, E tentative_edge, double new_dist)
- {
- super.update(dest, tentative_edge, new_dist);
- tentativeIncomingEdges.put(dest, tentative_edge);
- }
-
- @Override
- public Map.Entry<V,Number> getNextVertex()
- {
- Map.Entry<V,Number> p = super.getNextVertex();
- V v = p.getKey();
- E incoming = tentativeIncomingEdges.remove(v);
- incomingEdges.put(v, incoming);
- return p;
- }
-
- @Override
- public void restoreVertex(V v, double dist)
- {
- super.restoreVertex(v, dist);
- E incoming = incomingEdges.get(v);
- tentativeIncomingEdges.put(v, incoming);
- }
-
- @Override
- public void createRecord(V w, E e, double new_dist)
- {
- super.createRecord(w, e, new_dist);
- tentativeIncomingEdges.put(w, e);
- }
-
- }
-
-}
+++ /dev/null
-/*
- * Created on Apr 2, 2004
- *
- * Copyright (c) 2004, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.Map;
-
-
-/**
- * An interface for classes which calculate the distance between
- * one vertex and another.
- *
- * @author Joshua O'Madadhain
- */
-public interface Distance<V>
-{
- /**
- * Returns the distance from the <code>source</code> vertex
- * to the <code>target</code> vertex. If <code>target</code>
- * is not reachable from <code>source</code>, returns null.
- */
- Number getDistance(V source, V target);
-
- /**
- * <p>Returns a <code>Map</code> which maps each vertex
- * in the graph (including the <code>source</code> vertex)
- * to its distance (represented as a Number)
- * from <code>source</code>. If any vertex
- * is not reachable from <code>source</code>, no
- * distance is stored for that vertex.
- */
- Map<V,Number> getDistanceMap(V source);
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.shortestpath;
-import java.util.Collection;
-
-import org.apache.commons.collections15.Transformer;
-
-import edu.uci.ics.jung.algorithms.scoring.ClosenessCentrality;
-import edu.uci.ics.jung.algorithms.scoring.util.VertexScoreTransformer;
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Statistics relating to vertex-vertex distances in a graph.
- *
- * <p>Formerly known as <code>GraphStatistics</code> in JUNG 1.x.</p>
- *
- * @author Scott White
- * @author Joshua O'Madadhain
- */
-public class DistanceStatistics
-{
- /**
- * For each vertex <code>v</code> in <code>graph</code>,
- * calculates the average shortest path length from <code>v</code>
- * to all other vertices in <code>graph</code> using the metric
- * specified by <code>d</code>, and returns the results in a
- * <code>Map</code> from vertices to <code>Double</code> values.
- * If there exists an ordered pair <code><u,v></code>
- * for which <code>d.getDistance(u,v)</code> returns <code>null</code>,
- * then the average distance value for <code>u</code> will be stored
- * as <code>Double.POSITIVE_INFINITY</code>).
- *
- * <p>Does not include self-distances (path lengths from <code>v</code>
- * to <code>v</code>).
- *
- * <p>To calculate the average distances, ignoring edge weights if any:
- * <pre>
- * Map distances = DistanceStatistics.averageDistances(g, new UnweightedShortestPath(g));
- * </pre>
- * To calculate the average distances respecting edge weights:
- * <pre>
- * DijkstraShortestPath dsp = new DijkstraShortestPath(g, nev);
- * Map distances = DistanceStatistics.averageDistances(g, dsp);
- * </pre>
- * where <code>nev</code> is an instance of <code>Transformer</code> that
- * is used to fetch the weight for each edge.
- *
- * @see edu.uci.ics.jung.algorithms.shortestpath.UnweightedShortestPath
- * @see edu.uci.ics.jung.algorithms.shortestpath.DijkstraDistance
- */
- public static <V,E> Transformer<V,Double> averageDistances(Hypergraph<V,E> graph, Distance<V> d)
- {
- final ClosenessCentrality<V,E> cc = new ClosenessCentrality<V,E>(graph, d);
- return new VertexScoreTransformer<V, Double>(cc);
- }
-
- /**
- * For each vertex <code>v</code> in <code>g</code>,
- * calculates the average shortest path length from <code>v</code>
- * to all other vertices in <code>g</code>, ignoring edge weights.
- * @see #diameter(Hypergraph)
- * @see edu.uci.ics.jung.algorithms.scoring.ClosenessCentrality
- */
- public static <V,E> Transformer<V, Double> averageDistances(Hypergraph<V,E> g)
- {
- final ClosenessCentrality<V,E> cc = new ClosenessCentrality<V,E>(g,
- new UnweightedShortestPath<V,E>(g));
- return new VertexScoreTransformer<V, Double>(cc);
- }
-
- /**
- * Returns the diameter of <code>g</code> using the metric
- * specified by <code>d</code>. The diameter is defined to be
- * the maximum, over all pairs of vertices <code>u,v</code>,
- * of the length of the shortest path from <code>u</code> to
- * <code>v</code>. If the graph is disconnected (that is, not
- * all pairs of vertices are reachable from one another), the
- * value returned will depend on <code>use_max</code>:
- * if <code>use_max == true</code>, the value returned
- * will be the the maximum shortest path length over all pairs of <b>connected</b>
- * vertices; otherwise it will be <code>Double.POSITIVE_INFINITY</code>.
- */
- public static <V, E> double diameter(Hypergraph<V,E> g, Distance<V> d, boolean use_max)
- {
- double diameter = 0;
- Collection<V> vertices = g.getVertices();
- for(V v : vertices) {
- for(V w : vertices) {
-
- if (v.equals(w) == false) // don't include self-distances
- {
- Number dist = d.getDistance(v, w);
- if (dist == null)
- {
- if (!use_max)
- return Double.POSITIVE_INFINITY;
- }
- else
- diameter = Math.max(diameter, dist.doubleValue());
- }
- }
- }
- return diameter;
- }
-
- /**
- * Returns the diameter of <code>g</code> using the metric
- * specified by <code>d</code>. The diameter is defined to be
- * the maximum, over all pairs of vertices <code>u,v</code>,
- * of the length of the shortest path from <code>u</code> to
- * <code>v</code>, or <code>Double.POSITIVE_INFINITY</code>
- * if any of these distances do not exist.
- * @see #diameter(Hypergraph, Distance, boolean)
- */
- public static <V, E> double diameter(Hypergraph<V,E> g, Distance<V> d)
- {
- return diameter(g, d, false);
- }
-
- /**
- * Returns the diameter of <code>g</code>, ignoring edge weights.
- * @see #diameter(Hypergraph, Distance, boolean)
- */
- public static <V, E> double diameter(Hypergraph<V,E> g)
- {
- return diameter(g, new UnweightedShortestPath<V,E>(g));
- }
-
-}
+++ /dev/null
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-import org.apache.commons.collections15.map.LazyMap;
-
-import edu.uci.ics.jung.graph.Forest;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * For the input Graph, creates a MinimumSpanningTree
- * using a variation of Prim's algorithm.
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- * @param <V>
- * @param <E>
- */
-public class MinimumSpanningForest<V,E> {
-
- protected Graph<V,E> graph;
- protected Forest<V,E> forest;
- protected Map<E,Double> weights;
-
- /**
- * Creates a Forest from the supplied Graph and supplied Factory, which
- * is used to create a new, empty Forest. If non-null, the supplied root
- * will be used as the root of the tree/forest. If the supplied root is
- * null, or not present in the Graph, then an arbitrary Graph vertex
- * will be selected as the root.
- * If the Minimum Spanning Tree does not include all vertices of the
- * Graph, then a leftover vertex is selected as a root, and another
- * tree is created.
- * @param graph the input graph
- * @param factory the factory to use to create the new forest
- * @param root the vertex of the graph to be used as the root of the forest
- * @param weights edge weights
- */
- public MinimumSpanningForest(Graph<V, E> graph, Factory<Forest<V,E>> factory,
- V root, Map<E, Double> weights) {
- this(graph, factory.create(), root, weights);
- }
-
- /**
- * Creates a minimum spanning forest from the supplied graph, populating the
- * supplied Forest, which must be empty.
- * If the supplied root is null, or not present in the Graph,
- * then an arbitrary Graph vertex will be selected as the root.
- * If the Minimum Spanning Tree does not include all vertices of the
- * Graph, then a leftover vertex is selected as a root, and another
- * tree is created
- * @param graph the Graph to find MST in
- * @param forest the Forest to populate. Must be empty
- * @param root first Tree root, may be null
- * @param weights edge weights, may be null
- */
- public MinimumSpanningForest(Graph<V, E> graph, Forest<V,E> forest,
- V root, Map<E, Double> weights) {
-
- if(forest.getVertexCount() != 0) {
- throw new IllegalArgumentException("Supplied Forest must be empty");
- }
- this.graph = graph;
- this.forest = forest;
- if(weights != null) {
- this.weights = weights;
- }
- Set<E> unfinishedEdges = new HashSet<E>(graph.getEdges());
- if(graph.getVertices().contains(root)) {
- this.forest.addVertex(root);
- }
- updateForest(forest.getVertices(), unfinishedEdges);
- }
-
- /**
- * Creates a minimum spanning forest from the supplied graph, populating the
- * supplied Forest, which must be empty.
- * If the supplied root is null, or not present in the Graph,
- * then an arbitrary Graph vertex will be selected as the root.
- * If the Minimum Spanning Tree does not include all vertices of the
- * Graph, then a leftover vertex is selected as a root, and another
- * tree is created
- * @param graph the Graph to find MST in
- * @param forest the Forest to populate. Must be empty
- * @param root first Tree root, may be null
- */
- @SuppressWarnings("unchecked")
- public MinimumSpanningForest(Graph<V, E> graph, Forest<V,E> forest,
- V root) {
-
- if(forest.getVertexCount() != 0) {
- throw new IllegalArgumentException("Supplied Forest must be empty");
- }
- this.graph = graph;
- this.forest = forest;
- this.weights = LazyMap.decorate(new HashMap<E,Double>(),
- new ConstantTransformer(1.0));
- Set<E> unfinishedEdges = new HashSet<E>(graph.getEdges());
- if(graph.getVertices().contains(root)) {
- this.forest.addVertex(root);
- }
- updateForest(forest.getVertices(), unfinishedEdges);
- }
-
- /**
- * Returns the generated forest.
- */
- public Forest<V,E> getForest() {
- return forest;
- }
-
- protected void updateForest(Collection<V> tv, Collection<E> unfinishedEdges) {
- double minCost = Double.MAX_VALUE;
- E nextEdge = null;
- V nextVertex = null;
- V currentVertex = null;
- for(E e : unfinishedEdges) {
-
- if(forest.getEdges().contains(e)) continue;
- // find the lowest cost edge, get its opposite endpoint,
- // and then update forest from its Successors
- Pair<V> endpoints = graph.getEndpoints(e);
- V first = endpoints.getFirst();
- V second = endpoints.getSecond();
- if(tv.contains(first) == true && tv.contains(second) == false) {
- if(weights.get(e) < minCost) {
- minCost = weights.get(e);
- nextEdge = e;
- currentVertex = first;
- nextVertex = second;
- }
- }
- if(graph.getEdgeType(e) == EdgeType.UNDIRECTED &&
- tv.contains(second) == true && tv.contains(first) == false) {
- if(weights.get(e) < minCost) {
- minCost = weights.get(e);
- nextEdge = e;
- currentVertex = second;
- nextVertex = first;
- }
- }
- }
-
- if(nextVertex != null && nextEdge != null) {
- unfinishedEdges.remove(nextEdge);
- forest.addEdge(nextEdge, currentVertex, nextVertex);
- updateForest(forest.getVertices(), unfinishedEdges);
- }
- Collection<V> leftovers = new HashSet<V>(graph.getVertices());
- leftovers.removeAll(forest.getVertices());
- if(leftovers.size() > 0) {
- V anotherRoot = leftovers.iterator().next();
- forest.addVertex(anotherRoot);
- updateForest(forest.getVertices(), unfinishedEdges);
- }
- }
-}
+++ /dev/null
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.Collection;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-
-import edu.uci.ics.jung.algorithms.cluster.WeakComponentClusterer;
-import edu.uci.ics.jung.algorithms.filters.FilterUtils;
-import edu.uci.ics.jung.graph.Forest;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.Tree;
-import edu.uci.ics.jung.graph.util.TreeUtils;
-
-/**
- * For the input Graph, creates a MinimumSpanningTree
- * using a variation of Prim's algorithm.
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- * @param <V>
- * @param <E>
- */
-@SuppressWarnings("unchecked")
-public class MinimumSpanningForest2<V,E> {
-
- protected Graph<V,E> graph;
- protected Forest<V,E> forest;
- protected Transformer<E,Double> weights =
- (Transformer<E,Double>)new ConstantTransformer<Double>(1.0);
-
- /**
- * create a Forest from the supplied Graph and supplied Factory, which
- * is used to create a new, empty Forest. If non-null, the supplied root
- * will be used as the root of the tree/forest. If the supplied root is
- * null, or not present in the Graph, then an arbitary Graph vertex
- * will be selected as the root.
- * If the Minimum Spanning Tree does not include all vertices of the
- * Graph, then a leftover vertex is selected as a root, and another
- * tree is created
- * @param graph
- * @param factory
- * @param weights
- */
- public MinimumSpanningForest2(Graph<V, E> graph,
- Factory<Forest<V,E>> factory,
- Factory<? extends Graph<V,E>> treeFactory,
- Transformer<E, Double> weights) {
- this(graph, factory.create(),
- treeFactory,
- weights);
- }
-
- /**
- * create a forest from the supplied graph, populating the
- * supplied Forest, which must be empty.
- * If the supplied root is null, or not present in the Graph,
- * then an arbitary Graph vertex will be selected as the root.
- * If the Minimum Spanning Tree does not include all vertices of the
- * Graph, then a leftover vertex is selected as a root, and another
- * tree is created
- * @param graph the Graph to find MST in
- * @param forest the Forest to populate. Must be empty
- * @param weights edge weights, may be null
- */
- public MinimumSpanningForest2(Graph<V, E> graph,
- Forest<V,E> forest,
- Factory<? extends Graph<V,E>> treeFactory,
- Transformer<E, Double> weights) {
-
- if(forest.getVertexCount() != 0) {
- throw new IllegalArgumentException("Supplied Forest must be empty");
- }
- this.graph = graph;
- this.forest = forest;
- if(weights != null) {
- this.weights = weights;
- }
-
- WeakComponentClusterer<V,E> wcc =
- new WeakComponentClusterer<V,E>();
- Set<Set<V>> component_vertices = wcc.transform(graph);
- Collection<Graph<V,E>> components =
- FilterUtils.createAllInducedSubgraphs(component_vertices, graph);
-
- for(Graph<V,E> component : components) {
- PrimMinimumSpanningTree<V,E> mst =
- new PrimMinimumSpanningTree<V,E>(treeFactory, this.weights);
- Graph<V,E> subTree = mst.transform(component);
- if(subTree instanceof Tree) {
- TreeUtils.addSubTree(forest, (Tree<V,E>)subTree, null, null);
- }
- }
- }
-
- /**
- * Returns the generated forest.
- */
- public Forest<V,E> getForest() {
- return forest;
- }
-}
+++ /dev/null
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.ConstantTransformer;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * For the input Graph, creates a MinimumSpanningTree
- * using a variation of Prim's algorithm.
- *
- * @author Tom Nelson - tomnelson@dev.java.net
- *
- * @param <V> the vertex type
- * @param <E> the edge type
- */
-@SuppressWarnings("unchecked")
-public class PrimMinimumSpanningTree<V,E> implements Transformer<Graph<V,E>,Graph<V,E>> {
-
- protected Factory<? extends Graph<V,E>> treeFactory;
- protected Transformer<E,Double> weights;
-
- /**
- * Creates an instance which generates a minimum spanning tree assuming constant edge weights.
- */
- public PrimMinimumSpanningTree(Factory<? extends Graph<V,E>> factory) {
- this(factory, new ConstantTransformer(1.0));
- }
-
- /**
- * Creates an instance which generates a minimum spanning tree using the input edge weights.
- */
- public PrimMinimumSpanningTree(Factory<? extends Graph<V,E>> factory,
- Transformer<E, Double> weights) {
- this.treeFactory = factory;
- if(weights != null) {
- this.weights = weights;
- }
- }
-
- /**
- * @param graph the Graph to find MST in
- */
- public Graph<V,E> transform(Graph<V,E> graph) {
- Set<E> unfinishedEdges = new HashSet<E>(graph.getEdges());
- Graph<V,E> tree = treeFactory.create();
- V root = findRoot(graph);
- if(graph.getVertices().contains(root)) {
- tree.addVertex(root);
- } else if(graph.getVertexCount() > 0) {
- // pick an arbitrary vertex to make root
- tree.addVertex(graph.getVertices().iterator().next());
- }
- updateTree(tree, graph, unfinishedEdges);
-
- return tree;
- }
-
- protected V findRoot(Graph<V,E> graph) {
- for(V v : graph.getVertices()) {
- if(graph.getInEdges(v).size() == 0) {
- return v;
- }
- }
- // if there is no obvious root, pick any vertex
- if(graph.getVertexCount() > 0) {
- return graph.getVertices().iterator().next();
- }
- // this graph has no vertices
- return null;
- }
-
- protected void updateTree(Graph<V,E> tree, Graph<V,E> graph, Collection<E> unfinishedEdges) {
- Collection<V> tv = tree.getVertices();
- double minCost = Double.MAX_VALUE;
- E nextEdge = null;
- V nextVertex = null;
- V currentVertex = null;
- for(E e : unfinishedEdges) {
-
- if(tree.getEdges().contains(e)) continue;
- // find the lowest cost edge, get its opposite endpoint,
- // and then update forest from its Successors
- Pair<V> endpoints = graph.getEndpoints(e);
- V first = endpoints.getFirst();
- V second = endpoints.getSecond();
- if((tv.contains(first) == true && tv.contains(second) == false)) {
- if(weights.transform(e) < minCost) {
- minCost = weights.transform(e);
- nextEdge = e;
- currentVertex = first;
- nextVertex = second;
- }
- } else if((tv.contains(second) == true && tv.contains(first) == false)) {
- if(weights.transform(e) < minCost) {
- minCost = weights.transform(e);
- nextEdge = e;
- currentVertex = second;
- nextVertex = first;
- }
- }
- }
-
- if(nextVertex != null && nextEdge != null) {
- unfinishedEdges.remove(nextEdge);
- tree.addEdge(nextEdge, currentVertex, nextVertex);
- updateTree(tree, graph, unfinishedEdges);
- }
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*
-* Created on Feb 12, 2004
-*/
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.Map;
-
-
-/**
- * An interface for algorithms that calculate shortest paths.
- */
-public interface ShortestPath<V, E>
-{
- /**
- * <p>Returns a <code>Map</code> which maps each vertex
- * in the graph (including the <code>source</code> vertex)
- * to the last edge on the shortest path from the
- * <code>source</code> vertex.
- */
- Map<V,E> getIncomingEdgeMap(V source);
-}
+++ /dev/null
-/*
- * Created on Jul 10, 2005
- *
- * Copyright (c) 2005, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * Utilities relating to the shortest paths in a graph.
- */
-public class ShortestPathUtils
-{
- /**
- * Returns a <code>List</code> of the edges on the shortest path from
- * <code>source</code> to <code>target</code>, in order of their
- * occurrence on this path.
- */
- public static <V, E> List<E> getPath(Graph<V,E> graph, ShortestPath<V,E> sp, V source, V target)
- {
- LinkedList<E> path = new LinkedList<E>();
-
- Map<V,E> incomingEdges = sp.getIncomingEdgeMap(source);
-
- if (incomingEdges.isEmpty() || incomingEdges.get(target) == null)
- return path;
- V current = target;
- while (!current.equals(source))
- {
- E incoming = incomingEdges.get(current);
- path.addFirst(incoming);
- Pair<V> endpoints = graph.getEndpoints(incoming);
- if(endpoints.getFirst().equals(current)) {
- current = endpoints.getSecond();
- } else {
- current = endpoints.getFirst();
- }
- //incoming.getOpposite(current);
- }
- return path;
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.shortestpath;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import edu.uci.ics.jung.graph.Hypergraph;
-
-/**
- * Computes the shortest path distances for graphs whose edges are not weighted (using BFS).
- *
- * @author Scott White
- */
-public class UnweightedShortestPath<V, E>
- implements ShortestPath<V,E>, Distance<V>
-{
- private Map<V,Map<V,Number>> mDistanceMap;
- private Map<V,Map<V,E>> mIncomingEdgeMap;
- private Hypergraph<V,E> mGraph;
- private Map<V, Number> distances = new HashMap<V,Number>();
-
- /**
- * Constructs and initializes algorithm
- * @param g the graph
- */
- public UnweightedShortestPath(Hypergraph<V,E> g)
- {
- mDistanceMap = new HashMap<V,Map<V,Number>>();
- mIncomingEdgeMap = new HashMap<V,Map<V,E>>();
- mGraph = g;
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.shortestpath.Distance#getDistance(Object, Object)
- */
- public Number getDistance(V source, V target)
- {
- Map<V, Number> sourceSPMap = getDistanceMap(source);
- return sourceSPMap.get(target);
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.shortestpath.Distance#getDistanceMap(Object)
- */
- public Map<V,Number> getDistanceMap(V source)
- {
- Map<V,Number> sourceSPMap = mDistanceMap.get(source);
- if (sourceSPMap == null)
- {
- computeShortestPathsFromSource(source);
- sourceSPMap = mDistanceMap.get(source);
- }
- return sourceSPMap;
- }
-
- /**
- * @see edu.uci.ics.jung.algorithms.shortestpath.ShortestPath#getIncomingEdgeMap(Object)
- */
- public Map<V,E> getIncomingEdgeMap(V source)
- {
- Map<V,E> sourceIEMap = mIncomingEdgeMap.get(source);
- if (sourceIEMap == null)
- {
- computeShortestPathsFromSource(source);
- sourceIEMap = mIncomingEdgeMap.get(source);
- }
- return sourceIEMap;
- }
-
-
- /**
- * Computes the shortest path distances from a given node to all other nodes.
- * @param source the source node
- */
- private void computeShortestPathsFromSource(V source)
- {
- BFSDistanceLabeler<V,E> labeler = new BFSDistanceLabeler<V,E>();
- labeler.labelDistances(mGraph, source);
- distances = labeler.getDistanceDecorator();
- Map<V,Number> currentSourceSPMap = new HashMap<V,Number>();
- Map<V,E> currentSourceEdgeMap = new HashMap<V,E>();
-
- for(V vertex : mGraph.getVertices()) {
-
- Number distanceVal = distances.get(vertex);
- // BFSDistanceLabeler uses -1 to indicate unreachable vertices;
- // don't bother to store unreachable vertices
- if (distanceVal != null && distanceVal.intValue() >= 0)
- {
- currentSourceSPMap.put(vertex, distanceVal);
- int minDistance = distanceVal.intValue();
- for(E incomingEdge : mGraph.getInEdges(vertex))
- {
- for (V neighbor : mGraph.getIncidentVertices(incomingEdge))
- {
- if (neighbor.equals(vertex))
- continue;
-// V neighbor = mGraph.getOpposite(vertex, incomingEdge);
-
- Number predDistanceVal = distances.get(neighbor);
-
- int pred_distance = predDistanceVal.intValue();
- if (pred_distance < minDistance && pred_distance >= 0)
- {
- minDistance = predDistanceVal.intValue();
- currentSourceEdgeMap.put(vertex, incomingEdge);
- }
- }
- }
- }
- }
- mDistanceMap.put(source, currentSourceSPMap);
- mIncomingEdgeMap.put(source, currentSourceEdgeMap);
- }
-
- /**
- * Clears all stored distances for this instance.
- * Should be called whenever the graph is modified (edge weights
- * changed or edges added/removed). If the user knows that
- * some currently calculated distances are unaffected by a
- * change, <code>reset(V)</code> may be appropriate instead.
- *
- * @see #reset(Object)
- */
- public void reset()
- {
- mDistanceMap.clear();
- mIncomingEdgeMap.clear();
- }
-
- /**
- * Clears all stored distances for the specified source vertex
- * <code>source</code>. Should be called whenever the stored distances
- * from this vertex are invalidated by changes to the graph.
- *
- * @see #reset()
- */
- public void reset(V v)
- {
- mDistanceMap.remove(v);
- mIncomingEdgeMap.remove(v);
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Provides interfaces and classes for calculating (geodesic) distances and shortest paths. Currently includes:
-<ul>
-<li/><code>DijkstraDistance</code>: finds the distances from a specified source vertex to other vertices in a
-weighted graph with no negative cycles
-<li/><code>DijkstraShortestPath</code>: extends <code>DijkstraDistance</code>, also finds shortest paths
-<li/><code>Distance</code>: an interface for defining vertex-vertex distances
-<li/><code>PrimMinimumSpanningTree</code>: identifies the spanning tree for a graph of least total edge weight
-<li/><code>ShortestPath</code>: an interface for shortest-path algorithms
-<li/><code>ShortestPathUtils</code>: utility functions for manipulating shortest paths
-<li/><code>UnweightedShortestPath</code>: finds the distances from a specified source vertex to other vertices in an
-unweighted graph
-</ul>
-
-</body>
-</html>
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Apr 21, 2004
- */
-package edu.uci.ics.jung.algorithms.transformation;
-
-import org.apache.commons.collections15.Factory;
-
-import edu.uci.ics.jung.graph.DirectedGraph;
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.UndirectedGraph;
-import edu.uci.ics.jung.graph.util.EdgeType;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * <p>Functions for transforming graphs into directed or undirected graphs.</p>
- *
- *
- * @author Danyel Fisher
- * @author Joshua O'Madadhain
- */
-public class DirectionTransformer
-{
-
- /**
- * Transforms <code>graph</code> (which may be of any directionality)
- * into an undirected graph. (This may be useful for
- * visualization tasks).
- * Specifically:
- * <ul>
- * <li/>Vertices are copied from <code>graph</code>.
- * <li/>Directed edges are 'converted' into a single new undirected edge in the new graph.
- * <li/>Each undirected edge (if any) in <code>graph</code> is 'recreated' with a new undirected edge in the new
- * graph if <code>create_new</code> is true, or copied from <code>graph</code> otherwise.
- * </ul>
- *
- * @param graph the graph to be transformed
- * @param create_new specifies whether existing undirected edges are to be copied or recreated
- * @param graph_factory used to create the new graph object
- * @param edge_factory used to create new edges
- * @return the transformed <code>Graph</code>
- */
- public static <V,E> UndirectedGraph<V,E> toUndirected(Graph<V,E> graph,
- Factory<UndirectedGraph<V,E>> graph_factory,
- Factory<E> edge_factory, boolean create_new)
- {
- UndirectedGraph<V,E> out = graph_factory.create();
-
- for (V v : graph.getVertices())
- out.addVertex(v);
-
- for (E e : graph.getEdges())
- {
- Pair<V> endpoints = graph.getEndpoints(e);
- V v1 = endpoints.getFirst();
- V v2 = endpoints.getSecond();
- E to_add;
- if (graph.getEdgeType(e) == EdgeType.DIRECTED || create_new)
- to_add = edge_factory.create();
- else
- to_add = e;
- out.addEdge(to_add, v1, v2, EdgeType.UNDIRECTED);
- }
- return out;
- }
-
- /**
- * Transforms <code>graph</code> (which may be of any directionality)
- * into a directed graph.
- * Specifically:
- * <ul>
- * <li/>Vertices are copied from <code>graph</code>.
- * <li/>Undirected edges are 'converted' into two new antiparallel directed edges in the new graph.
- * <li/>Each directed edge (if any) in <code>graph</code> is 'recreated' with a new edge in the new
- * graph if <code>create_new</code> is true, or copied from <code>graph</code> otherwise.
- * </ul>
- *
- * @param graph the graph to be transformed
- * @param create_new specifies whether existing directed edges are to be copied or recreated
- * @param graph_factory used to create the new graph object
- * @param edge_factory used to create new edges
- * @return the transformed <code>Graph</code>
- */
- public static <V,E> Graph<V,E> toDirected(Graph<V,E> graph, Factory<DirectedGraph<V,E>> graph_factory,
- Factory<E> edge_factory, boolean create_new)
- {
- DirectedGraph<V,E> out = graph_factory.create();
-
- for (V v : graph.getVertices())
- out.addVertex(v);
-
- for (E e : graph.getEdges())
- {
- Pair<V> endpoints = graph.getEndpoints(e);
- if (graph.getEdgeType(e) == EdgeType.UNDIRECTED)
- {
- V v1 = endpoints.getFirst();
- V v2 = endpoints.getSecond();
- out.addEdge(edge_factory.create(), v1, v2, EdgeType.DIRECTED);
- out.addEdge(edge_factory.create(), v2, v1, EdgeType.DIRECTED);
- }
- else // if the edge is directed, just add it
- {
- V source = graph.getSource(e);
- V dest = graph.getDest(e);
- E to_add = create_new ? edge_factory.create() : e;
- out.addEdge(to_add, source, dest, EdgeType.DIRECTED);
- }
-
- }
- return out;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Apr 21, 2004
- */
-package edu.uci.ics.jung.algorithms.transformation;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.Predicate;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.Hypergraph;
-import edu.uci.ics.jung.graph.KPartiteGraph;
-
-/**
- * Methods for creating a "folded" graph based on a k-partite graph or a
- * hypergraph.
- *
- * <p>A "folded" graph is derived from a k-partite graph by identifying
- * a partition of vertices which will become the vertices of the new graph, copying
- * these vertices into the new graph, and then connecting those vertices whose
- * original analogues were connected indirectly through elements
- * of other partitions.</p>
- *
- * <p>A "folded" graph is derived from a hypergraph by creating vertices based on
- * either the vertices or the hyperedges of the original graph, and connecting
- * vertices in the new graph if their corresponding vertices/hyperedges share a
- * connection with a common hyperedge/vertex.</p>
- *
- * @author Danyel Fisher
- * @author Joshua O'Madadhain
- */
-public class FoldingTransformer<V,E>
-{
-
- /**
- * Converts <code>g</code> into a unipartite graph whose vertex set is the
- * vertices of <code>g</code>'s partition <code>p</code>. For vertices
- * <code>a</code> and <code>b</code> in this partition, the resultant
- * graph will include the edge <code>(a,b)</code> if the original graph
- * contains edges <code>(a,c)</code> and <code>(c,b)</code> for at least
- * one vertex <code>c</code>.
- *
- * <p>The vertices of the new graph are the same as the vertices of the
- * appropriate partition in the old graph; the edges in the new graph are
- * created by the input edge <code>Factory</code>.</p>
- *
- * <p>If there is more than 1 such vertex <code>c</code> for a given pair
- * <code>(a,b)</code>, the type of the output graph will determine whether
- * it will contain parallel edges or not.</p>
- *
- * <p>This function will not create self-loops.</p>
- *
- * @param <V> vertex type
- * @param <E> input edge type
- * @param g input k-partite graph
- * @param p predicate specifying vertex partition
- * @param graph_factory factory used to create the output graph
- * @param edge_factory factory used to create the edges in the new graph
- * @return a copy of the input graph folded with respect to the input partition
- */
- public static <V,E> Graph<V,E> foldKPartiteGraph(KPartiteGraph<V,E> g, Predicate<V> p,
- Factory<Graph<V,E>> graph_factory, Factory<E> edge_factory)
- {
- Graph<V,E> newGraph = graph_factory.create();
-
- // get vertices for the specified partition
- Collection<V> vertices = g.getVertices(p);
- for (V v : vertices)
- {
- newGraph.addVertex(v);
- for (V s : g.getSuccessors(v))
- {
- for (V t : g.getSuccessors(s))
- {
- if (!vertices.contains(t) || t.equals(v))
- continue;
- newGraph.addVertex(t);
- newGraph.addEdge(edge_factory.create(), v, t);
- }
- }
- }
- return newGraph;
- }
-
- /**
- * Converts <code>g</code> into a unipartite graph whose vertices are the
- * vertices of <code>g</code>'s partition <code>p</code>, and whose edges
- * consist of collections of the intermediate vertices from other partitions.
- * For vertices
- * <code>a</code> and <code>b</code> in this partition, the resultant
- * graph will include the edge <code>(a,b)</code> if the original graph
- * contains edges <code>(a,c)</code> and <code>(c,b)</code> for at least
- * one vertex <code>c</code>.
- *
- * <p>The vertices of the new graph are the same as the vertices of the
- * appropriate partition in the old graph; the edges in the new graph are
- * collections of the intermediate vertices <code>c</code>.</p>
- *
- * <p>This function will not create self-loops.</p>
- *
- * @param <V> vertex type
- * @param <E> input edge type
- * @param g input k-partite graph
- * @param p predicate specifying vertex partition
- * @param graph_factory factory used to create the output graph
- * @return the result of folding g into unipartite graph whose vertices
- * are those of the <code>p</code> partition of g
- */
- public static <V,E> Graph<V, Collection<V>> foldKPartiteGraph(KPartiteGraph<V,E> g, Predicate<V> p,
- Factory<Graph<V, Collection<V>>> graph_factory)
- {
- Graph<V, Collection<V>> newGraph = graph_factory.create();
-
- // get vertices for the specified partition, copy into new graph
- Collection<V> vertices = g.getVertices(p);
-
- for (V v : vertices)
- {
- newGraph.addVertex(v);
- for (V s : g.getSuccessors(v))
- {
- for (V t : g.getSuccessors(s))
- {
- if (!vertices.contains(t) || t.equals(v))
- continue;
- newGraph.addVertex(t);
- Collection<V> v_coll = newGraph.findEdge(v, t);
- if (v_coll == null)
- {
- v_coll = new ArrayList<V>();
- newGraph.addEdge(v_coll, v, t);
- }
- v_coll.add(s);
- }
- }
- }
- return newGraph;
- }
-
- /**
- * Creates a <code>Graph</code> which is an edge-folded version of <code>h</code>, where
- * hyperedges are replaced by k-cliques in the output graph.
- *
- * <p>The vertices of the new graph are the same objects as the vertices of
- * <code>h</code>, and <code>a</code>
- * is connected to <code>b</code> in the new graph if the corresponding vertices
- * in <code>h</code> are connected by a hyperedge. Thus, each hyperedge with
- * <i>k</i> vertices in <code>h</code> induces a <i>k</i>-clique in the new graph.</p>
- *
- * <p>The edges of the new graph consist of collections of each hyperedge that connected
- * the corresponding vertex pair in the original graph.</p>
- *
- * @param <V> vertex type
- * @param <E> input edge type
- * @param h hypergraph to be folded
- * @param graph_factory factory used to generate the output graph
- * @return a copy of the input graph where hyperedges are replaced by cliques
- */
- public static <V,E> Graph<V, Collection<E>> foldHypergraphEdges(Hypergraph<V,E> h,
- Factory<Graph<V, Collection<E>>> graph_factory)
- {
- Graph<V, Collection<E>> target = graph_factory.create();
-
- for (V v : h.getVertices())
- target.addVertex(v);
-
- for (E e : h.getEdges())
- {
- ArrayList<V> incident = new ArrayList<V>(h.getIncidentVertices(e));
- populateTarget(target, e, incident);
- }
- return target;
- }
-
-
- /**
- * Creates a <code>Graph</code> which is an edge-folded version of <code>h</code>, where
- * hyperedges are replaced by k-cliques in the output graph.
- *
- * <p>The vertices of the new graph are the same objects as the vertices of
- * <code>h</code>, and <code>a</code>
- * is connected to <code>b</code> in the new graph if the corresponding vertices
- * in <code>h</code> are connected by a hyperedge. Thus, each hyperedge with
- * <i>k</i> vertices in <code>h</code> induces a <i>k</i>-clique in the new graph.</p>
- *
- * <p>The edges of the new graph are generated by the specified edge factory.</p>
- *
- * @param <V> vertex type
- * @param <E> input edge type
- * @param h hypergraph to be folded
- * @param graph_factory factory used to generate the output graph
- * @param edge_factory factory used to create the new edges
- * @return a copy of the input graph where hyperedges are replaced by cliques
- */
- public static <V,E> Graph<V,E> foldHypergraphEdges(Hypergraph<V,E> h,
- Factory<Graph<V,E>> graph_factory, Factory<E> edge_factory)
- {
- Graph<V,E> target = graph_factory.create();
-
- for (V v : h.getVertices())
- target.addVertex(v);
-
- for (E e : h.getEdges())
- {
- ArrayList<V> incident = new ArrayList<V>(h.getIncidentVertices(e));
- for (int i = 0; i < incident.size(); i++)
- for (int j = i+1; j < incident.size(); j++)
- target.addEdge(edge_factory.create(), incident.get(i), incident.get(j));
- }
- return target;
- }
-
- /**
- * Creates a <code>Graph</code> which is a vertex-folded version of <code>h</code>, whose
- * vertices are the input's hyperedges and whose edges are induced by adjacent hyperedges
- * in the input.
- *
- * <p>The vertices of the new graph are the same objects as the hyperedges of
- * <code>h</code>, and <code>a</code>
- * is connected to <code>b</code> in the new graph if the corresponding edges
- * in <code>h</code> have a vertex in common. Thus, each vertex incident to
- * <i>k</i> edges in <code>h</code> induces a <i>k</i>-clique in the new graph.</p>
- *
- * <p>The edges of the new graph are created by the specified factory.</p>
- *
- * @param <V> vertex type
- * @param <E> input edge type
- * @param <F> output edge type
- * @param h hypergraph to be folded
- * @param graph_factory factory used to generate the output graph
- * @param edge_factory factory used to generate the output edges
- * @return a transformation of the input graph whose vertices correspond to the input's hyperedges
- * and edges are induced by hyperedges sharing vertices in the input
- */
- public static <V,E,F> Graph<E,F> foldHypergraphVertices(Hypergraph<V,E> h,
- Factory<Graph<E,F>> graph_factory, Factory<F> edge_factory)
- {
- Graph<E,F> target = graph_factory.create();
-
- for (E e : h.getEdges())
- target.addVertex(e);
-
- for (V v : h.getVertices())
- {
- ArrayList<E> incident = new ArrayList<E>(h.getIncidentEdges(v));
- for (int i = 0; i < incident.size(); i++)
- for (int j = i+1; j < incident.size(); j++)
- target.addEdge(edge_factory.create(), incident.get(i), incident.get(j));
- }
-
- return target;
- }
-
- /**
- * Creates a <code>Graph</code> which is a vertex-folded version of <code>h</code>, whose
- * vertices are the input's hyperedges and whose edges are induced by adjacent hyperedges
- * in the input.
- *
- * <p>The vertices of the new graph are the same objects as the hyperedges of
- * <code>h</code>, and <code>a</code>
- * is connected to <code>b</code> in the new graph if the corresponding edges
- * in <code>h</code> have a vertex in common. Thus, each vertex incident to
- * <i>k</i> edges in <code>h</code> induces a <i>k</i>-clique in the new graph.</p>
- *
- * <p>The edges of the new graph consist of collections of each vertex incident to
- * the corresponding hyperedge pair in the original graph.</p>
- *
- * @param h hypergraph to be folded
- * @param graph_factory factory used to generate the output graph
- * @return a transformation of the input graph whose vertices correspond to the input's hyperedges
- * and edges are induced by hyperedges sharing vertices in the input
- */
- public Graph<E,Collection<V>> foldHypergraphVertices(Hypergraph<V,E> h,
- Factory<Graph<E,Collection<V>>> graph_factory)
- {
- Graph<E,Collection<V>> target = graph_factory.create();
-
- for (E e : h.getEdges())
- target.addVertex(e);
-
- for (V v : h.getVertices())
- {
- ArrayList<E> incident = new ArrayList<E>(h.getIncidentEdges(v));
- populateTarget(target, v, incident);
- }
- return target;
- }
-
- /**
- * @param target
- * @param e
- * @param incident
- */
- private static <S,T> void populateTarget(Graph<S, Collection<T>> target, T e,
- ArrayList<S> incident)
- {
- for (int i = 0; i < incident.size(); i++)
- {
- S v1 = incident.get(i);
- for (int j = i+1; j < incident.size(); j++)
- {
- S v2 = incident.get(j);
- Collection<T> e_coll = target.findEdge(v1, v2);
- if (e_coll == null)
- {
- e_coll = new ArrayList<T>();
- target.addEdge(e_coll, v1, v2);
- }
- e_coll.add(e);
- }
- }
- }
-
-}
\ No newline at end of file
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.transformation;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.collections15.Factory;
-import org.apache.commons.collections15.Transformer;
-import org.apache.commons.collections15.functors.MapTransformer;
-
-import edu.uci.ics.jung.algorithms.blockmodel.VertexPartition;
-import edu.uci.ics.jung.graph.Graph;
-
-/**
- * This class transforms a graph with a known vertex partitioning into a graph whose
- * vertices correspond to the input graph's partitions. Two vertices in the output graph
- * are connected if and only if there exists at least one edge between vertices in the
- * corresponding partitions of the input graph. If the output graph permits parallel edges,
- * there will be an edge connecting two vertices in the new graph for each such
- * edge connecting constituent vertices in the input graph.
- *
- * <p>Concept based on Danyel Fisher's <code>GraphCollapser</code> in JUNG 1.x.
- *
- */
-public class VertexPartitionCollapser<V,E,CV,CE>
-{
- protected Factory<Graph<CV,CE>> graph_factory;
- protected Factory<CV> vertex_factory;
- protected Factory<CE> edge_factory;
- protected Map<Set<V>, CV> set_collapsedv;
-
- /**
- * Creates an instance with the specified graph and element factories.
- * @param vertex_factory used to construct the vertices of the new graph
- * @param edge_factory used to construct the edges of the new graph
- * @param graph_factory used to construct the new graph
- */
- public VertexPartitionCollapser(Factory<Graph<CV,CE>> graph_factory,
- Factory<CV> vertex_factory, Factory<CE> edge_factory)
- {
- this.graph_factory = graph_factory;
- this.vertex_factory = vertex_factory;
- this.edge_factory = edge_factory;
- this.set_collapsedv = new HashMap<Set<V>, CV>();
- }
-
- /**
- * Creates a new graph whose vertices correspond to the partitions of the supplied graph.
- * @param partitioning
- * @return a new graph whose vertices correspond to the partitions of the supplied graph
- */
- public Graph<CV,CE> collapseVertexPartitions(VertexPartition<V,E> partitioning)
- {
- Graph<V,E> original = partitioning.getGraph();
- Graph<CV, CE> collapsed = graph_factory.create();
-
- // create vertices in new graph corresponding to equivalence sets in the original graph
- for (Set<V> set : partitioning.getVertexPartitions())
- {
- CV cv = vertex_factory.create();
- collapsed.addVertex(vertex_factory.create());
- set_collapsedv.put(set, cv);
- }
-
- // create edges in new graph corresponding to edges in original graph
- for (E e : original.getEdges())
- {
- Collection<V> incident = original.getIncidentVertices(e);
- Collection<CV> collapsed_vertices = new HashSet<CV>();
- Map<V, Set<V>> vertex_partitions = partitioning.getVertexToPartitionMap();
- // collect the collapsed vertices corresponding to the original incident vertices
- for (V v : incident)
- collapsed_vertices.add(set_collapsedv.get(vertex_partitions.get(v)));
- // if there's only one collapsed vertex, continue (no edges to create)
- if (collapsed_vertices.size() > 1)
- {
- CE ce = edge_factory.create();
- collapsed.addEdge(ce, collapsed_vertices);
- }
- }
- return collapsed;
- }
-
- /**
- * Returns a transformer from vertex sets in the original graph to collapsed vertices
- * in the transformed graph.
- */
- public Transformer<Set<V>, CV> getSetToCollapsedVertexTransformer()
- {
- return MapTransformer.getInstance(set_collapsedv);
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Mechanisms for graph transformation. These currently include:
-<ul>
-<li/><code>DirectionTransformer</code>: generates graphs where input undirected
-edges have been converted to directed edges, or vice versa
-<li/><code>FoldingTransformer</code>: transforms k-partite graphs or hypergraphs
-into unipartite graphs
-<li/><code>VertexPartitionCollapser</code>: transforms a graph, given a
-partition of its vertices into disjoint sets, into a graph in which each
-of these disjoint sets has been 'collapsed' into a single new vertex.
-</ul>
-
-</body>
-</html>
+++ /dev/null
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * An simple minimal implementation of <code>Map.Entry</code>.
- *
- * @param <K> the key type
- * @param <V> the value type
- */
-public class BasicMapEntry<K,V> implements Map.Entry<K,V> {
- final K key;
- V value;
-
- /**
- * Create new entry.
- */
- public BasicMapEntry(K k, V v) {
- value = v;
- key = k;
- }
-
- public K getKey() {
- return key;
- }
-
- public V getValue() {
- return value;
- }
-
- public V setValue(V newValue) {
- V oldValue = value;
- value = newValue;
- return oldValue;
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public boolean equals(Object o) {
- if (!(o instanceof Map.Entry))
- return false;
- Map.Entry e = (Map.Entry)o;
- Object k1 = getKey();
- Object k2 = e.getKey();
- if (k1 == k2 || (k1 != null && k1.equals(k2))) {
- Object v1 = getValue();
- Object v2 = e.getValue();
- if (v1 == v2 || (v1 != null && v1.equals(v2)))
- return true;
- }
- return false;
- }
-
- @Override
- public int hashCode() {
- return (key==null ? 0 : key.hashCode()) ^
- (value==null ? 0 : value.hashCode());
- }
-
- @Override
- public String toString() {
- return getKey() + "=" + getValue();
- }
-
- /**
- * This method is invoked whenever the value in an entry is
- * overwritten by an invocation of put(k,v) for a key k that's already
- * in the HashMap.
- */
- void recordAccess(HashMap<K,V> m) {
- }
-
- /**
- * This method is invoked whenever the entry is
- * removed from the table.
- */
- void recordRemoval(HashMap<K,V> m) {
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * An implementation of <code>Map</code> that returns the constructor-supplied
- * value for any input.
- *
- * @param <K> the key type
- * @param <V> the value type
- */
-public class ConstantMap<K,V> implements Map<K,V> {
-
- private Map<K,V> delegate;
-
- /**
- * Creates an instance whose {@code get} method always returns {@code value}.
- */
- public ConstantMap(V value) {
- delegate = Collections.<K,V>unmodifiableMap(Collections.<K,V>singletonMap(null, value));
- }
-
- public V get(Object key) {
- return delegate.get(null);
- }
-
- public void clear() {
- delegate.clear();
- }
-
- public boolean containsKey(Object key) {
- return true;
- }
-
- public boolean containsValue(Object value) {
- return delegate.containsValue(value);
- }
-
- public Set<Entry<K, V>> entrySet() {
- return delegate.entrySet();
- }
-
- @Override
- public boolean equals(Object o) {
- return delegate.equals(o);
- }
-
- @Override
- public int hashCode() {
- return delegate.hashCode();
- }
-
- public boolean isEmpty() {
- return delegate.isEmpty();
- }
-
- public Set<K> keySet() {
- return delegate.keySet();
- }
-
- public V put(K key, V value) {
- return delegate.put(key, value);
- }
-
- public void putAll(Map<? extends K, ? extends V> t) {
- delegate.putAll(t);
- }
-
- public V remove(Object key) {
- return delegate.remove(key);
- }
-
- public int size() {
- return delegate.size();
- }
-
- public Collection<V> values() {
- return delegate.values();
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- *
- * Created on Feb 18, 2004
- */
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.Collection;
-import java.util.Iterator;
-
-/**
- * A utility class for calculating properties of discrete distributions.
- * Generally, these distributions are represented as arrays of
- * <code>double</code> values, which are assumed to be normalized
- * such that the entries in a single array sum to 1.
- *
- * @author Joshua O'Madadhain
- */
-public class DiscreteDistribution
-{
-
- /**
- * Returns the Kullback-Leibler divergence between the
- * two specified distributions, which must have the same
- * number of elements. This is defined as
- * the sum over all <code>i</code> of
- * <code>dist[i] * Math.log(dist[i] / reference[i])</code>.
- * Note that this value is not symmetric; see
- * <code>symmetricKL</code> for a symmetric variant.
- * @see #symmetricKL(double[], double[])
- */
- public static double KullbackLeibler(double[] dist, double[] reference)
- {
- double distance = 0;
-
- checkLengths(dist, reference);
-
- for (int i = 0; i < dist.length; i++)
- {
- if (dist[i] > 0 && reference[i] > 0)
- distance += dist[i] * Math.log(dist[i] / reference[i]);
- }
- return distance;
- }
-
- /**
- * Returns <code>KullbackLeibler(dist, reference) + KullbackLeibler(reference, dist)</code>.
- * @see #KullbackLeibler(double[], double[])
- */
- public static double symmetricKL(double[] dist, double[] reference)
- {
- return KullbackLeibler(dist, reference)
- + KullbackLeibler(reference, dist);
- }
-
- /**
- * Returns the squared difference between the
- * two specified distributions, which must have the same
- * number of elements. This is defined as
- * the sum over all <code>i</code> of the square of
- * <code>(dist[i] - reference[i])</code>.
- */
- public static double squaredError(double[] dist, double[] reference)
- {
- double error = 0;
-
- checkLengths(dist, reference);
-
- for (int i = 0; i < dist.length; i++)
- {
- double difference = dist[i] - reference[i];
- error += difference * difference;
- }
- return error;
- }
-
- /**
- * Returns the cosine distance between the two
- * specified distributions, which must have the same number
- * of elements. The distributions are treated as vectors
- * in <code>dist.length</code>-dimensional space.
- * Given the following definitions
- * <ul>
- * <li/><code>v</code> = the sum over all <code>i</code> of <code>dist[i] * dist[i]</code>
- * <li/><code>w</code> = the sum over all <code>i</code> of <code>reference[i] * reference[i]</code>
- * <li/><code>vw</code> = the sum over all <code>i</code> of <code>dist[i] * reference[i]</code>
- * </ul>
- * the value returned is defined as <code>vw / (Math.sqrt(v) * Math.sqrt(w))</code>.
- */
- public static double cosine(double[] dist, double[] reference)
- {
- double v_prod = 0; // dot product x*x
- double w_prod = 0; // dot product y*y
- double vw_prod = 0; // dot product x*y
-
- checkLengths(dist, reference);
-
- for (int i = 0; i < dist.length; i++)
- {
- vw_prod += dist[i] * reference[i];
- v_prod += dist[i] * dist[i];
- w_prod += reference[i] * reference[i];
- }
- // cosine distance between v and w
- return vw_prod / (Math.sqrt(v_prod) * Math.sqrt(w_prod));
- }
-
- /**
- * Returns the entropy of this distribution.
- * High entropy indicates that the distribution is
- * close to uniform; low entropy indicates that the
- * distribution is close to a Dirac delta (i.e., if
- * the probability mass is concentrated at a single
- * point, this method returns 0). Entropy is defined as
- * the sum over all <code>i</code> of
- * <code>-(dist[i] * Math.log(dist[i]))</code>
- */
- public static double entropy(double[] dist)
- {
- double total = 0;
-
- for (int i = 0; i < dist.length; i++)
- {
- if (dist[i] > 0)
- total += dist[i] * Math.log(dist[i]);
- }
- return -total;
- }
-
- /**
- * Throws an <code>IllegalArgumentException</code> if the two arrays are not of the same length.
- */
- protected static void checkLengths(double[] dist, double[] reference)
- {
- if (dist.length != reference.length)
- throw new IllegalArgumentException("Arrays must be of the same length");
- }
-
- /**
- * Normalizes, with Lagrangian smoothing, the specified <code>double</code>
- * array, so that the values sum to 1 (i.e., can be treated as probabilities).
- * The effect of the Lagrangian smoothing is to ensure that all entries
- * are nonzero; effectively, a value of <code>alpha</code> is added to each
- * entry in the original array prior to normalization.
- * @param counts
- * @param alpha
- */
- public static void normalize(double[] counts, double alpha)
- {
- double total_count = 0;
-
- for (int i = 0; i < counts.length; i++)
- total_count += counts[i];
-
- for (int i = 0; i < counts.length; i++)
- counts[i] = (counts[i] + alpha)
- / (total_count + counts.length * alpha);
- }
-
- /**
- * Returns the mean of the specified <code>Collection</code> of
- * distributions, which are assumed to be normalized arrays of
- * <code>double</code> values.
- * @see #mean(double[][])
- */
- public static double[] mean(Collection<double[]> distributions)
- {
- if (distributions.isEmpty())
- throw new IllegalArgumentException("Distribution collection must be non-empty");
- Iterator<double[]> iter = distributions.iterator();
- double[] first = iter.next();
- double[][] d_array = new double[distributions.size()][first.length];
- d_array[0] = first;
- for (int i = 1; i < d_array.length; i++)
- d_array[i] = iter.next();
-
- return mean(d_array);
- }
-
- /**
- * Returns the mean of the specified array of distributions,
- * represented as normalized arrays of <code>double</code> values.
- * Will throw an "index out of bounds" exception if the
- * distribution arrays are not all of the same length.
- */
- public static double[] mean(double[][] distributions)
- {
- double[] d_mean = new double[distributions[0].length];
- for (int j = 0; j < d_mean.length; j++)
- d_mean[j] = 0;
-
- for (int i = 0; i < distributions.length; i++)
- for (int j = 0; j < d_mean.length; j++)
- d_mean[j] += distributions[i][j] / distributions.length;
-
- return d_mean;
- }
-
-}
\ No newline at end of file
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.Collection;
-
-import org.apache.commons.collections15.BidiMap;
-import org.apache.commons.collections15.bidimap.DualHashBidiMap;
-
-/**
- * A class providing static methods useful for improving the
- * performance of graph algorithms.
- *
- * @author Tom Nelson
- *
- */
-public class Indexer {
-
- /**
- * Returns a <code>BidiMap</code> mapping each element of the collection to its
- * index as encountered while iterating over the collection. The purpose
- * of the index operation is to supply an O(1) replacement operation for the
- * O(n) <code>indexOf(element)</code> method of a <code>List</code>
- * @param <T>
- * @param collection
- * @return a bidirectional map from collection elements to 0-based indices
- */
- public static <T> BidiMap<T,Integer> create(Collection<T> collection) {
- return create(collection, 0);
- }
- /**
- * Returns a <code>BidiMap</code> mapping each element of the collection to its
- * index as encountered while iterating over the collection. The purpose
- * of the index operation is to supply an O(1) replacement operation for the
- * O(n) <code>indexOf(element)</code> method of a <code>List</code>
- * @param <T>
- * @param collection
- * @param start start index
- * @return a bidirectional map from collection elements to start-based indices
- */
- public static <T> BidiMap<T,Integer> create(Collection<T> collection, int start) {
- BidiMap<T,Integer> map = new DualHashBidiMap<T,Integer>();
- int i=start;
- for(T t : collection) {
- map.put(t,i++);
- }
- return map;
- }
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.util;
-
-
-/**
- * An interface for algorithms that proceed iteratively.
- *
- */
-public interface IterativeContext
-{
- /**
- * Advances one step.
- */
- void step();
-
- /**
- * Returns true if this iterative process is finished, and false otherwise.
- */
- boolean done();
-}
+++ /dev/null
-/*
-* Copyright (c) 2003, the JUNG Project and the Regents of the University
-* of California
-* All rights reserved.
-*
-* This software is open-source under the BSD license; see either
-* "license.txt" or
-* http://jung.sourceforge.net/license.txt for a description.
-*/
-package edu.uci.ics.jung.algorithms.util;
-
-
-
-/**
- * Provides basic infrastructure for iterative algorithms. Services provided include:
- * <ul>
- * <li> storage of current and max iteration count </li>
- * <li> framework for initialization, iterative evaluation, and finalization </li>
- * <li> test for convergence </li>
- * <li> etc. </li>
- * </ul>
- * <p>
- * Algorithms that subclass this class are typically used in the following way: <br>
- * <pre>
- * FooAlgorithm foo = new FooAlgorithm(...)
- * foo.setMaximumIterations(100); //set up conditions
- * ...
- * foo.evaluate(); //key method which initiates iterative process
- * foo.getSomeResult();
- * </pre>
- *
- * @author Scott White (originally written by Didier Besset)
- */
-public abstract class IterativeProcess implements IterativeContext {
- /**
- * Number of iterations performed.
- */
- private int iterations;
- /**
- * Maximum allowed number of iterations.
- */
- private int maximumIterations = 50;
- /**
- * Desired precision.
- */
- private double desiredPrecision = Double.MIN_VALUE;
- /**
- * Achieved precision.
- */
- private double precision;
-
-
- /**
- * Generic constructor.
- */
- public IterativeProcess() {
- }
-
- /**
- * Performs the iterative process.
- * Note: this method does not return anything because Java does not
- * allow mixing double, int, or objects
- */
- public void evaluate() {
- iterations = 0;
- initializeIterations();
- while (iterations++ < maximumIterations) {
- step();
- precision = getPrecision();
- if (hasConverged())
- break;
- }
- finalizeIterations();
- }
-
- /**
- * Evaluate the result of the current iteration.
- */
- abstract public void step();
-
- /**
- * Perform eventual clean-up operations
- * (must be implement by subclass when needed).
- */
- protected void finalizeIterations() {
- }
-
- /**
- * Returns the desired precision.
- */
- public double getDesiredPrecision() {
- return desiredPrecision;
- }
-
- /**
- * Returns the number of iterations performed.
- */
- public int getIterations() {
- return iterations;
- }
-
- /**
- * Returns the maximum allowed number of iterations.
- */
- public int getMaximumIterations() {
- return maximumIterations;
- }
-
- /**
- * Returns the attained precision.
- */
- public double getPrecision() {
- return precision;
- }
-
- /**
- * @param precision the precision to set
- */
- public void setPrecision(double precision) {
- this.precision = precision;
- }
-
- /**
- *
- * Check to see if the result has been attained.
- * @return boolean
- */
- public boolean hasConverged() {
- return precision < desiredPrecision;
- }
-
- public boolean done() {
- return hasConverged();
- }
-
- /**
- * Initializes internal parameters to start the iterative process.
- */
- protected void initializeIterations() {
- }
-
- /**
- *
- */
- public void reset() {
- }
-
- /**
- * @return double
- * @param epsilon double
- * @param x double
- */
- public double relativePrecision(double epsilon, double x) {
- return x > desiredPrecision ? epsilon / x: epsilon;
- }
-
- /**
- * Defines the desired precision.
- */
- public void setDesiredPrecision(double prec) throws IllegalArgumentException {
- if (prec <= 0)
- throw new IllegalArgumentException("Non-positive precision: " + prec);
- desiredPrecision = prec;
- }
-
- /**
- * Defines the maximum allowed number of iterations.
- */
- public void setMaximumIterations(int maxIter) throws IllegalArgumentException {
- if (maxIter < 1)
- throw new IllegalArgumentException("Non-positive maximum iteration: " + maxIter);
- maximumIterations = maxIter;
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- * Created on Aug 9, 2004
- *
- */
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-
-
-/**
- * Groups items into a specified number of clusters, based on their proximity in
- * d-dimensional space, using the k-means algorithm. Calls to
- * <code>cluster</code> will terminate when either of the two following
- * conditions is true:
- * <ul>
- * <li/>the number of iterations is > <code>max_iterations</code>
- * <li/>none of the centroids has moved as much as <code>convergence_threshold</code>
- * since the previous iteration
- * </ul>
- *
- * @author Joshua O'Madadhain
- */
-public class KMeansClusterer<T>
-{
- protected int max_iterations;
- protected double convergence_threshold;
- protected Random rand;
-
- /**
- * Creates an instance whose termination conditions are set according
- * to the parameters.
- */
- public KMeansClusterer(int max_iterations, double convergence_threshold)
- {
- this.max_iterations = max_iterations;
- this.convergence_threshold = convergence_threshold;
- this.rand = new Random();
- }
-
- /**
- * Creates an instance with max iterations of 100 and convergence threshold
- * of 0.001.
- */
- public KMeansClusterer()
- {
- this(100, 0.001);
- }
-
- /**
- * Returns the maximum number of iterations.
- */
- public int getMaxIterations()
- {
- return max_iterations;
- }
-
- /**
- * Sets the maximum number of iterations.
- */
- public void setMaxIterations(int max_iterations)
- {
- if (max_iterations < 0)
- throw new IllegalArgumentException("max iterations must be >= 0");
-
- this.max_iterations = max_iterations;
- }
-
- /**
- * Returns the convergence threshold.
- */
- public double getConvergenceThreshold()
- {
- return convergence_threshold;
- }
-
- /**
- * Sets the convergence threshold.
- * @param convergence_threshold
- */
- public void setConvergenceThreshold(double convergence_threshold)
- {
- if (convergence_threshold <= 0)
- throw new IllegalArgumentException("convergence threshold " +
- "must be > 0");
-
- this.convergence_threshold = convergence_threshold;
- }
-
- /**
- * Returns a <code>Collection</code> of clusters, where each cluster is
- * represented as a <code>Map</code> of <code>Objects</code> to locations
- * in d-dimensional space.
- * @param object_locations a map of the Objects to cluster, to
- * <code>double</code> arrays that specify their locations in d-dimensional space.
- * @param num_clusters the number of clusters to create
- * @throws NotEnoughClustersException
- */
- @SuppressWarnings("unchecked")
- public Collection<Map<T, double[]>> cluster(Map<T, double[]> object_locations, int num_clusters)
- {
- if (object_locations == null || object_locations.isEmpty())
- throw new IllegalArgumentException("'objects' must be non-empty");
-
- if (num_clusters < 2 || num_clusters > object_locations.size())
- throw new IllegalArgumentException("number of clusters " +
- "must be >= 2 and <= number of objects (" +
- object_locations.size() + ")");
-
-
- Set<double[]> centroids = new HashSet<double[]>();
-
- Object[] obj_array = object_locations.keySet().toArray();
- Set<T> tried = new HashSet<T>();
-
- // create the specified number of clusters
- while (centroids.size() < num_clusters && tried.size() < object_locations.size())
- {
- T o = (T)obj_array[(int)(rand.nextDouble() * obj_array.length)];
- tried.add(o);
- double[] mean_value = object_locations.get(o);
- boolean duplicate = false;
- for (double[] cur : centroids)
- {
- if (Arrays.equals(mean_value, cur))
- duplicate = true;
- }
- if (!duplicate)
- centroids.add(mean_value);
- }
-
- if (tried.size() >= object_locations.size())
- throw new NotEnoughClustersException();
-
- // put items in their initial clusters
- Map<double[], Map<T, double[]>> clusterMap = assignToClusters(object_locations, centroids);
-
- // keep reconstituting clusters until either
- // (a) membership is stable, or
- // (b) number of iterations passes max_iterations, or
- // (c) max movement of any centroid is <= convergence_threshold
- int iterations = 0;
- double max_movement = Double.POSITIVE_INFINITY;
- while (iterations++ < max_iterations && max_movement > convergence_threshold)
- {
- max_movement = 0;
- Set<double[]> new_centroids = new HashSet<double[]>();
- // calculate new mean for each cluster
- for (Map.Entry<double[], Map<T, double[]>> entry : clusterMap.entrySet())
- {
- double[] centroid = entry.getKey();
- Map<T, double[]> elements = entry.getValue();
- ArrayList<double[]> locations = new ArrayList<double[]>(elements.values());
-
- double[] mean = DiscreteDistribution.mean(locations);
- max_movement = Math.max(max_movement,
- Math.sqrt(DiscreteDistribution.squaredError(centroid, mean)));
- new_centroids.add(mean);
- }
-
- // TODO: check membership of clusters: have they changed?
-
- // regenerate cluster membership based on means
- clusterMap = assignToClusters(object_locations, new_centroids);
- }
- return clusterMap.values();
- }
-
- /**
- * Assigns each object to the cluster whose centroid is closest to the
- * object.
- * @param object_locations a map of objects to locations
- * @param centroids the centroids of the clusters to be formed
- * @return a map of objects to assigned clusters
- */
- protected Map<double[], Map<T, double[]>> assignToClusters(Map<T, double[]> object_locations, Set<double[]> centroids)
- {
- Map<double[], Map<T, double[]>> clusterMap = new HashMap<double[], Map<T, double[]>>();
- for (double[] centroid : centroids)
- clusterMap.put(centroid, new HashMap<T, double[]>());
-
- for (Map.Entry<T, double[]> object_location : object_locations.entrySet())
- {
- T object = object_location.getKey();
- double[] location = object_location.getValue();
-
- // find the cluster with the closest centroid
- Iterator<double[]> c_iter = centroids.iterator();
- double[] closest = c_iter.next();
- double distance = DiscreteDistribution.squaredError(location, closest);
-
- while (c_iter.hasNext())
- {
- double[] centroid = c_iter.next();
- double dist_cur = DiscreteDistribution.squaredError(location, centroid);
- if (dist_cur < distance)
- {
- distance = dist_cur;
- closest = centroid;
- }
- }
- clusterMap.get(closest).put(object, location);
- }
-
- return clusterMap;
- }
-
- /**
- * Sets the seed used by the internal random number generator.
- * Enables consistent outputs.
- */
- public void setSeed(int random_seed)
- {
- this.rand = new Random(random_seed);
- }
-
- /**
- * An exception that indicates that the specified data points cannot be
- * clustered into the number of clusters requested by the user.
- * This will happen if and only if there are fewer distinct points than
- * requested clusters. (If there are fewer total data points than
- * requested clusters, <code>IllegalArgumentException</code> will be thrown.)
- *
- * @author Joshua O'Madadhain
- */
- @SuppressWarnings("serial")
- public static class NotEnoughClustersException extends RuntimeException
- {
- @Override
- public String getMessage()
- {
- return "Not enough distinct points in the input data set to form " +
- "the requested number of clusters";
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-/*
- *
- * Created on Oct 29, 2003
- */
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.AbstractCollection;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Queue;
-import java.util.Vector;
-
-import org.apache.commons.collections15.IteratorUtils;
-
-/**
- * An array-based binary heap implementation of a priority queue,
- * which also provides
- * efficient <code>update()</code> and <code>contains</code> operations.
- * It contains extra infrastructure (a hash table) to keep track of the
- * position of each element in the array; thus, if the key value of an element
- * changes, it may be "resubmitted" to the heap via <code>update</code>
- * so that the heap can reposition it efficiently, as necessary.
- *
- * @author Joshua O'Madadhain
- */
-public class MapBinaryHeap<T>
- extends AbstractCollection<T>
- implements Queue<T>
-{
- private Vector<T> heap = new Vector<T>(); // holds the heap as an implicit binary tree
- private Map<T,Integer> object_indices = new HashMap<T,Integer>(); // maps each object in the heap to its index in the heap
- private Comparator<T> comp;
- private final static int TOP = 0; // the index of the top of the heap
-
- /**
- * Creates a <code>MapBinaryHeap</code> whose heap ordering
- * is based on the ordering of the elements specified by <code>c</code>.
- */
- public MapBinaryHeap(Comparator<T> comp)
- {
- initialize(comp);
- }
-
- /**
- * Creates a <code>MapBinaryHeap</code> whose heap ordering
- * will be based on the <i>natural ordering</i> of the elements,
- * which must be <code>Comparable</code>.
- */
- public MapBinaryHeap()
- {
- initialize(new ComparableComparator());
- }
-
- /**
- * Creates a <code>MapBinaryHeap</code> based on the specified
- * collection whose heap ordering
- * will be based on the <i>natural ordering</i> of the elements,
- * which must be <code>Comparable</code>.
- */
- public MapBinaryHeap(Collection<T> c)
- {
- this();
- addAll(c);
- }
-
- /**
- * Creates a <code>MapBinaryHeap</code> based on the specified collection
- * whose heap ordering
- * is based on the ordering of the elements specified by <code>c</code>.
- */
- public MapBinaryHeap(Collection<T> c, Comparator<T> comp)
- {
- this(comp);
- addAll(c);
- }
-
- private void initialize(Comparator<T> comp)
- {
- this.comp = comp;
- clear();
- }
-
- /**
- * @see Collection#clear()
- */
- @Override
- public void clear()
- {
- object_indices.clear();
- heap.clear();
- }
-
- /**
- * Inserts <code>o</code> into this collection.
- */
- @Override
- public boolean add(T o)
- {
- int i = heap.size(); // index 1 past the end of the heap
- heap.setSize(i+1);
- percolateUp(i, o);
- return true;
- }
-
- /**
- * Returns <code>true</code> if this collection contains no elements, and
- * <code>false</code> otherwise.
- */
- @Override
- public boolean isEmpty()
- {
- return heap.isEmpty();
- }
-
- /**
- * Returns the element at the top of the heap; does not
- * alter the heap.
- */
- public T peek()
- {
- if (heap.size() > 0)
- return heap.elementAt(TOP);
- else
- return null;
- }
-
- /**
- * Removes the element at the top of this heap, and returns it.
- * @deprecated Use {@link MapBinaryHeap#poll()}
- * or {@link MapBinaryHeap#remove()} instead.
- */
- @Deprecated
- public T pop() throws NoSuchElementException
- {
- return this.remove();
- }
-
- /**
- * Returns the size of this heap.
- */
- @Override
- public int size()
- {
- return heap.size();
- }
-
- /**
- * Informs the heap that this object's internal key value has been
- * updated, and that its place in the heap may need to be shifted
- * (up or down).
- * @param o
- */
- public void update(T o)
- {
- // Since we don't know whether the key value increased or
- // decreased, we just percolate up followed by percolating down;
- // one of the two will have no effect.
-
- int cur = object_indices.get(o).intValue(); // current index
- int new_idx = percolateUp(cur, o);
- percolateDown(new_idx);
- }
-
- /**
- * @see Collection#contains(java.lang.Object)
- */
- @Override
- public boolean contains(Object o)
- {
- return object_indices.containsKey(o);
- }
-
- /**
- * Moves the element at position <code>cur</code> closer to
- * the bottom of the heap, or returns if no further motion is
- * necessary. Calls itself recursively if further motion is
- * possible.
- */
- private void percolateDown(int cur)
- {
- int left = lChild(cur);
- int right = rChild(cur);
- int smallest;
-
- if ((left < heap.size()) &&
- (comp.compare(heap.elementAt(left), heap.elementAt(cur)) < 0)) {
- smallest = left;
- } else {
- smallest = cur;
- }
-
- if ((right < heap.size()) &&
- (comp.compare(heap.elementAt(right), heap.elementAt(smallest)) < 0)) {
- smallest = right;
- }
-
- if (cur != smallest)
- {
- swap(cur, smallest);
- percolateDown(smallest);
- }
- }
-
- /**
- * Moves the element <code>o</code> at position <code>cur</code>
- * as high as it can go in the heap. Returns the new position of the
- * element in the heap.
- */
- private int percolateUp(int cur, T o)
- {
- int i = cur;
-
- while ((i > TOP) && (comp.compare(heap.elementAt(parent(i)), o) > 0))
- {
- T parentElt = heap.elementAt(parent(i));
- heap.setElementAt(parentElt, i);
- object_indices.put(parentElt, new Integer(i)); // reset index to i (new location)
- i = parent(i);
- }
-
- // place object in heap at appropriate place
- object_indices.put(o, new Integer(i));
- heap.setElementAt(o, i);
-
- return i;
- }
-
- /**
- * Returns the index of the left child of the element at
- * index <code>i</code> of the heap.
- * @param i
- * @return the index of the left child of the element at
- * index <code>i</code> of the heap
- */
- private int lChild(int i)
- {
- return (i<<1) + 1;
- }
-
- /**
- * Returns the index of the right child of the element at
- * index <code>i</code> of the heap.
- * @param i
- * @return the index of the right child of the element at
- * index <code>i</code> of the heap
- */
- private int rChild(int i)
- {
- return (i<<1) + 2;
- }
-
- /**
- * Returns the index of the parent of the element at
- * index <code>i</code> of the heap.
- * @param i
- * @return the index of the parent of the element at index i of the heap
- */
- private int parent(int i)
- {
- return (i-1)>>1;
- }
-
- /**
- * Swaps the positions of the elements at indices <code>i</code>
- * and <code>j</code> of the heap.
- * @param i
- * @param j
- */
- private void swap(int i, int j)
- {
- T iElt = heap.elementAt(i);
- T jElt = heap.elementAt(j);
-
- heap.setElementAt(jElt, i);
- object_indices.put(jElt, new Integer(i));
-
- heap.setElementAt(iElt, j);
- object_indices.put(iElt, new Integer(j));
- }
-
- /**
- * Comparator used if none is specified in the constructor.
- * @author Joshua O'Madadhain
- */
- private class ComparableComparator implements Comparator<T>
- {
- /**
- * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
- */
- @SuppressWarnings("unchecked")
- public int compare(T arg0, T arg1)
- {
- if (!(arg0 instanceof Comparable) || !(arg1 instanceof Comparable))
- throw new IllegalArgumentException("Arguments must be Comparable");
-
- return ((Comparable<T>)arg0).compareTo(arg1);
- }
- }
-
- /**
- * Returns an <code>Iterator</code> that does not support modification
- * of the heap.
- */
- @Override
- public Iterator<T> iterator()
- {
- return IteratorUtils.<T>unmodifiableIterator(heap.iterator());
- }
-
- /**
- * This data structure does not support the removal of arbitrary elements.
- */
- @Override
- public boolean remove(Object o)
- {
- throw new UnsupportedOperationException();
- }
-
- /**
- * This data structure does not support the removal of arbitrary elements.
- */
- @Override
- public boolean removeAll(Collection<?> c)
- {
- throw new UnsupportedOperationException();
- }
-
- /**
- * This data structure does not support the removal of arbitrary elements.
- */
- @Override
- public boolean retainAll(Collection<?> c)
- {
- throw new UnsupportedOperationException();
- }
-
- public T element() throws NoSuchElementException
- {
- T top = this.peek();
- if (top == null)
- throw new NoSuchElementException();
- return top;
- }
-
- public boolean offer(T o)
- {
- return add(o);
- }
-
- public T poll()
- {
- T top = this.peek();
- if (top != null)
- {
- T bottom_elt = heap.lastElement();
- heap.setElementAt(bottom_elt, TOP);
- object_indices.put(bottom_elt, new Integer(TOP));
-
- heap.setSize(heap.size() - 1); // remove the last element
- if (heap.size() > 1)
- percolateDown(TOP);
-
- object_indices.remove(top);
- }
- return top;
- }
-
- public T remove()
- {
- T top = this.poll();
- if (top == null)
- throw new NoSuchElementException();
- return top;
- }
-
-}
+++ /dev/null
-/*
- * Created on Aug 5, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.Map;
-
-
-/**
- * A <code>SettableTransformer</code> that operates on an underlying <code>Map</code> instance.
- * Similar to <code>MapTransformer</code>.
- *
- * @author Joshua O'Madadhain
- */
-public class MapSettableTransformer<I, O> implements SettableTransformer<I, O>
-{
- protected Map<I,O> map;
-
- /**
- * Creates an instance based on <code>m</code>.
- */
- public MapSettableTransformer(Map<I,O> m)
- {
- this.map = m;
- }
-
- public O transform(I input)
- {
- return map.get(input);
- }
-
- public void set(I input, O output)
- {
- map.put(input, output);
- }
-}
+++ /dev/null
-package edu.uci.ics.jung.algorithms.util;
-
-import org.apache.commons.collections15.Predicate;
-
-import edu.uci.ics.jung.graph.Graph;
-import edu.uci.ics.jung.graph.util.Context;
-import edu.uci.ics.jung.graph.util.Pair;
-
-/**
- * A <code>Predicate</code> that returns <code>true</code> if the input edge's
- * endpoints in the input graph are identical. (Thus, an edge which connects
- * its sole incident vertex to itself).
- *
- * @param <V>
- * @param <E>
- */
-public class SelfLoopEdgePredicate<V,E> implements Predicate<Context<Graph<V,E>,E>> {
-
- public boolean evaluate(Context<Graph<V,E>,E> context) {
- Pair<V> endpoints = context.graph.getEndpoints(context.element);
- return endpoints.getFirst().equals(endpoints.getSecond());
- }
-}
+++ /dev/null
-/*
- * Created on Aug 5, 2007
- *
- * Copyright (c) 2007, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- */
-package edu.uci.ics.jung.algorithms.util;
-
-import org.apache.commons.collections15.Transformer;
-
-/**
- * An interface for classes that can set the value to be returned (from <code>transform()</code>)
- * when invoked on a given input.
- *
- * @author Joshua O'Madadhain
- */
-public interface SettableTransformer<I, O> extends Transformer<I, O>
-{
- /**
- * Sets the value (<code>output</code>) to be returned by a call to
- * <code>transform(input)</code>).
- * @param input
- * @param output
- */
- public void set(I input, O output);
-}
+++ /dev/null
-/**
- * Copyright (c) 2009, the JUNG Project and the Regents of the University
- * of California
- * All rights reserved.
- *
- * This software is open-source under the BSD license; see either
- * "license.txt" or
- * http://jung.sourceforge.net/license.txt for a description.
- * Created on Jan 8, 2009
- *
- */
-package edu.uci.ics.jung.algorithms.util;
-
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Random;
-
-/**
- * Selects items according to their probability in an arbitrary probability
- * distribution. The distribution is specified by a {@code Map} from
- * items (of type {@code T}) to weights of type {@code Number}, supplied
- * to the constructor; these weights are normalized internally to act as
- * probabilities.
- *
- * <p>This implementation selects items in O(1) time, and requires O(n) space.
- *
- * @author Joshua O'Madadhain
- */
-public class WeightedChoice<T>
-{
- private List<ItemPair> item_pairs;
- private Random random;
-
- /**
- * The default minimum value that is treated as a valid probability
- * (as opposed to rounding error from floating-point operations).
- */
- public static final double DEFAULT_THRESHOLD = 0.00000000001;
-
- /**
- * Equivalent to {@code this(item_weights, new Random(), DEFAULT_THRESHOLD)}.
- * @param item_weights
- */
- public WeightedChoice(Map<T, ? extends Number> item_weights)
- {
- this(item_weights, new Random(), DEFAULT_THRESHOLD);
- }
-
- /**
- * Equivalent to {@code this(item_weights, new Random(), threshold)}.
- */
- public WeightedChoice(Map<T, ? extends Number> item_weights, double threshold)
- {
- this(item_weights, new Random(), threshold);
- }
-
- /**
- * Equivalent to {@code this(item_weights, random, DEFAULT_THRESHOLD)}.
- */
- public WeightedChoice(Map<T, ? extends Number> item_weights, Random random)
- {
- this(item_weights, random, DEFAULT_THRESHOLD);
- }
-
- /**
- * Creates an instance with the specified mapping from items to weights,
- * random number generator, and threshold value.
- *
- * <p>The mapping defines the weight for each item to be selected; this
- * will be proportional to the probability of its selection.
- * <p>The random number generator specifies the mechanism which will be
- * used to provide uniform integer and double values.
- * <p>The threshold indicates default minimum value that is treated as a valid
- * probability (as opposed to rounding error from floating-point operations).
- */
- public WeightedChoice(Map<T, ? extends Number> item_weights, Random random,
- double threshold)
- {
- if (item_weights.isEmpty())
- throw new IllegalArgumentException("Item weights must be non-empty");
-
- int item_count = item_weights.size();
- item_pairs = new ArrayList<ItemPair>(item_count);
-
- double sum = 0;
- for (Map.Entry<T, ? extends Number> entry : item_weights.entrySet())
- {
- double value = entry.getValue().doubleValue();
- if (value <= 0)
- throw new IllegalArgumentException("Weights must be > 0");
- sum += value;
- }
- double bucket_weight = 1.0 / item_weights.size();
-
- Queue<ItemPair> light_weights = new LinkedList<ItemPair>();
- Queue<ItemPair> heavy_weights = new LinkedList<ItemPair>();
- for (Map.Entry<T, ? extends Number> entry : item_weights.entrySet())
- {
- double value = entry.getValue().doubleValue() / sum;
- enqueueItem(entry.getKey(), value, bucket_weight, light_weights, heavy_weights);
- }
-
- // repeat until both queues empty
- while (!heavy_weights.isEmpty() || !light_weights.isEmpty())
- {
- ItemPair heavy_item = heavy_weights.poll();
- ItemPair light_item = light_weights.poll();
- double light_weight = 0;
- T light = null;
- T heavy = null;
- if (light_item != null)
- {
- light_weight = light_item.weight;
- light = light_item.light;
- }
- if (heavy_item != null)
- {
- heavy = heavy_item.heavy;
- // put the 'left over' weight from the heavy item--what wasn't
- // needed to make up the difference between the light weight and
- // 1/n--back in the appropriate queue
- double new_weight = heavy_item.weight - (bucket_weight - light_weight);
- if (new_weight > threshold)
- enqueueItem(heavy, new_weight, bucket_weight, light_weights, heavy_weights);
- }
- light_weight *= item_count;
-
- item_pairs.add(new ItemPair(light, heavy, light_weight));
- }
-
- this.random = random;
- }
-
- /**
- * Adds key/value to the appropriate queue. Keys with values less than
- * the threshold get added to {@code light_weights}, all others get added
- * to {@code heavy_weights}.
- */
- private void enqueueItem(T key, double value, double threshold,
- Queue<ItemPair> light_weights, Queue<ItemPair> heavy_weights)
- {
- if (value < threshold)
- light_weights.offer(new ItemPair(key, null, value));
- else
- heavy_weights.offer(new ItemPair(null, key, value));
- }
-
- /**
- * Sets the seed used by the internal random number generator.
- */
- public void setRandomSeed(long seed)
- {
- this.random.setSeed(seed);
- }
-
- /**
- * Retrieves an item with probability proportional to its weight in the
- * {@code Map} provided in the input.
- */
- public T nextItem()
- {
- ItemPair item_pair = item_pairs.get(random.nextInt(item_pairs.size()));
- if (random.nextDouble() < item_pair.weight)
- return item_pair.light;
- return item_pair.heavy;
- }
-
- /**
- * Manages light object/heavy object/light conditional probability tuples.
- */
- private class ItemPair
- {
- T light;
- T heavy;
- double weight;
-
- private ItemPair(T light, T heavy, double weight)
- {
- this.light = light;
- this.heavy = heavy;
- this.weight = weight;
- }
-
- @Override
- public String toString()
- {
- return String.format("[L:%s, H:%s, %.3f]", light, heavy, weight);
- }
- }
-}
+++ /dev/null
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
-<html>
-<head>
-<!--
-
- @(#)package.html
-
- Copyright © 2003 The Regents of the University of California. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies. This software program and documentation are copyrighted by The Regents of the University of California ("The University of California").
-
-THE SOFTWARE PROGRAM AND DOCUMENTATION ARE SUPPLIED "AS IS," WITHOUT ANY ACCOMPANYING SERVICES FROM THE UNIVERSITY OF CALFORNIA. FURTHERMORE, THE UNIVERSITY OF CALIFORNIA DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE. THE END-USER UNDERSTANDS THAT THE PROGRAM WAS DEVELOPED FOR RESEARCH PURPOSES AND IS ADVISED NOT TO RELY EXCLUSIVELY ON THE PROGRAM FOR ANY REASON.
-
-IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--->
-</head>
-<body>
-
-Provides general algorithmic utilities. These include:
-<ul>
-<li/><code>DiscreteDistribution</code>: calculates statistical measures on
-discrete probability distributions represented as <code>double</code> arrays
-<li/><code>KMeansClusterer</code>: uses the k-means algorithm to cluster
-points in d-dimensional space into k clusters
-<li/><code>MapBinaryHeap</code>: a binary heap implementation that permits
-efficient element access and update operations
-<li/><code>RandomLocationTransformer</code>: a class that randomly assigns
-2D coordinates to items (default initializer for iterative Layouts)
-<li/><code>SettableTransformer</code>: an extension of <code>Transformer</code>
-that allows mutation of the transformation
-</ul>
-
-</body>
-</html>
+++ /dev/null
-Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior
-University
-
-We are making the OpenFlow specification and associated documentation
-(Software) available for public use and benefit with the expectation that
-others will use, modify and enhance the Software and contribute those
-enhancements back to the community. However, since we would like to make the
-Software available for broadest use, with as few restrictions as possible
-permission is hereby granted, free of charge, to any person obtaining a copy of
-this Software to deal in the Software under the copyrights without restriction,
-including without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to permit
-persons to whom the Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-The name and trademarks of copyright holder(s) may NOT be used in advertising
-or publicity pertaining to the Software or any derivatives without specific,
-written prior permission.
+++ /dev/null
-# Because I am old and crotchety and my fingers can't stop from running
-# `make` commands
-all:
- ant
-
-run:
- ant run
-
-doc:
- ant javadoc
-
-tests:
- ant tests
-
-count:
- @find . -name \*.java | xargs wc -l | sort -n
-
-clean:
- ant clean
+++ /dev/null
-OpenFlow Java - v1.0.0
-
-A Java implementation of low-level OpenFlow packet marshalling/unmarshalling
-and IO operations. Implements v1.0 of the OpenFlow specification at
-http://www.openflow.org.
-
- - David Erickson (daviderickson@cs.stanford.edu)
- - Rob Sherwood (rob.sherwood@stanford.edu)
-
-Building requires Maven 2.x+ (http://maven.apache.org/).
-
-To build:
- mvn package
-
-To build javadocs:
- mvn javadoc:javadoc
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<profiles version="11">
-<profile kind="CodeFormatterProfile" name="OpenFlowJ" version="11">
-<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_field" value="0"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_ellipsis" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_multiple_fields" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_conditional_expression" value="80"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_package" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.continuation_indentation" value="2"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_package" value="0"/>
-<setting id="org.eclipse.jdt.core.compiler.source" value="1.5"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.format_line_comments" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.join_wrapped_lines" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.align_type_members_on_columns" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_member_type" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.indent_parameter_description" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.lineSplit" value="80"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration" value="0"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indentation.size" value="4"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_assignment" value="0"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.tabulation.char" value="space"/>
-<setting id="org.eclipse.jdt.core.compiler.problem.assertIdentifier" value="error"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_body" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_method" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_switch" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch" value="true"/>
-<setting id="org.eclipse.jdt.core.compiler.problem.enumIdentifier" value="error"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_ellipsis" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_method_declaration" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.compact_else_if" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.indent_root_tags" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_constant" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.tabulation.size" value="4"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_empty_lines" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block_in_case" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter" value="insert"/>
-<setting id="org.eclipse.jdt.core.compiler.compliance" value="1.5"/>
-<setting id="org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_binary_expression" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode" value="enabled"/>
-<setting id="org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.format_javadoc_comments" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.line_length" value="80"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_import_groups" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body" value="0"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.wrap_before_binary_operator" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_block" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.join_lines_in_comments" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_if" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_imports" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.format_html" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.format_source_code" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration" value="16"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="insert"/>
-<setting id="org.eclipse.jdt.core.compiler.codegen.targetPlatform" value="1.5"/>
-<setting id="org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.format_header" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.comment.format_block_comments" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.alignment_for_enum_constants" value="0"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.brace_position_for_type_declaration" value="end_of_line"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_imports" value="1"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header" value="true"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
-<setting id="org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments" value="do not insert"/>
-<setting id="org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
-<setting id="org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column" value="false"/>
-</profile>
-</profiles>
+++ /dev/null
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>org.openflow.openflowj</artifactId>
- <version>1.1.0-SNAPSHOT</version>
- <name>OpenFlow Java</name>
- <description>A Java implemention of the OpenFlow v1.0 protocol</description>
-
- <!-- Get some common settings for the project we are using it in -->
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../commons/thirdparty</relativePath>
- </parent>
-
- <developers>
- <developer>
- <name>David Erickson</name>
- <email>daviderickson@cs.stanford.edu</email>
- </developer>
- <developer>
- <name>Rob Sherwood</name>
- <email>rob.sherwood@stanford.edu</email>
- </developer>
- </developers>
- <packaging>bundle</packaging>
- <url>http://www.openflow.org</url>
- <licenses>
- <license>
- <name>The OpenFlow License</name>
- <url>http://www.openflowswitch.org/wp/legal/</url>
- <distribution>repo</distribution>
- </license>
- </licenses>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
- <properties>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- </properties>
- <!-- For GPG release signing, use mvn release:perform -->
- <profiles>
- <profile>
- <id>release-sign-artifacts</id>
- <activation>
- <!-- don't activate it -->
- <!-- <property> -->
- <!-- <name>performRelease</name> -->
- <!-- <value>true</value> -->
- <!-- </property> -->
- </activation>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-gpg-plugin</artifactId>
- <version>1.1</version>
- <executions>
- <execution>
- <id>sign-artifacts</id>
- <phase>verify</phase>
- <goals>
- <goal>sign</goal>
- </goals>
- </execution>
- </executions>
- <configuration>
- <mavenExecutorId>forked-path</mavenExecutorId>
- </configuration>
- </plugin>
- </plugins>
- </build>
- </profile>
- </profiles>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.3.6</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>
- org.openflow.example;version="1.0.1";
- uses:="org.openflow.example.cli,
- org.openflow.protocol,
- org.openflow.io,
- org.openflow.protocol.factory",
- org.openflow.io;version="1.0.1";
- uses:="org.openflow.protocol,
- org.openflow.protocol.factory",
- org.openflow.protocol;version="1.0.1";
- uses:="org.openflow.protocol.statistics,
- org.openflow.protocol,
- org.openflow.protocol.factory",
- org.openflow.protocol.action;version="1.0.1";
- uses:="org.openflow.protocol",
- org.openflow.protocol.factory;version="1.0.1";
- uses:="org.openflow.protocol.statistics,
- org.openflow.protocol,
- org.openflow.protocol.action,
- org.openflow.protocol.queue",
- org.openflow.protocol.queue;version="1.0.2";
- uses:="org.openflow.protocol,
- org.openflow.protocol.factory",
- org.openflow.protocol.statistics;version="1.0.1";
- uses:="org.openflow.protocol,
- org.openflow.protocol.factory",
- org.openflow.util;version="1.0.1"
- </Export-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <version>2.5.1</version>
- <configuration>
- <source>1.7</source>
- <target>1.7</target>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <dependencies>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>4.8.1</version>
- <scope>test</scope>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/**
- *
- */
-package org.openflow.example;
-
-import java.io.IOException;
-import java.nio.channels.SelectionKey;
-
-/**
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public interface SelectListener {
- /**
- * Tell the select listener that an event took place on the passed object
- * @param key the key used on the select
- * @param arg some parameter passed by the caller when registering
- * @throws IOException
- */
- void handleEvent(SelectionKey key, Object arg) throws IOException;
-}
+++ /dev/null
-package org.openflow.example;
-
-import java.io.IOException;
-import java.nio.channels.CancelledKeyException;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.SelectableChannel;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.Selector;
-import java.nio.channels.spi.SelectorProvider;
-import java.util.Iterator;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-
-/***
- * Dirt simple SelectLoop for simple java controller
- */
-
-
-public class SelectLoop {
- protected SelectListener callback;
- protected boolean dontStop;
- protected Object registrationLock;
- protected int registrationRequests = 0;
- protected Queue<Object[]> registrationQueue;
- protected Selector selector;
- protected long timeout;
-
- public SelectLoop(SelectListener cb) throws IOException {
- callback = cb;
- dontStop = true;
- selector = SelectorProvider.provider().openSelector();
- registrationLock = new Object();
- registrationQueue = new ConcurrentLinkedQueue<Object[]>();
- timeout = 0;
- }
-
- /**
- * Initializes this SelectLoop
- * @param cb the callback to call when select returns
- * @param timeout the timeout value in milliseconds that select will be
- * called with
- * @throws IOException
- */
- public SelectLoop(SelectListener cb, long timeout) throws IOException {
- callback = cb;
- dontStop = true;
- selector = SelectorProvider.provider().openSelector();
- registrationLock = new Object();
- registrationQueue = new ConcurrentLinkedQueue<Object[]>();
- this.timeout = timeout;
- }
-
- public void register(SelectableChannel ch, int ops, Object arg)
- throws ClosedChannelException {
- registrationQueue.add(new Object[] {ch, ops, arg});
- }
-
- /**
- * Registers the supplied SelectableChannel with this SelectLoop. Note this
- * method blocks until registration proceeds. It is advised that
- * SelectLoop is intialized with a timeout value when using this method.
- * @param ch the channel
- * @param ops interest ops
- * @param arg argument that will be returned with the SelectListener
- * @return
- * @throws ClosedChannelException
- */
- public synchronized SelectionKey registerBlocking(SelectableChannel ch, int ops, Object arg)
- throws ClosedChannelException {
- synchronized (registrationLock) {
- registrationRequests++;
- }
- selector.wakeup();
- SelectionKey key = ch.register(selector, ops, arg);
- synchronized (registrationLock) {
- registrationRequests--;
- registrationLock.notifyAll();
- }
- return key;
- }
-
- /****
- * Main top-level IO loop this dispatches all IO events and timer events
- * together I believe this is fairly efficient
- */
- public void doLoop() throws IOException {
- int nEvents;
- processRegistrationQueue();
-
- while (dontStop) {
- nEvents = selector.select(timeout);
- if (nEvents > 0) {
- for (Iterator<SelectionKey> i = selector.selectedKeys()
- .iterator(); i.hasNext();) {
- SelectionKey sk = i.next();
- i.remove();
-
- if (!sk.isValid())
- continue;
-
- Object arg = sk.attachment();
- callback.handleEvent(sk, arg);
- }
- }
-
- if (this.registrationQueue.size() > 0)
- processRegistrationQueue();
-
- if (registrationRequests > 0) {
- synchronized (registrationLock) {
- while (registrationRequests > 0) {
- try {
- registrationLock.wait();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
- }
- }
- }
- }
-
- protected void processRegistrationQueue() {
- // add any elements in queue
- for (Iterator<Object[]> it = registrationQueue.iterator(); it.hasNext();) {
- Object[] args = it.next();
- SelectableChannel ch = (SelectableChannel) args[0];
- try {
- ch.register(selector, (Integer) args[1], args[2]);
- } catch (CancelledKeyException cke) {
- continue;
- } catch (ClosedChannelException e) {
- }
- it.remove();
- }
- }
-
- /**
- * Force this select loop to return immediately and re-enter select, useful
- * for example if a new item has been added to the select loop while it
- * was already blocked.
- */
- public void wakeup() {
- if (selector != null) {
- selector.wakeup();
- }
- }
-
- /**
- * Shuts down this select loop, may return before it has fully shutdown
- */
- public void shutdown() {
- this.dontStop = false;
- wakeup();
- }
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.example;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import org.openflow.example.cli.Options;
-import org.openflow.example.cli.ParseException;
-import org.openflow.example.cli.SimpleCLI;
-import org.openflow.io.OFMessageAsyncStream;
-import org.openflow.protocol.OFEchoReply;
-import org.openflow.protocol.OFFlowMod;
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFPacketIn;
-import org.openflow.protocol.OFPacketOut;
-import org.openflow.protocol.OFPort;
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionOutput;
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.util.LRULinkedHashMap;
-import org.openflow.util.U16;
-
-/**
- * @author Rob Sherwood (rob.sherwood@stanford.edu), David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public class SimpleController implements SelectListener {
- protected ExecutorService es;
- protected BasicFactory factory;
- protected SelectLoop listenSelectLoop;
- protected ServerSocketChannel listenSock;
- protected List<SelectLoop> switchSelectLoops;
- protected Map<SocketChannel,OFSwitch> switchSockets;
- protected Integer threadCount;
- protected int port;
-
- protected class OFSwitch {
- protected SocketChannel sock;
- protected OFMessageAsyncStream stream;
- protected Map<Integer, Short> macTable =
- new LRULinkedHashMap<Integer, Short>(64001, 64000);
-
- public OFSwitch(SocketChannel sock, OFMessageAsyncStream stream) {
- this.sock = sock;
- this.stream = stream;
- }
-
- public void handlePacketIn(OFPacketIn pi) {
- // Build the Match
- OFMatch match = new OFMatch();
- match.loadFromPacket(pi.getPacketData(), pi.getInPort());
- byte[] dlDst = match.getDataLayerDestination();
- Integer dlDstKey = Arrays.hashCode(dlDst);
- byte[] dlSrc = match.getDataLayerSource();
- Integer dlSrcKey = Arrays.hashCode(dlSrc);
- int bufferId = pi.getBufferId();
-
- // if the src is not multicast, learn it
- if ((dlSrc[0] & 0x1) == 0) {
- if (!macTable.containsKey(dlSrcKey) ||
- !macTable.get(dlSrcKey).equals(pi.getInPort())) {
- macTable.put(dlSrcKey, pi.getInPort());
- }
- }
-
- Short outPort = null;
- // if the destination is not multicast, look it up
- if ((dlDst[0] & 0x1) == 0) {
- outPort = macTable.get(dlDstKey);
- }
-
- // push a flow mod if we know where the packet should be going
- if (outPort != null) {
- OFFlowMod fm = (OFFlowMod) factory.getMessage(OFType.FLOW_MOD);
- fm.setBufferId(bufferId);
- fm.setCommand((short) 0);
- fm.setCookie(0);
- fm.setFlags((short) 0);
- fm.setHardTimeout((short) 0);
- fm.setIdleTimeout((short) 5);
- match.setInputPort(pi.getInPort());
- match.setWildcards(0);
- fm.setMatch(match);
- fm.setOutPort((short) OFPort.OFPP_NONE.getValue());
- fm.setPriority((short) 0);
- OFActionOutput action = new OFActionOutput();
- action.setMaxLength((short) 0);
- action.setPort(outPort);
- List<OFAction> actions = new ArrayList<OFAction>();
- actions.add(action);
- fm.setActions(actions);
- fm.setLength(U16.t(OFFlowMod.MINIMUM_LENGTH+OFActionOutput.MINIMUM_LENGTH));
- try {
- stream.write(fm);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- // Send a packet out
- if (outPort == null || pi.getBufferId() == 0xffffffff) {
- OFPacketOut po = new OFPacketOut();
- po.setBufferId(bufferId);
- po.setInPort(pi.getInPort());
-
- // set actions
- OFActionOutput action = new OFActionOutput();
- action.setMaxLength((short) 0);
- action.setPort((short) ((outPort == null) ? OFPort.OFPP_FLOOD
- .getValue() : outPort));
- List<OFAction> actions = new ArrayList<OFAction>();
- actions.add(action);
- po.setActions(actions);
- po.setActionsLength((short) OFActionOutput.MINIMUM_LENGTH);
-
- // set data if needed
- if (bufferId == 0xffffffff) {
- byte[] packetData = pi.getPacketData();
- po.setLength(U16.t(OFPacketOut.MINIMUM_LENGTH
- + po.getActionsLength() + packetData.length));
- po.setPacketData(packetData);
- } else {
- po.setLength(U16.t(OFPacketOut.MINIMUM_LENGTH
- + po.getActionsLength()));
- }
- try {
- stream.write(po);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- }
-
- public String toString() {
- InetAddress remote = sock.socket().getInetAddress();
- return remote.getHostAddress() + ":" + sock.socket().getPort();
- }
-
- public OFMessageAsyncStream getStream() {
- return stream;
- }
- }
-
- public SimpleController(int port) throws IOException{
- listenSock = ServerSocketChannel.open();
- listenSock.configureBlocking(false);
- listenSock.socket().bind(new java.net.InetSocketAddress(port));
- listenSock.socket().setReuseAddress(true);
- this.port = port;
- switchSelectLoops = new ArrayList<SelectLoop>();
- switchSockets = new ConcurrentHashMap<SocketChannel,OFSwitch>();
- threadCount = 1;
- listenSelectLoop = new SelectLoop(this);
- // register this connection for accepting
- listenSelectLoop.register(listenSock, SelectionKey.OP_ACCEPT, listenSock);
-
- this.factory = new BasicFactory();
- }
-
- @Override
- public void handleEvent(SelectionKey key, Object arg) throws IOException {
- if (arg instanceof ServerSocketChannel)
- handleListenEvent(key, (ServerSocketChannel)arg);
- else
- handleSwitchEvent(key, (SocketChannel) arg);
- }
-
- protected void handleListenEvent(SelectionKey key, ServerSocketChannel ssc)
- throws IOException {
- SocketChannel sock = listenSock.accept();
- OFMessageAsyncStream stream = new OFMessageAsyncStream(sock, factory);
- switchSockets.put(sock, new OFSwitch(sock, stream));
- System.err
- .println("Got new connection from " + switchSockets.get(sock));
- List<OFMessage> l = new ArrayList<OFMessage>();
- l.add(factory.getMessage(OFType.HELLO));
- l.add(factory.getMessage(OFType.FEATURES_REQUEST));
- stream.write(l);
-
- int ops = SelectionKey.OP_READ;
- if (stream.needsFlush())
- ops |= SelectionKey.OP_WRITE;
-
- // hash this switch into a thread
- SelectLoop sl = switchSelectLoops.get(sock.hashCode()
- % switchSelectLoops.size());
- sl.register(sock, ops, sock);
- // force select to return and re-enter using the new set of keys
- sl.wakeup();
- }
-
- protected void handleSwitchEvent(SelectionKey key, SocketChannel sock) {
- OFSwitch sw = switchSockets.get(sock);
- OFMessageAsyncStream stream = sw.getStream();
- try {
- if (key.isReadable()) {
- List<OFMessage> msgs = stream.read();
- if (msgs == null) {
- key.cancel();
- switchSockets.remove(sock);
- return;
- }
-
- for (OFMessage m : msgs) {
- switch (m.getType()) {
- case PACKET_IN:
- sw.handlePacketIn((OFPacketIn) m);
- break;
- case HELLO:
- System.err.println("GOT HELLO from " + sw);
- break;
- case ECHO_REQUEST:
- OFEchoReply reply = (OFEchoReply) stream
- .getMessageFactory().getMessage(
- OFType.ECHO_REPLY);
- reply.setXid(m.getXid());
- stream.write(reply);
- break;
- default:
- System.err.println("Unhandled OF message: "
- + m.getType() + " from "
- + sock.socket().getInetAddress());
- }
- }
- }
- if (key.isWritable()) {
- stream.flush();
- }
-
- /**
- * Only register for interest in R OR W, not both, causes stream
- * deadlock after some period of time
- */
- if (stream.needsFlush())
- key.interestOps(SelectionKey.OP_WRITE);
- else
- key.interestOps(SelectionKey.OP_READ);
- } catch (IOException e) {
- // if we have an exception, disconnect the switch
- key.cancel();
- switchSockets.remove(sock);
- }
- }
-
- public void run() throws IOException{
- System.err.println("Starting " + this.getClass().getCanonicalName() +
- " on port " + this.port + " with " + this.threadCount + " threads");
- // Static number of threads equal to processor cores
- es = Executors.newFixedThreadPool(threadCount);
-
- // Launch one select loop per threadCount and start running
- for (int i = 0; i < threadCount; ++i) {
- final SelectLoop sl = new SelectLoop(this);
- switchSelectLoops.add(sl);
- es.execute(new Runnable() {
- @Override
- public void run() {
- try {
- sl.doLoop();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }}
- );
- }
-
- // Start the listen loop
- listenSelectLoop.doLoop();
- }
-
- public static void main(String [] args) throws IOException {
- SimpleCLI cmd = parseArgs(args);
- int port = Integer.valueOf(cmd.getOptionValue("p"));
- SimpleController sc = new SimpleController(port);
- sc.threadCount = Integer.valueOf(cmd.getOptionValue("t"));
- sc.run();
- }
-
- public static SimpleCLI parseArgs(String[] args) {
- Options options = new Options();
- options.addOption("h", "help", "print help");
- // unused?
- // options.addOption("n", true, "the number of packets to send");
- options.addOption("p", "port", 6633, "the port to listen on");
- options.addOption("t", "threads", 1, "the number of threads to run");
- try {
- SimpleCLI cmd = SimpleCLI.parse(options, args);
- if (cmd.hasOption("h")) {
- printUsage(options);
- System.exit(0);
- }
- return cmd;
- } catch (ParseException e) {
- System.err.println(e);
- printUsage(options);
- }
-
- System.exit(-1);
- return null;
- }
-
- public static void printUsage(Options options) {
- SimpleCLI.printHelp("Usage: "
- + SimpleController.class.getCanonicalName() + " [options]",
- options);
- }
-}
+++ /dev/null
-package org.openflow.example.cli;
-
-public class Option {
- String shortOpt;
- String longOpt;
- Object defaultVal;
- String val; // current value of this option, string form
- boolean specified; // was this option found in the cmdline?
- String comment;
-
- /**
- * Option information storrage
- *
- * @param shortOpt
- * Short name for the option, e.g., "-p"
- * @param longOpt
- * Long name for option, e.g., "--port"
- * @param defaultVal
- * default value: "6633" or null if no default value
- * @param comment
- * String to print to explain this option, e.g., a help message
- */
- public Option(String shortOpt, String longOpt, Object defaultVal,
- String comment) {
- super();
- this.shortOpt = shortOpt;
- this.longOpt = longOpt;
- this.defaultVal = defaultVal;
- this.comment = comment;
- this.specified = false;
- }
-
- public Option(String shortOpt, String longOpt, String comment) {
- this(shortOpt, longOpt, null, comment);
- }
-
- public boolean needsArg() {
- return this.defaultVal != null;
- }
-}
+++ /dev/null
-package org.openflow.example.cli;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Very basic CLI options listing
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-
-public class Options {
- Map<String, Option> shortOptionsMap;
- Map<String, Option> longOptionsMap;
-
- public Options() {
- this.shortOptionsMap = new HashMap<String, Option>();
- this.longOptionsMap = new HashMap<String, Option>();
- }
-
- public static Options make(Option opts[]) {
- Options options = new Options();
- for (int i = 0; i < opts.length; i++)
- options.addOption(opts[i]);
- return options;
- }
-
- private void addOption(Option option) {
- if (option.shortOpt != null)
- this.shortOptionsMap.put(option.shortOpt, option);
- if (option.longOpt != null)
- this.longOptionsMap.put(option.longOpt, option);
- }
-
- protected void addOption(String shortName, String longName, Object o,
- String comment) {
- Option option = new Option(shortName, longName, o, comment);
- addOption(option);
- }
-
- public void addOption(String shortName, String longName, boolean b,
- String comment) {
- this.addOption(shortName, longName, Boolean.valueOf(b), comment);
- }
-
- public void addOption(String shortName, String longName, int i,
- String comment) {
- this.addOption(shortName, longName, Integer.valueOf(i), comment);
- }
-
- public Option getOption(String shortName) {
- return this.shortOptionsMap.get(shortName);
- }
-
- public Option getOptionByLongName(String longName) {
- return this.longOptionsMap.get(longName);
- }
-
- public Collection<Option> getOptions() {
- return this.shortOptionsMap.values();
- }
-
- public void addOption(String shortName, String longName, String comment) {
- this.addOption(shortName, longName, null, comment);
- }
-
-}
+++ /dev/null
-package org.openflow.example.cli;
-
-public class ParseException extends Exception {
-
- public ParseException(String msg) {
- super(msg);
- }
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
-
-}
+++ /dev/null
-package org.openflow.example.cli;
-
-import java.io.PrintStream;
-
-/**
- * Very basic command line interface
- *
- * (really should be something in java.* for this; only implementing this to
- * remove external dependencies)
- *
- * Modeled after org.apache.common.cli .
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-
-public class SimpleCLI {
-
- private static final String NAME_WIDTH = "-15";
- private static final String VALUE_WIDTH = "-20";
- private static final String FORMAT_STRING = "%1$" + NAME_WIDTH + "s%2$"
- + VALUE_WIDTH + "s%3$s\n";
- Options options;
-
- int optind;
-
- /**
- * Need to use SimpleCLI.parse() instead
- *
- * @param options
- */
-
- private SimpleCLI(Options options) {
- this.options = options;
- }
-
- /**
- * @return the index of the last parsed option
- *
- * Useful for finding options that don't start with "-" or "--"
- */
- public int getOptind() {
- return optind;
- }
-
- /**
- * @param optind
- * the optind to set
- */
- public void setOptind(int optind) {
- this.optind = optind;
- }
-
- public boolean hasOption(String shortName) {
- Option option = this.options.getOption(shortName);
- if (option == null)
- return false;
- return option.specified;
- }
-
- public String getOptionValue(String shortName) {
- Option option = this.options.getOption(shortName);
- if (option == null)
- return null;
- if (!option.specified)
- return option.defaultVal.toString();
- else
- return option.val;
- }
-
- public static SimpleCLI parse(Options options, String[] args)
- throws ParseException {
- SimpleCLI simpleCLI = new SimpleCLI(options);
- int i;
- for (i = 0; i < args.length; i++) {
- if (!args[i].startsWith("-"))
- break; // not a short or long option
- String optName = args[i].replaceFirst("^-*", ""); // remove leading
- // "--"
- Option option;
- if (args[i].startsWith("--"))
- option = options.getOptionByLongName(optName);
- else
- option = options.getOption(optName);
- if (option == null)
- throw new ParseException("unknown option: " + optName);
- option.specified = true;
- if (option.needsArg()) {
- if ((i + 1) >= args.length)
- throw new ParseException("option " + optName
- + " requires an argument:: " + option.comment);
- option.val = args[i + 1];
- i++; // skip next element; we've parsed it
- }
- }
- simpleCLI.setOptind(i);
- return simpleCLI;
- }
-
- public static void printHelp(String canonicalName, Options options) {
- printHelp(canonicalName, options, System.err);
- }
-
- private static void printHelp(String helpString, Options options,
- PrintStream err) {
- err.println(helpString);
- err.format(FORMAT_STRING, "\toption", "type [default]", "usage");
- for (Option option : options.getOptions()) {
- String msg = "\t";
- if (option.shortOpt != null)
- msg += "-" + option.shortOpt;
- if (option.longOpt != null) {
- if (!msg.equals("\t"))
- msg += "|";
- msg += "--" + option.longOpt;
- }
- String val = "";
- if (option.defaultVal != null)
- val += option.defaultVal.getClass().getSimpleName() + " ["
- + option.defaultVal.toString() + "]";
- String comment;
- if (option.comment != null)
- comment = option.comment;
- else
- comment = "";
-
- err.format(FORMAT_STRING, msg, val, comment);
- }
- err.println(""); // print blank line at the end, to look pretty
- }
-
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.io;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.SocketChannel;
-import java.util.List;
-
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.factory.OFMessageFactory;
-
-/**
- * Asynchronous OpenFlow message marshalling and unmarshalling stream wrapped
- * around an NIO SocketChannel
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- * @author David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public class OFMessageAsyncStream implements OFMessageInStream,
- OFMessageOutStream {
- static public int defaultBufferSize = 1048576; // 1MB
-
- protected ByteBuffer inBuf, outBuf;
- protected OFMessageFactory messageFactory;
- protected SocketChannel sock;
- protected int partialReadCount = 0;
-
- public OFMessageAsyncStream(SocketChannel sock,
- OFMessageFactory messageFactory) throws IOException {
- inBuf = ByteBuffer
- .allocateDirect(OFMessageAsyncStream.defaultBufferSize);
- outBuf = ByteBuffer
- .allocateDirect(OFMessageAsyncStream.defaultBufferSize);
- this.sock = sock;
- this.messageFactory = messageFactory;
- this.sock.configureBlocking(false);
- }
-
- @Override
- public List<OFMessage> read() throws IOException {
- return this.read(0);
- }
-
- @Override
- public List<OFMessage> read(int limit) throws IOException {
- List<OFMessage> l;
- int read = sock.read(inBuf);
- if (read == -1)
- return null;
- inBuf.flip();
- l = messageFactory.parseMessages(inBuf, limit);
- if (inBuf.hasRemaining())
- inBuf.compact();
- else
- inBuf.clear();
- return l;
- }
-
- protected void appendMessageToOutBuf(OFMessage m) throws IOException {
- int msglen = m.getLengthU();
- if (outBuf.remaining() < msglen) {
- throw new IOException(
- "Message length exceeds buffer capacity: " + msglen);
- }
- m.writeTo(outBuf);
- }
-
- /**
- * Buffers a single outgoing openflow message
- */
- @Override
- public void write(OFMessage m) throws IOException {
- appendMessageToOutBuf(m);
- }
-
- /**
- * Buffers a list of OpenFlow messages
- */
- @Override
- public void write(List<OFMessage> l) throws IOException {
- for (OFMessage m : l) {
- appendMessageToOutBuf(m);
- }
- }
-
- /**
- * Flush buffered outgoing data. Keep flushing until needsFlush() returns
- * false. Each flush() corresponds to a SocketChannel.write(), so this is
- * designed for one flush() per select() event
- */
- public void flush() throws IOException {
- outBuf.flip(); // swap pointers; lim = pos; pos = 0;
- sock.write(outBuf); // write data starting at pos up to lim
- outBuf.compact();
- }
-
- /**
- * Is there outgoing buffered data that needs to be flush()'d?
- */
- public boolean needsFlush() {
- return outBuf.position() > 0;
- }
-
- /**
- * @return the messageFactory
- */
- public OFMessageFactory getMessageFactory() {
- return messageFactory;
- }
-
- /**
- * @param messageFactory
- * the messageFactory to set
- */
- public void setMessageFactory(OFMessageFactory messageFactory) {
- this.messageFactory = messageFactory;
- }
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.io;
-
-import java.util.List;
-
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.factory.OFMessageFactory;
-
-/**
- * Interface for reading OFMessages from a buffered stream
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public interface OFMessageInStream {
- /**
- * Read OF messages from the stream
- *
- * @return a list of OF Messages, empty if no complete messages are
- * available, null if the stream has closed
- */
- public List<OFMessage> read() throws java.io.IOException;
-
- /**
- * Read OF messages from the stream
- *
- * @param limit
- * The maximum number of messages to read: 0 means all that are
- * buffered
- * @return a list of OF Messages, empty if no complete messages are
- * available, null if the stream has closed
- *
- */
- public List<OFMessage> read(int limit) throws java.io.IOException;
-
- /**
- * Sets the OFMessageFactory used to create messages on this stream
- *
- * @param factory
- */
- public void setMessageFactory(OFMessageFactory factory);
-
- /**
- * Returns the OFMessageFactory used to create messages on this stream
- *
- * @return
- */
- public OFMessageFactory getMessageFactory();
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.io;
-
-import java.util.List;
-import org.openflow.protocol.OFMessage;
-
-/**
- * Interface for writing OFMessages to a buffered stream
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public interface OFMessageOutStream {
- /**
- * Write an OpenFlow message to the stream
- * @param m An OF Message
- */
- public void write(OFMessage m) throws java.io.IOException;
-
- /**
- * Write an OpenFlow message to the stream.
- * Messages are sent in one large write() for efficiency
- * @param l A list of OF Messages
- */
- public void write(List<OFMessage> l) throws java.io.IOException;
-
- /**
- * Pushes buffered data out the Stream; this is NOT guranteed to flush all
- * data, multiple flush() calls may be required, until needFlush() returns
- * false.
- */
- public void flush() throws java.io.IOException;
-
- /**
- * Is there buffered data that needs to be flushed?
- * @return true if there is buffered data and flush() should be called
- */
- public boolean needsFlush();
-}
+++ /dev/null
-package org.openflow.protocol;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface Instantiable<E> {
-
- /**
- * Create a new instance of a given subclass.
- * @return the new instance.
- */
- public E instantiate();
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an OFPT_BARRIER_REPLY message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFBarrierReply extends OFMessage {
- public OFBarrierReply() {
- super();
- this.type = OFType.BARRIER_REPLY;
- this.length = U16.t(OFMessage.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an OFPT_BARRIER_REQUEST message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFBarrierRequest extends OFMessage {
- public OFBarrierRequest() {
- super();
- this.type = OFType.BARRIER_REQUEST;
- this.length = U16.t(OFMessage.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_echo_reply message
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- */
-
-public class OFEchoReply extends OFEchoRequest {
- public static int MINIMUM_LENGTH = 8;
-
- public OFEchoReply() {
- super();
- this.type = OFType.ECHO_REPLY;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_echo_request message
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- */
-
-public class OFEchoRequest extends OFMessage {
- public static int MINIMUM_LENGTH = 8;
- byte[] payload;
-
- public OFEchoRequest() {
- super();
- this.type = OFType.ECHO_REQUEST;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- @Override
- public void readFrom(ByteBuffer bb) {
- super.readFrom(bb);
- int datalen = this.getLengthU() - MINIMUM_LENGTH;
- if (datalen > 0) {
- this.payload = new byte[datalen];
- bb.get(payload);
- }
- }
-
- /**
- * @return the payload
- */
- public byte[] getPayload() {
- return payload;
- }
-
- /**
- * @param payload
- * the payload to set
- */
- public void setPayload(byte[] payload) {
- this.payload = payload;
- }
-
- @Override
- public void writeTo(ByteBuffer bb) {
- super.writeTo(bb);
- if (payload != null)
- bb.put(payload);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.List;
-
-import org.openflow.protocol.factory.OFMessageFactory;
-import org.openflow.protocol.factory.OFMessageFactoryAware;
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_error_msg
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- */
-public class OFError extends OFMessage implements OFMessageFactoryAware {
- public static int MINIMUM_LENGTH = 12;
-
- public enum OFErrorType {
- OFPET_HELLO_FAILED, OFPET_BAD_REQUEST, OFPET_BAD_ACTION, OFPET_FLOW_MOD_FAILED, OFPET_PORT_MOD_FAILED, OFPET_QUEUE_OP_FAILED
- }
-
- public enum OFHelloFailedCode {
- OFPHFC_INCOMPATIBLE, OFPHFC_EPERM
- }
-
- public enum OFBadRequestCode {
- OFPBRC_BAD_VERSION, OFPBRC_BAD_TYPE, OFPBRC_BAD_STAT, OFPBRC_BAD_VENDOR, OFPBRC_BAD_SUBTYPE, OFPBRC_EPERM, OFPBRC_BAD_LEN, OFPBRC_BUFFER_EMPTY, OFPBRC_BUFFER_UNKNOWN
- }
-
- public enum OFBadActionCode {
- OFPBAC_BAD_TYPE, OFPBAC_BAD_LEN, OFPBAC_BAD_VENDOR, OFPBAC_BAD_VENDOR_TYPE, OFPBAC_BAD_OUT_PORT, OFPBAC_BAD_ARGUMENT, OFPBAC_EPERM, OFPBAC_TOO_MANY, OFPBAC_BAD_QUEUE
- }
-
- public enum OFFlowModFailedCode {
- OFPFMFC_ALL_TABLES_FULL, OFPFMFC_OVERLAP, OFPFMFC_EPERM, OFPFMFC_BAD_EMERG_TIMEOUT, OFPFMFC_BAD_COMMAND, OFPFMFC_UNSUPPORTED
- }
-
- public enum OFPortModFailedCode {
- OFPPMFC_BAD_PORT, OFPPMFC_BAD_HW_ADDR
- }
-
- public enum OFQueueOpFailedCode {
- OFPQOFC_BAD_PORT, OFPQOFC_BAD_QUEUE, OFPQOFC_EPERM
- }
-
- protected short errorType;
- protected short errorCode;
- protected OFMessageFactory factory;
- protected byte[] error;
- protected boolean errorIsAscii;
-
- public OFError() {
- super();
- this.type = OFType.ERROR;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the errorType
- */
- public short getErrorType() {
- return errorType;
- }
-
- /**
- * @param errorType
- * the errorType to set
- */
- public void setErrorType(short errorType) {
- this.errorType = errorType;
- }
-
- public void setErrorType(OFErrorType type) {
- this.errorType = (short) type.ordinal();
- }
-
- /**
- * @return the errorCode
- */
- public short getErrorCode() {
- return errorCode;
- }
-
- /**
- * @param errorCode
- * the errorCode to set
- */
- public void setErrorCode(OFHelloFailedCode code) {
- this.errorCode = (short) code.ordinal();
- }
-
- public void setErrorCode(short errorCode) {
- this.errorCode = errorCode;
- }
-
- public void setErrorCode(OFBadRequestCode code) {
- this.errorCode = (short) code.ordinal();
- }
-
- public void setErrorCode(OFBadActionCode code) {
- this.errorCode = (short) code.ordinal();
- }
-
- public void setErrorCode(OFFlowModFailedCode code) {
- this.errorCode = (short) code.ordinal();
- }
-
- public void setErrorCode(OFPortModFailedCode code) {
- this.errorCode = (short) code.ordinal();
- }
-
- public void setErrorCode(OFQueueOpFailedCode code) {
- this.errorCode = (short) code.ordinal();
- }
-
- public OFMessage getOffendingMsg() {
- // should only have one message embedded; if more than one, just
- // grab first
- if (this.error == null)
- return null;
- ByteBuffer errorMsg = ByteBuffer.wrap(this.error);
- if (factory == null)
- throw new RuntimeException("MessageFactory not set");
- List<OFMessage> messages = this.factory.parseMessages(errorMsg,
- error.length);
- // OVS apparently sends partial messages in errors
- // need to be careful of that AND can't use data.limit() as
- // a packet boundary because there could be more data queued
- if (messages.size() > 0) {
- return messages.get(0);
- } else {
- return null;
- }
- }
-
- /**
- * Write this offending message into the payload of the Error message
- *
- * @param offendingMsg
- */
-
- public void setOffendingMsg(OFMessage offendingMsg) {
- if (offendingMsg == null) {
- super.setLengthU(MINIMUM_LENGTH);
- } else {
- this.error = new byte[offendingMsg.getLengthU()];
- ByteBuffer data = ByteBuffer.wrap(this.error);
- offendingMsg.writeTo(data);
- super.setLengthU(MINIMUM_LENGTH + offendingMsg.getLengthU());
- }
- }
-
- public OFMessageFactory getFactory() {
- return factory;
- }
-
- @Override
- public void setMessageFactory(OFMessageFactory factory) {
- this.factory = factory;
- }
-
- /**
- * @return the error
- */
- public byte[] getError() {
- return error;
- }
-
- /**
- * @param error
- * the error to set
- */
- public void setError(byte[] error) {
- this.error = error;
- }
-
- /**
- * @return the errorIsAscii
- */
- public boolean isErrorIsAscii() {
- return errorIsAscii;
- }
-
- /**
- * @param errorIsAscii
- * the errorIsAscii to set
- */
- public void setErrorIsAscii(boolean errorIsAscii) {
- this.errorIsAscii = errorIsAscii;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.errorType = data.getShort();
- this.errorCode = data.getShort();
- int dataLength = this.getLengthU() - MINIMUM_LENGTH;
- if (dataLength > 0) {
- this.error = new byte[dataLength];
- data.get(this.error);
- if (this.errorType == OFErrorType.OFPET_HELLO_FAILED.ordinal())
- this.errorIsAscii = true;
- }
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(errorType);
- data.putShort(errorCode);
- if (error != null)
- data.put(error);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + Arrays.hashCode(error);
- result = prime * result + errorCode;
- result = prime * result + (errorIsAscii ? 1231 : 1237);
- result = prime * result + errorType;
- return result;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (getClass() != obj.getClass())
- return false;
- OFError other = (OFError) obj;
- if (!Arrays.equals(error, other.error))
- return false;
- if (errorCode != other.errorCode)
- return false;
- if (errorIsAscii != other.errorIsAscii)
- return false;
- if (errorType != other.errorType)
- return false;
- return true;
- }
-
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.openflow.util.U16;
-
-
-/**
- * Represents a features reply message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public class OFFeaturesReply extends OFMessage implements Serializable {
- public static int MINIMUM_LENGTH = 32;
-
- /**
- * Corresponds to bits on the capabilities field
- */
- public enum OFCapabilities {
- OFPC_FLOW_STATS (1 << 0),
- OFPC_TABLE_STATS (1 << 1),
- OFPC_PORT_STATS (1 << 2),
- OFPC_STP (1 << 3),
- OFPC_RESERVED (1 << 4),
- OFPC_IP_REASM (1 << 5),
- OFPC_QUEUE_STATS (1 << 6),
- OFPC_ARP_MATCH_IP (1 << 7);
-
- protected int value;
-
- private OFCapabilities(int value) {
- this.value = value;
- }
-
- /**
- * @return the value
- */
- public int getValue() {
- return value;
- }
- }
-
- protected long datapathId;
- protected int buffers;
- protected byte tables;
- protected int capabilities;
- protected int actions;
- protected List<OFPhysicalPort> ports;
-
- public OFFeaturesReply() {
- super();
- this.type = OFType.FEATURES_REPLY;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the datapathId
- */
- public long getDatapathId() {
- return datapathId;
- }
-
- /**
- * @param datapathId the datapathId to set
- */
- public void setDatapathId(long datapathId) {
- this.datapathId = datapathId;
- }
-
- /**
- * @return the buffers
- */
- public int getBuffers() {
- return buffers;
- }
-
- /**
- * @param buffers the buffers to set
- */
- public void setBuffers(int buffers) {
- this.buffers = buffers;
- }
-
- /**
- * @return the tables
- */
- public byte getTables() {
- return tables;
- }
-
- /**
- * @param tables the tables to set
- */
- public void setTables(byte tables) {
- this.tables = tables;
- }
-
- /**
- * @return the capabilities
- */
- public int getCapabilities() {
- return capabilities;
- }
-
- /**
- * @param capabilities the capabilities to set
- */
- public void setCapabilities(int capabilities) {
- this.capabilities = capabilities;
- }
-
- /**
- * @return the actions
- */
- public int getActions() {
- return actions;
- }
-
- /**
- * @param actions the actions to set
- */
- public void setActions(int actions) {
- this.actions = actions;
- }
-
- /**
- * @return the ports
- */
- public List<OFPhysicalPort> getPorts() {
- return ports;
- }
-
- /**
- * @param ports the ports to set
- */
- public void setPorts(List<OFPhysicalPort> ports) {
- this.ports = ports;
- if (ports == null) {
- this.setLengthU(MINIMUM_LENGTH);
- } else {
- this.setLengthU(MINIMUM_LENGTH + ports.size()
- * OFPhysicalPort.MINIMUM_LENGTH);
- }
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.datapathId = data.getLong();
- this.buffers = data.getInt();
- this.tables = data.get();
- data.position(data.position() + 3); // pad
- this.capabilities = data.getInt();
- this.actions = data.getInt();
- if (this.ports == null) {
- this.ports = new ArrayList<OFPhysicalPort>();
- } else {
- this.ports.clear();
- }
- int portCount = (super.getLengthU() - 32)
- / OFPhysicalPort.MINIMUM_LENGTH;
- OFPhysicalPort port;
- for (int i = 0; i < portCount; ++i) {
- port = new OFPhysicalPort();
- port.readFrom(data);
- this.ports.add(port);
- }
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putLong(this.datapathId);
- data.putInt(this.buffers);
- data.put(this.tables);
- data.putShort((short) 0); // pad
- data.put((byte) 0); // pad
- data.putInt(this.capabilities);
- data.putInt(this.actions);
- if (this.ports != null)
- for (OFPhysicalPort port : this.ports) {
- port.writeTo(data);
- }
- }
-
- @Override
- public int hashCode() {
- final int prime = 139;
- int result = super.hashCode();
- result = prime * result + actions;
- result = prime * result + buffers;
- result = prime * result + capabilities;
- result = prime * result + (int) (datapathId ^ (datapathId >>> 32));
- result = prime * result + ((ports == null) ? 0 : ports.hashCode());
- result = prime * result + tables;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFFeaturesReply)) {
- return false;
- }
- OFFeaturesReply other = (OFFeaturesReply) obj;
- if (actions != other.actions) {
- return false;
- }
- if (buffers != other.buffers) {
- return false;
- }
- if (capabilities != other.capabilities) {
- return false;
- }
- if (datapathId != other.datapathId) {
- return false;
- }
- if (ports == null) {
- if (other.ports != null) {
- return false;
- }
- } else if (!ports.equals(other.ports)) {
- return false;
- }
- if (tables != other.tables) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-
-/**
- * Represents a features request message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public class OFFeaturesRequest extends OFMessage {
- public static int MINIMUM_LENGTH = 8;
-
- public OFFeaturesRequest() {
- super();
- this.type = OFType.FEATURES_REQUEST;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.factory.OFActionFactory;
-import org.openflow.protocol.factory.OFActionFactoryAware;
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_flow_mod message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public class OFFlowMod extends OFMessage implements OFActionFactoryAware, Cloneable {
- public static int MINIMUM_LENGTH = 72;
-
- public static final short OFPFC_ADD = 0; /* New flow. */
- public static final short OFPFC_MODIFY = 1; /* Modify all matching flows. */
- public static final short OFPFC_MODIFY_STRICT = 2; /* Modify entry strictly matching wildcards */
- public static final short OFPFC_DELETE=3; /* Delete all matching flows. */
- public static final short OFPFC_DELETE_STRICT =4; /* Strictly match wildcards and priority. */
-
- protected OFActionFactory actionFactory;
- protected OFMatch match;
- protected long cookie;
- protected short command;
- protected short idleTimeout;
- protected short hardTimeout;
- protected short priority;
- protected int bufferId;
- protected short outPort;
- protected short flags;
- protected List<OFAction> actions;
-
- public OFFlowMod() {
- super();
- this.type = OFType.FLOW_MOD;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * Get buffer_id
- * @return
- */
- public int getBufferId() {
- return this.bufferId;
- }
-
- /**
- * Set buffer_id
- * @param bufferId
- */
- public OFFlowMod setBufferId(int bufferId) {
- this.bufferId = bufferId;
- return this;
- }
-
- /**
- * Get cookie
- * @return
- */
- public long getCookie() {
- return this.cookie;
- }
-
- /**
- * Set cookie
- * @param cookie
- */
- public OFFlowMod setCookie(long cookie) {
- this.cookie = cookie;
- return this;
- }
-
- /**
- * Get command
- * @return
- */
- public short getCommand() {
- return this.command;
- }
-
- /**
- * Set command
- * @param command
- */
- public OFFlowMod setCommand(short command) {
- this.command = command;
- return this;
- }
-
- /**
- * Get flags
- * @return
- */
- public short getFlags() {
- return this.flags;
- }
-
- /**
- * Set flags
- * @param flags
- */
- public OFFlowMod setFlags(short flags) {
- this.flags = flags;
- return this;
- }
-
- /**
- * Get hard_timeout
- * @return
- */
- public short getHardTimeout() {
- return this.hardTimeout;
- }
-
- /**
- * Set hard_timeout
- * @param hardTimeout
- */
- public OFFlowMod setHardTimeout(short hardTimeout) {
- this.hardTimeout = hardTimeout;
- return this;
- }
-
- /**
- * Get idle_timeout
- * @return
- */
- public short getIdleTimeout() {
- return this.idleTimeout;
- }
-
- /**
- * Set idle_timeout
- * @param idleTimeout
- */
- public OFFlowMod setIdleTimeout(short idleTimeout) {
- this.idleTimeout = idleTimeout;
- return this;
- }
-
- /**
- * Gets a copy of the OFMatch object for this FlowMod, changes to this
- * object do not modify the FlowMod
- * @return
- */
- public OFMatch getMatch() {
- return this.match;
- }
-
- /**
- * Set match
- * @param match
- */
- public OFFlowMod setMatch(OFMatch match) {
- this.match = match;
- return this;
- }
-
- /**
- * Get out_port
- * @return
- */
- public short getOutPort() {
- return this.outPort;
- }
-
- /**
- * Set out_port
- * @param outPort
- */
- public OFFlowMod setOutPort(short outPort) {
- this.outPort = outPort;
- return this;
- }
-
- /**
- * Set out_port
- * @param port
- */
- public OFFlowMod setOutPort(OFPort port) {
- this.outPort = port.getValue();
- return this;
- }
-
- /**
- * Get priority
- * @return
- */
- public short getPriority() {
- return this.priority;
- }
-
- /**
- * Set priority
- * @param priority
- */
- public OFFlowMod setPriority(short priority) {
- this.priority = priority;
- return this;
- }
-
- /**
- * Returns read-only copies of the actions contained in this Flow Mod
- * @return a list of ordered OFAction objects
- */
- public List<OFAction> getActions() {
- return this.actions;
- }
-
- /**
- * Sets the list of actions this Flow Mod contains
- * @param actions a list of ordered OFAction objects
- */
- public OFFlowMod setActions(List<OFAction> actions) {
- this.actions = actions;
- return this;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- if (this.match == null)
- this.match = new OFMatch();
- this.match.readFrom(data);
- this.cookie = data.getLong();
- this.command = data.getShort();
- this.idleTimeout = data.getShort();
- this.hardTimeout = data.getShort();
- this.priority = data.getShort();
- this.bufferId = data.getInt();
- this.outPort = data.getShort();
- this.flags = data.getShort();
- if (this.actionFactory == null)
- throw new RuntimeException("OFActionFactory not set");
- this.actions = this.actionFactory.parseActions(data, getLengthU() -
- MINIMUM_LENGTH);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- this.match.writeTo(data);
- data.putLong(cookie);
- data.putShort(command);
- data.putShort(idleTimeout);
- data.putShort(hardTimeout);
- data.putShort(priority);
- data.putInt(bufferId);
- data.putShort(outPort);
- data.putShort(flags);
- if (actions != null) {
- for (OFAction action : actions) {
- action.writeTo(data);
- }
- }
- }
-
- @Override
- public void setActionFactory(OFActionFactory actionFactory) {
- this.actionFactory = actionFactory;
- }
-
- @Override
- public int hashCode() {
- final int prime = 227;
- int result = super.hashCode();
- result = prime * result + ((actions == null) ? 0 : actions.hashCode());
- result = prime * result + bufferId;
- result = prime * result + command;
- result = prime * result + (int) (cookie ^ (cookie >>> 32));
- result = prime * result + flags;
- result = prime * result + hardTimeout;
- result = prime * result + idleTimeout;
- result = prime * result + ((match == null) ? 0 : match.hashCode());
- result = prime * result + outPort;
- result = prime * result + priority;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFFlowMod)) {
- return false;
- }
- OFFlowMod other = (OFFlowMod) obj;
- if (actions == null) {
- if (other.actions != null) {
- return false;
- }
- } else if (!actions.equals(other.actions)) {
- return false;
- }
- if (bufferId != other.bufferId) {
- return false;
- }
- if (command != other.command) {
- return false;
- }
- if (cookie != other.cookie) {
- return false;
- }
- if (flags != other.flags) {
- return false;
- }
- if (hardTimeout != other.hardTimeout) {
- return false;
- }
- if (idleTimeout != other.idleTimeout) {
- return false;
- }
- if (match == null) {
- if (other.match != null) {
- return false;
- }
- } else if (!match.equals(other.match)) {
- return false;
- }
- if (outPort != other.outPort) {
- return false;
- }
- if (priority != other.priority) {
- return false;
- }
- return true;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#clone()
- */
- @Override
- public OFFlowMod clone() {
- try {
- OFMatch neoMatch = match.clone();
- OFFlowMod flowMod = (OFFlowMod) super.clone();
- flowMod.setMatch(neoMatch);
- List<OFAction> neoActions = new LinkedList<OFAction>();
- for(OFAction action: this.actions)
- neoActions.add((OFAction) action.clone());
- flowMod.setActions(neoActions);
- return flowMod;
- } catch (CloneNotSupportedException e) {
- // Won't happen
- throw new RuntimeException(e);
- }
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return "OFFlowMod [actionFactory=" + actionFactory + ", actions="
- + actions + ", bufferId=" + bufferId + ", command=" + command
- + ", cookie=" + cookie + ", flags=" + flags + ", hardTimeout="
- + hardTimeout + ", idleTimeout=" + idleTimeout + ", match="
- + match + ", outPort=" + outPort + ", priority=" + priority
- + ", length=" + length + ", type=" + type + ", version="
- + version + ", xid=" + xid + "]";
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_flow_removed message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public class OFFlowRemoved extends OFMessage {
- public static int MINIMUM_LENGTH = 88;
-
- public enum OFFlowRemovedReason {
- OFPRR_IDLE_TIMEOUT,
- OFPRR_HARD_TIMEOUT,
- OFPRR_DELETE
- }
-
- protected OFMatch match;
- protected long cookie;
- protected short priority;
- protected OFFlowRemovedReason reason;
- protected int durationSeconds;
- protected int durationNanoseconds;
- protected short idleTimeout;
- protected long packetCount;
- protected long byteCount;
-
- public OFFlowRemoved() {
- super();
- this.type = OFType.FLOW_REMOVED;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * Get cookie
- * @return
- */
- public long getCookie() {
- return this.cookie;
- }
-
- /**
- * Set cookie
- * @param cookie
- */
- public void setCookie(long cookie) {
- this.cookie = cookie;
- }
-
- /**
- * Get idle_timeout
- * @return
- */
- public short getIdleTimeout() {
- return this.idleTimeout;
- }
-
- /**
- * Set idle_timeout
- * @param idleTimeout
- */
- public void setIdleTimeout(short idleTimeout) {
- this.idleTimeout = idleTimeout;
- }
-
- /**
- * Gets a copy of the OFMatch object for this FlowMod, changes to this
- * object do not modify the FlowMod
- * @return
- */
- public OFMatch getMatch() {
- return this.match;
- }
-
- /**
- * Set match
- * @param match
- */
- public void setMatch(OFMatch match) {
- this.match = match;
- }
-
- /**
- * Get priority
- * @return
- */
- public short getPriority() {
- return this.priority;
- }
-
- /**
- * Set priority
- * @param priority
- */
- public void setPriority(short priority) {
- this.priority = priority;
- }
-
- /**
- * @return the reason
- */
- public OFFlowRemovedReason getReason() {
- return reason;
- }
-
- /**
- * @param reason the reason to set
- */
- public void setReason(OFFlowRemovedReason reason) {
- this.reason = reason;
- }
-
- /**
- * @return the durationSeconds
- */
- public int getDurationSeconds() {
- return durationSeconds;
- }
-
- /**
- * @param durationSeconds the durationSeconds to set
- */
- public void setDurationSeconds(int durationSeconds) {
- this.durationSeconds = durationSeconds;
- }
-
- /**
- * @return the durationNanoseconds
- */
- public int getDurationNanoseconds() {
- return durationNanoseconds;
- }
-
- /**
- * @param durationNanoseconds the durationNanoseconds to set
- */
- public void setDurationNanoseconds(int durationNanoseconds) {
- this.durationNanoseconds = durationNanoseconds;
- }
-
- /**
- * @return the packetCount
- */
- public long getPacketCount() {
- return packetCount;
- }
-
- /**
- * @param packetCount the packetCount to set
- */
- public void setPacketCount(long packetCount) {
- this.packetCount = packetCount;
- }
-
- /**
- * @return the byteCount
- */
- public long getByteCount() {
- return byteCount;
- }
-
- /**
- * @param byteCount the byteCount to set
- */
- public void setByteCount(long byteCount) {
- this.byteCount = byteCount;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- if (this.match == null)
- this.match = new OFMatch();
- this.match.readFrom(data);
- this.cookie = data.getLong();
- this.priority = data.getShort();
- this.reason = OFFlowRemovedReason.values()[(0xff & data.get())];
- data.get(); // pad
- this.durationSeconds = data.getInt();
- this.durationNanoseconds = data.getInt();
- this.idleTimeout = data.getShort();
- data.get(); // pad
- data.get(); // pad
- this.packetCount = data.getLong();
- this.byteCount = data.getLong();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- this.match.writeTo(data);
- data.putLong(cookie);
- data.putShort(priority);
- data.put((byte) this.reason.ordinal());
- data.put((byte) 0);
- data.putInt(this.durationSeconds);
- data.putInt(this.durationNanoseconds);
- data.putShort(idleTimeout);
- data.put((byte) 0); // pad
- data.put((byte) 0); // pad
- data.putLong(this.packetCount);
- data.putLong(this.byteCount);
- }
-
- @Override
- public int hashCode() {
- final int prime = 271;
- int result = super.hashCode();
- result = prime * result + (int) (byteCount ^ (byteCount >>> 32));
- result = prime * result + (int) (cookie ^ (cookie >>> 32));
- result = prime * result + durationNanoseconds;
- result = prime * result + durationSeconds;
- result = prime * result + idleTimeout;
- result = prime * result + ((match == null) ? 0 : match.hashCode());
- result = prime * result + (int) (packetCount ^ (packetCount >>> 32));
- result = prime * result + priority;
- result = prime * result + ((reason == null) ? 0 : reason.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFFlowRemoved)) {
- return false;
- }
- OFFlowRemoved other = (OFFlowRemoved) obj;
- if (byteCount != other.byteCount) {
- return false;
- }
- if (cookie != other.cookie) {
- return false;
- }
- if (durationNanoseconds != other.durationNanoseconds) {
- return false;
- }
- if (durationSeconds != other.durationSeconds) {
- return false;
- }
- if (idleTimeout != other.idleTimeout) {
- return false;
- }
- if (match == null) {
- if (other.match != null) {
- return false;
- }
- } else if (!match.equals(other.match)) {
- return false;
- }
- if (packetCount != other.packetCount) {
- return false;
- }
- if (priority != other.priority) {
- return false;
- }
- if (reason == null) {
- if (other.reason != null) {
- return false;
- }
- } else if (!reason.equals(other.reason)) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-/**
- * Represents an OFPT_GET_CONFIG_REPLY type message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFGetConfigReply extends OFSwitchConfig {
- public OFGetConfigReply() {
- super();
- this.type = OFType.GET_CONFIG_REPLY;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an OFPT_GET_CONFIG_REQUEST type message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFGetConfigRequest extends OFMessage {
- public OFGetConfigRequest() {
- super();
- this.type = OFType.GET_CONFIG_REQUEST;
- this.length = U16.t(OFMessage.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-
-/**
- * Represents an ofp_hello message
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Feb 8, 2010
- */
-public class OFHello extends OFMessage {
- public static int MINIMUM_LENGTH = 8;
-
- /**
- * Construct a ofp_hello message
- */
- public OFHello() {
- super();
- this.type = OFType.HELLO;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import org.openflow.util.HexString;
-import org.openflow.util.U16;
-import org.openflow.util.U8;
-
-/**
- * Represents an ofp_match structure
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public class OFMatch implements Cloneable, Serializable {
- /**
- *
- */
- private static final long serialVersionUID = 1L;
- public static int MINIMUM_LENGTH = 40;
- final public static int OFPFW_ALL = ((1 << 22) - 1);
-
- final public static int OFPFW_IN_PORT = 1 << 0; /* Switch input port. */
- final public static int OFPFW_DL_VLAN = 1 << 1; /* VLAN id. */
- final public static int OFPFW_DL_SRC = 1 << 2; /* Ethernet source address. */
- final public static int OFPFW_DL_DST = 1 << 3; /*
- * Ethernet destination
- * address.
- */
- final public static int OFPFW_DL_TYPE = 1 << 4; /* Ethernet frame type. */
- final public static int OFPFW_NW_PROTO = 1 << 5; /* IP protocol. */
- final public static int OFPFW_TP_SRC = 1 << 6; /* TCP/UDP source port. */
- final public static int OFPFW_TP_DST = 1 << 7; /* TCP/UDP destination port. */
-
- /*
- * IP source address wildcard bit count. 0 is exact match, 1 ignores the
- * LSB, 2 ignores the 2 least-significant bits, ..., 32 and higher wildcard
- * the entire field. This is the *opposite* of the usual convention where
- * e.g. /24 indicates that 8 bits (not 24 bits) are wildcarded.
- */
- final public static int OFPFW_NW_SRC_SHIFT = 8;
- final public static int OFPFW_NW_SRC_BITS = 6;
- final public static int OFPFW_NW_SRC_MASK = ((1 << OFPFW_NW_SRC_BITS) - 1) << OFPFW_NW_SRC_SHIFT;
- final public static int OFPFW_NW_SRC_ALL = 32 << OFPFW_NW_SRC_SHIFT;
-
- /* IP destination address wildcard bit count. Same format as source. */
- final public static int OFPFW_NW_DST_SHIFT = 14;
- final public static int OFPFW_NW_DST_BITS = 6;
- final public static int OFPFW_NW_DST_MASK = ((1 << OFPFW_NW_DST_BITS) - 1) << OFPFW_NW_DST_SHIFT;
- final public static int OFPFW_NW_DST_ALL = 32 << OFPFW_NW_DST_SHIFT;
-
- final public static int OFPFW_DL_VLAN_PCP = 1 << 20; /* VLAN priority. */
- final public static int OFPFW_NW_TOS = 1 << 21; /*
- * IP ToS (DSCP field, 6
- * bits).
- */
-
- /* List of Strings for marshalling and unmarshalling to human readable forms */
- final public static String STR_IN_PORT = "in_port";
- final public static String STR_DL_DST = "dl_dst";
- final public static String STR_DL_SRC = "dl_src";
- final public static String STR_DL_TYPE = "dl_type";
- final public static String STR_DL_VLAN = "dl_vlan";
- final public static String STR_DL_VLAN_PCP = "dl_vpcp";
- final public static String STR_NW_DST = "nw_dst";
- final public static String STR_NW_SRC = "nw_src";
- final public static String STR_NW_PROTO = "nw_proto";
- final public static String STR_NW_TOS = "nw_tos";
- final public static String STR_TP_DST = "tp_dst";
- final public static String STR_TP_SRC = "tp_src";
-
- protected int wildcards;
- protected short inputPort;
- protected byte[] dataLayerSource;
- protected byte[] dataLayerDestination;
- protected short dataLayerVirtualLan;
- protected byte dataLayerVirtualLanPriorityCodePoint;
- protected short dataLayerType;
- protected byte networkTypeOfService;
- protected byte networkProtocol;
- protected int networkSource;
- protected int networkDestination;
- protected short transportSource;
- protected short transportDestination;
-
- /**
- * By default, create a OFMatch that matches everything
- *
- * (mostly because it's the least amount of work to make a valid OFMatch)
- */
- public OFMatch() {
- this.wildcards = OFPFW_ALL;
- this.dataLayerDestination = new byte[6];
- this.dataLayerSource = new byte[6];
- }
-
- /**
- * Get dl_dst
- *
- * @return an arrays of bytes
- */
- public byte[] getDataLayerDestination() {
- return this.dataLayerDestination;
- }
-
- /**
- * Set dl_dst
- *
- * @param dataLayerDestination
- */
- public OFMatch setDataLayerDestination(byte[] dataLayerDestination) {
- this.dataLayerDestination = dataLayerDestination;
- return this;
- }
-
- /**
- * Set dl_dst, but first translate to byte[] using HexString
- *
- * @param mac
- * A colon separated string of 6 pairs of octets, e..g.,
- * "00:17:42:EF:CD:8D"
- */
- public OFMatch setDataLayerDestination(String mac) {
- byte bytes[] = HexString.fromHexString(mac);
- if (bytes.length != 6)
- throw new IllegalArgumentException(
- "expected string with 6 octets, got '" + mac + "'");
- this.dataLayerDestination = bytes;
- return this;
- }
-
- /**
- * Get dl_src
- *
- * @return an array of bytes
- */
- public byte[] getDataLayerSource() {
- return this.dataLayerSource;
- }
-
- /**
- * Set dl_src
- *
- * @param dataLayerSource
- */
- public OFMatch setDataLayerSource(byte[] dataLayerSource) {
- this.dataLayerSource = dataLayerSource;
- return this;
- }
-
- /**
- * Set dl_src, but first translate to byte[] using HexString
- *
- * @param mac
- * A colon separated string of 6 pairs of octets, e..g.,
- * "00:17:42:EF:CD:8D"
- */
- public OFMatch setDataLayerSource(String mac) {
- byte bytes[] = HexString.fromHexString(mac);
- if (bytes.length != 6)
- throw new IllegalArgumentException(
- "expected string with 6 octets, got '" + mac + "'");
- this.dataLayerSource = bytes;
- return this;
- }
-
- /**
- * Get dl_type
- *
- * @return ether_type
- */
- public short getDataLayerType() {
- return this.dataLayerType;
- }
-
- /**
- * Set dl_type
- *
- * @param dataLayerType
- */
- public OFMatch setDataLayerType(short dataLayerType) {
- this.dataLayerType = dataLayerType;
- return this;
- }
-
- /**
- * Get dl_vlan
- *
- * @return vlan tag; VLAN_NONE == no tag
- */
- public short getDataLayerVirtualLan() {
- return this.dataLayerVirtualLan;
- }
-
- /**
- * Set dl_vlan
- *
- * @param dataLayerVirtualLan
- */
- public OFMatch setDataLayerVirtualLan(short dataLayerVirtualLan) {
- this.dataLayerVirtualLan = dataLayerVirtualLan;
- return this;
- }
-
- /**
- * Get dl_vlan_pcp
- *
- * @return
- */
- public byte getDataLayerVirtualLanPriorityCodePoint() {
- return this.dataLayerVirtualLanPriorityCodePoint;
- }
-
- /**
- * Set dl_vlan_pcp
- *
- * @param pcp
- */
- public OFMatch setDataLayerVirtualLanPriorityCodePoint(byte pcp) {
- this.dataLayerVirtualLanPriorityCodePoint = pcp;
- return this;
- }
-
- /**
- * Get in_port
- *
- * @return
- */
- public short getInputPort() {
- return this.inputPort;
- }
-
- /**
- * Set in_port
- *
- * @param inputPort
- */
- public OFMatch setInputPort(short inputPort) {
- this.inputPort = inputPort;
- return this;
- }
-
- /**
- * Get nw_dst
- *
- * @return
- */
- public int getNetworkDestination() {
- return this.networkDestination;
- }
-
- /**
- * Set nw_dst
- *
- * @param networkDestination
- */
- public OFMatch setNetworkDestination(int networkDestination) {
- this.networkDestination = networkDestination;
- return this;
- }
-
- /**
- * Parse this match's wildcard fields and return the number of significant
- * bits in the IP destination field.
- *
- * NOTE: this returns the number of bits that are fixed, i.e., like CIDR,
- * not the number of bits that are free like OpenFlow encodes.
- *
- * @return a number between 0 (matches all IPs) and 63 ( 32>= implies exact
- * match)
- */
- public int getNetworkDestinationMaskLen() {
- return Math
- .max(32 - ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT),
- 0);
- }
-
- /**
- * Parse this match's wildcard fields and return the number of significant
- * bits in the IP destination field.
- *
- * NOTE: this returns the number of bits that are fixed, i.e., like CIDR,
- * not the number of bits that are free like OpenFlow encodes.
- *
- * @return a number between 0 (matches all IPs) and 32 (exact match)
- */
- public int getNetworkSourceMaskLen() {
- return Math
- .max(32 - ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT),
- 0);
- }
-
- /**
- * Get nw_proto
- *
- * @return
- */
- public byte getNetworkProtocol() {
- return this.networkProtocol;
- }
-
- /**
- * Set nw_proto
- *
- * @param networkProtocol
- */
- public OFMatch setNetworkProtocol(byte networkProtocol) {
- this.networkProtocol = networkProtocol;
- return this;
- }
-
- /**
- * Get nw_src
- *
- * @return
- */
- public int getNetworkSource() {
- return this.networkSource;
- }
-
- /**
- * Set nw_src
- *
- * @param networkSource
- */
- public OFMatch setNetworkSource(int networkSource) {
- this.networkSource = networkSource;
- return this;
- }
-
- /**
- * Get nw_tos
- *
- * @return
- */
- public byte getNetworkTypeOfService() {
- return this.networkTypeOfService;
- }
-
- /**
- * Set nw_tos
- *
- * @param networkTypeOfService
- */
- public OFMatch setNetworkTypeOfService(byte networkTypeOfService) {
- this.networkTypeOfService = networkTypeOfService;
- return this;
- }
-
- /**
- * Get tp_dst
- *
- * @return
- */
- public short getTransportDestination() {
- return this.transportDestination;
- }
-
- /**
- * Set tp_dst
- *
- * @param transportDestination
- */
- public OFMatch setTransportDestination(short transportDestination) {
- this.transportDestination = transportDestination;
- return this;
- }
-
- /**
- * Get tp_src
- *
- * @return
- */
- public short getTransportSource() {
- return this.transportSource;
- }
-
- /**
- * Set tp_src
- *
- * @param transportSource
- */
- public OFMatch setTransportSource(short transportSource) {
- this.transportSource = transportSource;
- return this;
- }
-
- /**
- * Get wildcards
- *
- * @return
- */
- public int getWildcards() {
- return this.wildcards;
- }
-
- /**
- * Set wildcards
- *
- * @param wildcards
- */
- public OFMatch setWildcards(int wildcards) {
- this.wildcards = wildcards;
- return this;
- }
-
- /**
- * Initializes this OFMatch structure with the corresponding data from the
- * specified packet.
- *
- * Must specify the input port, to ensure that this.in_port is set
- * correctly.
- *
- * Specify OFPort.NONE or OFPort.ANY if input port not applicable or
- * available
- *
- * @param packetData
- * The packet's data
- * @param inputPort
- * the port the packet arrived on
- */
- public OFMatch loadFromPacket(byte[] packetData, short inputPort) {
- short scratch;
- int transportOffset = 34;
- ByteBuffer packetDataBB = ByteBuffer.wrap(packetData);
- int limit = packetDataBB.limit();
-
- this.wildcards = 0; // all fields have explicit entries
-
- this.inputPort = inputPort;
-
- if (inputPort == OFPort.OFPP_ALL.getValue())
- this.wildcards |= OFPFW_IN_PORT;
-
- assert (limit >= 14);
- // dl dst
- this.dataLayerDestination = new byte[6];
- packetDataBB.get(this.dataLayerDestination);
- // dl src
- this.dataLayerSource = new byte[6];
- packetDataBB.get(this.dataLayerSource);
- // dl type
- this.dataLayerType = packetDataBB.getShort();
-
- if (getDataLayerType() != (short) 0x8100) { // need cast to avoid signed
- // bug
- setDataLayerVirtualLan((short) 0xffff);
- setDataLayerVirtualLanPriorityCodePoint((byte) 0);
- } else {
- // has vlan tag
- scratch = packetDataBB.getShort();
- setDataLayerVirtualLan((short) (0xfff & scratch));
- setDataLayerVirtualLanPriorityCodePoint((byte) ((0xe000 & scratch) >> 13));
- this.dataLayerType = packetDataBB.getShort();
- }
-
- switch (getDataLayerType()) {
- case 0x0800:
- // ipv4
- // check packet length
- scratch = packetDataBB.get();
- scratch = (short) (0xf & scratch);
- transportOffset = (packetDataBB.position() - 1) + (scratch * 4);
- // nw tos (dscp)
- scratch = packetDataBB.get();
- setNetworkTypeOfService((byte) ((0xfc & scratch) >> 2));
- // nw protocol
- packetDataBB.position(packetDataBB.position() + 7);
- this.networkProtocol = packetDataBB.get();
- // nw src
- packetDataBB.position(packetDataBB.position() + 2);
- this.networkSource = packetDataBB.getInt();
- // nw dst
- this.networkDestination = packetDataBB.getInt();
- packetDataBB.position(transportOffset);
- break;
- case 0x0806:
- // arp
- int arpPos = packetDataBB.position();
- // opcode
- scratch = packetDataBB.getShort(arpPos + 6);
- setNetworkProtocol((byte) (0xff & scratch));
-
- scratch = packetDataBB.getShort(arpPos + 2);
- // if ipv4 and addr len is 4
- if (scratch == 0x800 && packetDataBB.get(arpPos + 5) == 4) {
- // nw src
- this.networkSource = packetDataBB.getInt(arpPos + 14);
- // nw dst
- this.networkDestination = packetDataBB.getInt(arpPos + 24);
- } else {
- setNetworkSource(0);
- setNetworkDestination(0);
- }
- break;
- default:
- setNetworkTypeOfService((byte) 0);
- setNetworkProtocol((byte) 0);
- setNetworkSource(0);
- setNetworkDestination(0);
- break;
- }
-
- switch (getNetworkProtocol()) {
- case 0x01:
- // icmp
- // type
- this.transportSource = U8.f(packetDataBB.get());
- // code
- this.transportDestination = U8.f(packetDataBB.get());
- break;
- case 0x06:
- // tcp
- // tcp src
- this.transportSource = packetDataBB.getShort();
- // tcp dest
- this.transportDestination = packetDataBB.getShort();
- break;
- case 0x11:
- // udp
- // udp src
- this.transportSource = packetDataBB.getShort();
- // udp dest
- this.transportDestination = packetDataBB.getShort();
- break;
- default:
- setTransportDestination((short) 0);
- setTransportSource((short) 0);
- break;
- }
- return this;
- }
-
- /**
- * Read this message off the wire from the specified ByteBuffer
- *
- * @param data
- */
- public void readFrom(ByteBuffer data) {
- this.wildcards = data.getInt();
- this.inputPort = data.getShort();
- this.dataLayerSource = new byte[6];
- data.get(this.dataLayerSource);
- this.dataLayerDestination = new byte[6];
- data.get(this.dataLayerDestination);
- this.dataLayerVirtualLan = data.getShort();
- this.dataLayerVirtualLanPriorityCodePoint = data.get();
- data.get(); // pad
- this.dataLayerType = data.getShort();
- this.networkTypeOfService = data.get();
- this.networkProtocol = data.get();
- data.get(); // pad
- data.get(); // pad
- this.networkSource = data.getInt();
- this.networkDestination = data.getInt();
- this.transportSource = data.getShort();
- this.transportDestination = data.getShort();
- }
-
- /**
- * Write this message's binary format to the specified ByteBuffer
- *
- * @param data
- */
- public void writeTo(ByteBuffer data) {
- data.putInt(wildcards);
- data.putShort(inputPort);
- data.put(this.dataLayerSource);
- data.put(this.dataLayerDestination);
- data.putShort(dataLayerVirtualLan);
- data.put(dataLayerVirtualLanPriorityCodePoint);
- data.put((byte) 0x0); // pad
- data.putShort(dataLayerType);
- data.put(networkTypeOfService);
- data.put(networkProtocol);
- data.put((byte) 0x0); // pad
- data.put((byte) 0x0); // pad
- data.putInt(networkSource);
- data.putInt(networkDestination);
- data.putShort(transportSource);
- data.putShort(transportDestination);
- }
-
- @Override
- public int hashCode() {
- final int prime = 131;
- int result = 1;
- result = prime * result + Arrays.hashCode(dataLayerDestination);
- result = prime * result + Arrays.hashCode(dataLayerSource);
- result = prime * result + dataLayerType;
- result = prime * result + dataLayerVirtualLan;
- result = prime * result + dataLayerVirtualLanPriorityCodePoint;
- result = prime * result + inputPort;
- result = prime * result + networkDestination;
- result = prime * result + networkProtocol;
- result = prime * result + networkSource;
- result = prime * result + networkTypeOfService;
- result = prime * result + transportDestination;
- result = prime * result + transportSource;
- result = prime * result + wildcards;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFMatch)) {
- return false;
- }
- OFMatch other = (OFMatch) obj;
- if (!Arrays.equals(dataLayerDestination, other.dataLayerDestination)) {
- return false;
- }
- if (!Arrays.equals(dataLayerSource, other.dataLayerSource)) {
- return false;
- }
- if (dataLayerType != other.dataLayerType) {
- return false;
- }
- if (dataLayerVirtualLan != other.dataLayerVirtualLan) {
- return false;
- }
- if (dataLayerVirtualLanPriorityCodePoint != other.dataLayerVirtualLanPriorityCodePoint) {
- return false;
- }
- if (inputPort != other.inputPort) {
- return false;
- }
- if (networkDestination != other.networkDestination) {
- return false;
- }
- if (networkProtocol != other.networkProtocol) {
- return false;
- }
- if (networkSource != other.networkSource) {
- return false;
- }
- if (networkTypeOfService != other.networkTypeOfService) {
- return false;
- }
- if (transportDestination != other.transportDestination) {
- return false;
- }
- if (transportSource != other.transportSource) {
- return false;
- }
- if ((wildcards & OFMatch.OFPFW_ALL) != (other.wildcards & OFPFW_ALL)) { // only
- // consider
- // allocated
- // part
- // of
- // wildcards
- return false;
- }
- return true;
- }
-
- /**
- * Implement clonable interface
- */
- @Override
- public OFMatch clone() {
- try {
- OFMatch ret = (OFMatch) super.clone();
- ret.dataLayerDestination = this.dataLayerDestination.clone();
- ret.dataLayerSource = this.dataLayerSource.clone();
- return ret;
- } catch (CloneNotSupportedException e) {
- throw new RuntimeException(e);
- }
- }
-
- /**
- * Output a dpctl-styled string, i.e., only list the elements that are not
- * wildcarded
- *
- * A match-everything OFMatch outputs "OFMatch[]"
- *
- * @return
- * "OFMatch[dl_src:00:20:01:11:22:33,nw_src:192.168.0.0/24,tp_dst:80]"
- */
- @Override
- public String toString() {
- String str = "";
-
- // l1
- if ((wildcards & OFPFW_IN_PORT) == 0)
- str += "," + STR_IN_PORT + "=" + U16.f(this.inputPort);
-
- // l2
- if ((wildcards & OFPFW_DL_DST) == 0)
- str += "," + STR_DL_DST + "="
- + HexString.toHexString(this.dataLayerDestination);
- if ((wildcards & OFPFW_DL_SRC) == 0)
- str += "," + STR_DL_SRC + "="
- + HexString.toHexString(this.dataLayerSource);
- if ((wildcards & OFPFW_DL_TYPE) == 0)
- str += "," + STR_DL_TYPE + "=0x"
- + Integer.toHexString(U16.f(this.dataLayerType));
- if ((wildcards & OFPFW_DL_VLAN) == 0)
- str += "," + STR_DL_VLAN + "=0x"
- + Integer.toHexString(U16.f(this.dataLayerVirtualLan));
- if ((wildcards & OFPFW_DL_VLAN_PCP) == 0)
- str += ","
- + STR_DL_VLAN_PCP
- + "="
- + Integer.toHexString(U8
- .f(this.dataLayerVirtualLanPriorityCodePoint));
-
- // l3
- if (getNetworkDestinationMaskLen() > 0)
- str += ","
- + STR_NW_DST
- + "="
- + cidrToString(networkDestination,
- getNetworkDestinationMaskLen());
- if (getNetworkSourceMaskLen() > 0)
- str += "," + STR_NW_SRC + "="
- + cidrToString(networkSource, getNetworkSourceMaskLen());
- if ((wildcards & OFPFW_NW_PROTO) == 0)
- str += "," + STR_NW_PROTO + "=" + this.networkProtocol;
- if ((wildcards & OFPFW_NW_TOS) == 0)
- str += "," + STR_NW_TOS + "=" + this.networkTypeOfService;
-
- // l4
- if ((wildcards & OFPFW_TP_DST) == 0)
- str += "," + STR_TP_DST + "=" + this.transportDestination;
- if ((wildcards & OFPFW_TP_SRC) == 0)
- str += "," + STR_TP_SRC + "=" + this.transportSource;
- if ((str.length() > 0) && (str.charAt(0) == ','))
- str = str.substring(1); // trim the leading ","
- // done
- return "OFMatch[" + str + "]";
- }
-
- private String cidrToString(int ip, int prefix) {
- String str;
- if (prefix >= 32) {
- str = ipToString(ip);
- } else {
- // use the negation of mask to fake endian magic
- int mask = ~((1 << (32 - prefix)) - 1);
- str = ipToString(ip & mask) + "/" + prefix;
- }
-
- return str;
- }
-
- /**
- * Set this OFMatch's parameters based on a comma-separated key=value pair
- * dpctl-style string, e.g., from the output of OFMatch.toString() <br>
- * <p>
- * Supported keys/values include <br>
- * <p>
- * <TABLE border=1>
- * <TR>
- * <TD>KEY(s)
- * <TD>VALUE
- * </TR>
- * <TR>
- * <TD>"in_port","input_port"
- * <TD>integer
- * </TR>
- * <TR>
- * <TD>"dl_src","eth_src", "dl_dst","eth_dst"
- * <TD>hex-string
- * </TR>
- * <TR>
- * <TD>"dl_type", "dl_vlan", "dl_vlan_pcp"
- * <TD>integer
- * </TR>
- * <TR>
- * <TD>"nw_src", "nw_dst", "ip_src", "ip_dst"
- * <TD>CIDR-style netmask
- * </TR>
- * <TR>
- * <TD>"tp_src","tp_dst"
- * <TD>integer (max 64k)
- * </TR>
- * </TABLE>
- * <p>
- * The CIDR-style netmasks assume 32 netmask if none given, so:
- * "128.8.128.118/32" is the same as "128.8.128.118"
- *
- * @param match
- * a key=value comma separated string, e.g.
- * "in_port=5,ip_dst=192.168.0.0/16,tp_src=80"
- * @throws IllegalArgumentException
- * on unexpected key or value
- */
-
- public void fromString(String match) throws IllegalArgumentException {
- if (match.equals("") || match.equalsIgnoreCase("any")
- || match.equalsIgnoreCase("all") || match.equals("[]"))
- match = "OFMatch[]";
- String[] tokens = match.split("[\\[,\\]]");
- String[] values;
- int initArg = 0;
- if (tokens[0].equals("OFMatch"))
- initArg = 1;
- this.wildcards = OFPFW_ALL;
- int i;
- for (i = initArg; i < tokens.length; i++) {
- values = tokens[i].split("=");
- if (values.length != 2)
- throw new IllegalArgumentException("Token " + tokens[i]
- + " does not have form 'key=value' parsing " + match);
- values[0] = values[0].toLowerCase(); // try to make this case insens
- if (values[0].equals(STR_IN_PORT) || values[0].equals("input_port")) {
- this.inputPort = U16.t(Integer.valueOf(values[1]));
- this.wildcards &= ~OFPFW_IN_PORT;
- } else if (values[0].equals(STR_DL_DST)
- || values[0].equals("eth_dst")) {
- this.dataLayerDestination = HexString.fromHexString(values[1]);
- this.wildcards &= ~OFPFW_DL_DST;
- } else if (values[0].equals(STR_DL_SRC)
- || values[0].equals("eth_src")) {
- this.dataLayerSource = HexString.fromHexString(values[1]);
- this.wildcards &= ~OFPFW_DL_SRC;
- } else if (values[0].equals(STR_DL_TYPE)
- || values[0].equals("eth_type")) {
- if (values[1].startsWith("0x"))
- this.dataLayerType = U16.t(Integer.valueOf(
- values[1].replaceFirst("0x", ""), 16));
- else
- this.dataLayerType = U16.t(Integer.valueOf(values[1]));
- this.wildcards &= ~OFPFW_DL_TYPE;
- } else if (values[0].equals(STR_DL_VLAN)) {
- this.dataLayerVirtualLan = U16.t(Integer.valueOf(values[1]));
- this.wildcards &= ~OFPFW_DL_VLAN;
- } else if (values[0].equals(STR_DL_VLAN_PCP)) {
- this.dataLayerVirtualLanPriorityCodePoint = U8.t(Short
- .valueOf(values[1]));
- this.wildcards &= ~OFPFW_DL_VLAN_PCP;
- } else if (values[0].equals(STR_NW_DST)
- || values[0].equals("ip_dst"))
- setFromCIDR(values[1], STR_NW_DST);
- else if (values[0].equals(STR_NW_SRC) || values[0].equals("ip_src"))
- setFromCIDR(values[1], STR_NW_SRC);
- else if (values[0].equals(STR_NW_PROTO)) {
- this.networkProtocol = U8.t(Short.valueOf(values[1]));
- this.wildcards &= ~OFPFW_NW_PROTO;
- } else if (values[0].equals(STR_NW_TOS)) {
- this.networkTypeOfService = U8.t(Short.valueOf(values[1]));
- this.wildcards &= ~OFPFW_NW_TOS;
- } else if (values[0].equals(STR_TP_DST)) {
- this.transportDestination = U16.t(Integer.valueOf(values[1]));
- this.wildcards &= ~OFPFW_TP_DST;
- } else if (values[0].equals(STR_TP_SRC)) {
- this.transportSource = U16.t(Integer.valueOf(values[1]));
- this.wildcards &= ~OFPFW_TP_SRC;
- } else
- throw new IllegalArgumentException("unknown token " + tokens[i]
- + " parsing " + match);
- }
- }
-
- /**
- * Set the networkSource or networkDestionation address and their wildcards
- * from the CIDR string
- *
- * @param cidr
- * "192.168.0.0/16" or "172.16.1.5"
- * @param which
- * one of STR_NW_DST or STR_NW_SRC
- * @throws IllegalArgumentException
- */
- private void setFromCIDR(String cidr, String which)
- throws IllegalArgumentException {
- String values[] = cidr.split("/");
- String[] ip_str = values[0].split("\\.");
- int ip = 0;
- ip += Integer.valueOf(ip_str[0]) << 24;
- ip += Integer.valueOf(ip_str[1]) << 16;
- ip += Integer.valueOf(ip_str[2]) << 8;
- ip += Integer.valueOf(ip_str[3]);
- int prefix = 32; // all bits are fixed, by default
-
- if (values.length >= 2)
- prefix = Integer.valueOf(values[1]);
- int mask = 32 - prefix;
- if (which.equals(STR_NW_DST)) {
- this.networkDestination = ip;
- this.wildcards = (wildcards & ~OFPFW_NW_DST_MASK)
- | (mask << OFPFW_NW_DST_SHIFT);
- } else if (which.equals(STR_NW_SRC)) {
- this.networkSource = ip;
- this.wildcards = (wildcards & ~OFPFW_NW_SRC_MASK)
- | (mask << OFPFW_NW_SRC_SHIFT);
- }
- }
-
- protected static String ipToString(int ip) {
- return Integer.toString(U8.f((byte) ((ip & 0xff000000) >> 24))) + "."
- + Integer.toString((ip & 0x00ff0000) >> 16) + "."
- + Integer.toString((ip & 0x0000ff00) >> 8) + "."
- + Integer.toString(ip & 0x000000ff);
- }
-
- /**
- * Reverses a match such that source and destination values plus
- * corresponding masks are swapped. An input port must be explicitly
- * passed in as the match does not contain an output port.
- *
- * @param inputPort new input port to use in match
- * @param wildcardInputPort should the input port be wildcarded
- *
- * @return Reversed copy of match
- */
- public OFMatch reverse(short inputPort, boolean wildcardInputPort) {
- OFMatch ret = this.clone();
-
- // Set the input port
- if (wildcardInputPort) {
- ret.inputPort = 0;
- ret.wildcards |= OFPFW_IN_PORT;
- } else {
- ret.inputPort = inputPort;
- ret.wildcards &= ~OFPFW_IN_PORT;
- }
-
- // Switch the source/dest fields
- ret.dataLayerDestination = this.dataLayerSource.clone();
- ret.dataLayerSource = this.dataLayerDestination.clone();
-
- ret.networkDestination = this.networkSource;
- ret.networkSource = this.networkDestination;
-
- ret.transportDestination = this.transportSource;
- ret.transportSource = this.transportDestination;
-
- // Switch the wildcards on source/dest fields
- ret.wildcards &= ~(OFPFW_DL_DST | OFPFW_DL_SRC |
- OFPFW_NW_DST_MASK | OFPFW_NW_SRC_MASK |
- OFPFW_TP_DST | OFPFW_TP_SRC);
- ret.wildcards |= ((this.wildcards & OFPFW_DL_DST) != 0 ) ? OFPFW_DL_SRC : 0;
- ret.wildcards |= ((this.wildcards & OFPFW_DL_SRC) != 0 ) ? OFPFW_DL_DST : 0;
- ret.wildcards |= (((this.wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT) << OFPFW_NW_SRC_SHIFT);
- ret.wildcards |= (((this.wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT) << OFPFW_NW_DST_SHIFT);
- ret.wildcards |= ((this.wildcards & OFPFW_TP_DST) != 0 ) ? OFPFW_TP_SRC : 0;
- ret.wildcards |= ((this.wildcards & OFPFW_TP_SRC) != 0 ) ? OFPFW_TP_DST : 0;
-
- return ret;
- }
-
- /**
- * Check whether this match subsumes another match.
- *
- * This match subsumes another match if each field in this
- * object either:
- * <ol>
- * <li> exactly matches the corresponding field in the other match
- * <li> the field is wildcarded in this object
- * </ol>
- * Note: The network source and destination wildcards must have fewer
- * or the same number of bits wildcarded in this object as the other.
- *
- * @param match match used for comparison when checking subsumes
- * @return boolean indicating whether this match subsumes another match
- */
- public boolean subsumes(OFMatch match) {
- // L1
- if ((wildcards & OFPFW_IN_PORT) == 0) {
- if (inputPort != match.inputPort) {
- return false;
- }
- }
-
- // L2
- if ((wildcards & OFPFW_DL_DST) == 0) {
- if (!Arrays.equals(dataLayerDestination, match.dataLayerDestination)) {
- return false;
- }
- }
- if ((wildcards & OFPFW_DL_SRC) == 0) {
- if (!Arrays.equals(dataLayerSource, match.dataLayerSource)) {
- return false;
- }
- }
- if ((wildcards & OFPFW_DL_TYPE) == 0) {
- if (dataLayerType != match.dataLayerType) {
- return false;
- }
- }
- if ((wildcards & OFPFW_DL_VLAN) == 0) {
- if (dataLayerVirtualLan!= match.dataLayerVirtualLan) {
- return false;
- }
- }
- if ((wildcards & OFPFW_DL_VLAN_PCP) == 0) {
- if (dataLayerVirtualLanPriorityCodePoint != match.dataLayerVirtualLanPriorityCodePoint) {
- return false;
- }
- }
-
- // L3
- int maskLen = getNetworkDestinationMaskLen();
- if (maskLen > match.getNetworkDestinationMaskLen()) {
- return false;
- }
- int mask = (maskLen == 0) ? 0 : (0xffffffff << (32 - maskLen));
- if ((networkDestination & mask) != (match.networkDestination & mask)) {
- return false;
- }
- maskLen = getNetworkSourceMaskLen();
- if (maskLen > match.getNetworkSourceMaskLen()) {
- return false;
- }
- mask = (maskLen == 0) ? 0 : (0xffffffff << (32 - maskLen));
- if ((networkSource & mask) != (match.networkSource & mask)) {
- return false;
- }
- if ((wildcards & OFPFW_NW_PROTO) == 0) {
- if (networkProtocol != match.networkProtocol) {
- return false;
- }
- }
- if ((wildcards & OFPFW_NW_TOS) == 0) {
- if (networkTypeOfService != match.networkTypeOfService) {
- return false;
- }
- }
-
- // L4
- if ((wildcards & OFPFW_TP_DST) == 0) {
- if (transportDestination != match.transportDestination) {
- return false;
- }
- }
- if ((wildcards & OFPFW_TP_SRC) == 0) {
- if (transportSource != match.transportSource) {
- return false;
- }
- }
-
- return true;
- }
-
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.beans.IntrospectionException;
-import java.beans.PropertyDescriptor;
-import java.beans.SimpleBeanInfo;
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
-import java.util.LinkedList;
-import java.util.List;
-
-/**
- * Extra info for how to treat OFMatch as a JavaBean
- *
- * For some (inane!) reason, using chained setters in OFMatch breaks a lot of the JavaBean defaults.
- *
- * We don't really use OFMatch as a java bean, but there are a lot of nice XML utils that work for
- * free if OFMatch follows the java bean paradigm.
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-
-public class OFMatchBeanInfo extends SimpleBeanInfo {
-
- @Override
- public PropertyDescriptor[] getPropertyDescriptors() {
- List<PropertyDescriptor> descs = new LinkedList<PropertyDescriptor>();
- Field[] fields = OFMatch.class.getDeclaredFields();
- String name;
- for (int i=0; i< fields.length; i++) {
- int mod = fields[i].getModifiers();
- if(Modifier.isFinal(mod) || // don't expose static or final fields
- Modifier.isStatic(mod))
- continue;
-
- name = fields[i].getName();
- Class<?> type = fields[i].getType();
-
- try {
- descs.add(new PropertyDescriptor(name,
- name2getter(OFMatch.class, name),
- name2setter(OFMatch.class, name, type)));
- } catch (IntrospectionException e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- return descs.toArray(new PropertyDescriptor[0]);
- }
-
-
- private Method name2setter(Class<OFMatch> c, String name, Class<?> type) {
- String mName = "set" + toLeadingCaps(name);
- Method m = null;
- try {
- m = c.getMethod(mName, new Class[]{ type});
- } catch (SecurityException e) {
-
- e.printStackTrace();
- throw new RuntimeException(e);
- } catch (NoSuchMethodException e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- return m;
- }
-
- private Method name2getter(Class<OFMatch> c, String name) {
- String mName= "get" + toLeadingCaps(name);
- Method m = null;
- try {
- m = c.getMethod(mName, new Class[]{});
- } catch (SecurityException e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- } catch (NoSuchMethodException e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- return m;
- }
-
- private String toLeadingCaps(String s) {
- char[] array = s.toCharArray();
- array[0] = Character.toUpperCase(array[0]);
- return String.valueOf(array, 0, array.length);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-import org.openflow.util.U32;
-import org.openflow.util.U8;
-
-/**
- * The base class for all OpenFlow protocol messages. This class contains the
- * equivalent of the ofp_header which is present in all OpenFlow messages.
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Feb 3, 2010
- * @author Rob Sherwood (rob.sherwood@stanford.edu) - Feb 3, 2010
- */
-public class OFMessage implements Serializable{
- public static byte OFP_VERSION = 0x01;
- public static int MINIMUM_LENGTH = 8;
-
- protected byte version;
- protected OFType type;
- protected short length;
- protected int xid;
-
- public OFMessage() {
- this.version = OFP_VERSION;
- }
-
- /**
- * Get the length of this message
- *
- * @return
- */
- public short getLength() {
- return length;
- }
-
- /**
- * Get the length of this message, unsigned
- *
- * @return
- */
- public int getLengthU() {
- return U16.f(length);
- }
-
- /**
- * Set the length of this message
- *
- * @param length
- */
- public OFMessage setLength(short length) {
- this.length = length;
- return this;
- }
-
- /**
- * Set the length of this message, unsigned
- *
- * @param length
- */
- public OFMessage setLengthU(int length) {
- this.length = U16.t(length);
- return this;
- }
-
- /**
- * Get the type of this message
- *
- * @return
- */
- public OFType getType() {
- return type;
- }
-
- /**
- * Set the type of this message
- *
- * @param type
- */
- public void setType(OFType type) {
- this.type = type;
- }
-
- /**
- * Get the OpenFlow version of this message
- *
- * @return
- */
- public byte getVersion() {
- return version;
- }
-
- /**
- * Set the OpenFlow version of this message
- *
- * @param version
- */
- public void setVersion(byte version) {
- this.version = version;
- }
-
- /**
- * Get the transaction id of this message
- *
- * @return
- */
- public int getXid() {
- return xid;
- }
-
- /**
- * Set the transaction id of this message
- *
- * @param xid
- */
- public void setXid(int xid) {
- this.xid = xid;
- }
-
- /**
- * Read this message off the wire from the specified ByteBuffer
- * @param data
- */
- public void readFrom(ByteBuffer data) {
- this.version = data.get();
- this.type = OFType.valueOf(data.get());
- this.length = data.getShort();
- this.xid = data.getInt();
- }
-
- /**
- * Write this message's binary format to the specified ByteBuffer
- * @param data
- */
- public void writeTo(ByteBuffer data) {
- data.put(version);
- data.put(type.getTypeValue());
- data.putShort(length);
- data.putInt(xid);
- }
-
- /**
- * Returns a summary of the message
- * @return "ofmsg=v=$version;t=$type:l=$len:xid=$xid"
- */
- public String toString() {
- return "ofmsg" +
- ":v=" + U8.f(this.getVersion()) +
- ";t=" + this.getType() +
- ";l=" + this.getLengthU() +
- ";x=" + U32.f(this.getXid());
- }
-
- @Override
- public int hashCode() {
- final int prime = 97;
- int result = 1;
- result = prime * result + length;
- result = prime * result + ((type == null) ? 0 : type.hashCode());
- result = prime * result + version;
- result = prime * result + xid;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFMessage)) {
- return false;
- }
- OFMessage other = (OFMessage) obj;
- if (length != other.length) {
- return false;
- }
- if (type == null) {
- if (other.type != null) {
- return false;
- }
- } else if (!type.equals(other.type)) {
- return false;
- }
- if (version != other.version) {
- return false;
- }
- if (xid != other.xid) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import org.openflow.util.U16;
-import org.openflow.util.U8;
-
-/**
- * Represents an ofp_packet_in
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Feb 8, 2010
- */
-public class OFPacketIn extends OFMessage {
- public static int MINIMUM_LENGTH = 18;
-
- public enum OFPacketInReason {
- NO_MATCH, ACTION
- }
-
- protected int bufferId;
- protected short totalLength;
- protected short inPort;
- protected OFPacketInReason reason;
- protected byte[] packetData;
-
- public OFPacketIn() {
- super();
- this.type = OFType.PACKET_IN;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * Get buffer_id
- * @return
- */
- public int getBufferId() {
- return this.bufferId;
- }
-
- /**
- * Set buffer_id
- * @param bufferId
- */
- public OFPacketIn setBufferId(int bufferId) {
- this.bufferId = bufferId;
- return this;
- }
-
- /**
- * Returns the packet data
- * @return
- */
- public byte[] getPacketData() {
- return this.packetData;
- }
-
- /**
- * Sets the packet data, and updates the length of this message
- * @param packetData
- */
- public OFPacketIn setPacketData(byte[] packetData) {
- this.packetData = packetData;
- this.length = U16.t(OFPacketIn.MINIMUM_LENGTH + packetData.length);
- return this;
- }
-
- /**
- * Get in_port
- * @return
- */
- public short getInPort() {
- return this.inPort;
- }
-
- /**
- * Set in_port
- * @param inPort
- */
- public OFPacketIn setInPort(short inPort) {
- this.inPort = inPort;
- return this;
- }
-
- /**
- * Get reason
- * @return
- */
- public OFPacketInReason getReason() {
- return this.reason;
- }
-
- /**
- * Set reason
- * @param reason
- */
- public OFPacketIn setReason(OFPacketInReason reason) {
- this.reason = reason;
- return this;
- }
-
- /**
- * Get total_len
- * @return
- */
- public short getTotalLength() {
- return this.totalLength;
- }
-
- /**
- * Set total_len
- * @param totalLength
- */
- public OFPacketIn setTotalLength(short totalLength) {
- this.totalLength = totalLength;
- return this;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.bufferId = data.getInt();
- this.totalLength = data.getShort();
- this.inPort = data.getShort();
- this.reason = OFPacketInReason.values()[U8.f(data.get())];
- data.get(); // pad
- this.packetData = new byte[getLengthU() - MINIMUM_LENGTH];
- data.get(this.packetData);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putInt(bufferId);
- data.putShort(totalLength);
- data.putShort(inPort);
- data.put((byte) reason.ordinal());
- data.put((byte) 0x0); // pad
- data.put(this.packetData);
- }
-
- @Override
- public int hashCode() {
- final int prime = 283;
- int result = super.hashCode();
- result = prime * result + bufferId;
- result = prime * result + inPort;
- result = prime * result + Arrays.hashCode(packetData);
- result = prime * result + ((reason == null) ? 0 : reason.hashCode());
- result = prime * result + totalLength;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFPacketIn)) {
- return false;
- }
- OFPacketIn other = (OFPacketIn) obj;
- if (bufferId != other.bufferId) {
- return false;
- }
- if (inPort != other.inPort) {
- return false;
- }
- if (!Arrays.equals(packetData, other.packetData)) {
- return false;
- }
- if (reason == null) {
- if (other.reason != null) {
- return false;
- }
- } else if (!reason.equals(other.reason)) {
- return false;
- }
- if (totalLength != other.totalLength) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-import java.util.List;
-
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.factory.OFActionFactory;
-import org.openflow.protocol.factory.OFActionFactoryAware;
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_packet_out message
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 12, 2010
- */
-public class OFPacketOut extends OFMessage implements OFActionFactoryAware {
- public static int MINIMUM_LENGTH = 16;
- public static int BUFFER_ID_NONE = 0xffffffff;
-
- protected OFActionFactory actionFactory;
- protected int bufferId;
- protected short inPort;
- protected short actionsLength;
- protected List<OFAction> actions;
- protected byte[] packetData;
-
- public OFPacketOut() {
- super();
- this.type = OFType.PACKET_OUT;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * Get buffer_id
- * @return
- */
- public int getBufferId() {
- return this.bufferId;
- }
-
- /**
- * Set buffer_id
- * @param bufferId
- */
- public OFPacketOut setBufferId(int bufferId) {
- this.bufferId = bufferId;
- return this;
- }
-
- /**
- * Returns the packet data
- * @return
- */
- public byte[] getPacketData() {
- return this.packetData;
- }
-
- /**
- * Sets the packet data
- * @param packetData
- */
- public OFPacketOut setPacketData(byte[] packetData) {
- this.packetData = packetData;
- return this;
- }
-
- /**
- * Get in_port
- * @return
- */
- public short getInPort() {
- return this.inPort;
- }
-
- /**
- * Set in_port
- * @param inPort
- */
- public OFPacketOut setInPort(short inPort) {
- this.inPort = inPort;
- return this;
- }
-
- /**
- * Set in_port. Convenience method using OFPort enum.
- * @param inPort
- */
- public OFPacketOut setInPort(OFPort inPort) {
- this.inPort = inPort.getValue();
- return this;
- }
-
- /**
- * Get actions_len
- * @return
- */
- public short getActionsLength() {
- return this.actionsLength;
- }
-
- /**
- * Get actions_len, unsigned
- * @return
- */
- public int getActionsLengthU() {
- return U16.f(this.actionsLength);
- }
-
- /**
- * Set actions_len
- * @param actionsLength
- */
- public OFPacketOut setActionsLength(short actionsLength) {
- this.actionsLength = actionsLength;
- return this;
- }
-
- /**
- * Returns the actions contained in this message
- * @return a list of ordered OFAction objects
- */
- public List<OFAction> getActions() {
- return this.actions;
- }
-
- /**
- * Sets the list of actions on this message
- * @param actions a list of ordered OFAction objects
- */
- public OFPacketOut setActions(List<OFAction> actions) {
- this.actions = actions;
- return this;
- }
-
- @Override
- public void setActionFactory(OFActionFactory actionFactory) {
- this.actionFactory = actionFactory;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.bufferId = data.getInt();
- this.inPort = data.getShort();
- this.actionsLength = data.getShort();
- if ( this.actionFactory == null)
- throw new RuntimeException("ActionFactory not set");
- this.actions = this.actionFactory.parseActions(data, getActionsLengthU());
- this.packetData = new byte[getLengthU() - MINIMUM_LENGTH - getActionsLengthU()];
- data.get(this.packetData);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putInt(bufferId);
- data.putShort(inPort);
- data.putShort(actionsLength);
- for (OFAction action : actions) {
- action.writeTo(data);
- }
- if (this.packetData != null)
- data.put(this.packetData);
- }
-
- @Override
- public int hashCode() {
- final int prime = 293;
- int result = super.hashCode();
- result = prime * result + ((actions == null) ? 0 : actions.hashCode());
- result = prime * result + actionsLength;
- result = prime * result + bufferId;
- result = prime * result + inPort;
- result = prime * result + Arrays.hashCode(packetData);
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFPacketOut)) {
- return false;
- }
- OFPacketOut other = (OFPacketOut) obj;
- if (actions == null) {
- if (other.actions != null) {
- return false;
- }
- } else if (!actions.equals(other.actions)) {
- return false;
- }
- if (actionsLength != other.actionsLength) {
- return false;
- }
- if (bufferId != other.bufferId) {
- return false;
- }
- if (inPort != other.inPort) {
- return false;
- }
- if (!Arrays.equals(packetData, other.packetData)) {
- return false;
- }
- return true;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return "OFPacketOut [actionFactory=" + actionFactory + ", actions="
- + actions + ", actionsLength=" + actionsLength + ", bufferId=0x"
- + Integer.toHexString(bufferId) + ", inPort=" + inPort + ", packetData="
- + Arrays.toString(packetData) + "]";
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.io.Serializable;
-import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.HashMap;
-
-
-
-/**
- * Represents ofp_phy_port
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 25, 2010
- */
-public class OFPhysicalPort implements Cloneable, Serializable {
- public static int MINIMUM_LENGTH = 48;
- public static int OFP_ETH_ALEN = 6;
-
- public enum OFPortConfig {
- OFPPC_PORT_DOWN (1 << 0),
- OFPPC_NO_STP (1 << 1),
- OFPPC_NO_RECV (1 << 2),
- OFPPC_NO_RECV_STP (1 << 3),
- OFPPC_NO_FLOOD (1 << 4),
- OFPPC_NO_FWD (1 << 5),
- OFPPC_NO_PACKET_IN (1 << 6);
-
- protected int value;
-
- private OFPortConfig(int value) {
- this.value = value;
- }
-
- /**
- * @return the value
- */
- public int getValue() {
- return value;
- }
- }
-
- public enum OFPortState {
- OFPPS_LINK_DOWN (1 << 0),
- OFPPS_STP_LISTEN (0 << 8),
- OFPPS_STP_LEARN (1 << 8),
- OFPPS_STP_FORWARD (2 << 8),
- OFPPS_STP_BLOCK (3 << 8),
- OFPPS_STP_MASK (3 << 8);
-
- protected int value;
-
- private OFPortState(int value) {
- this.value = value;
- }
-
- /**
- * @return the value
- */
- public int getValue() {
- return value;
- }
- }
-
- public enum OFPortFeatures {
- OFPPF_10MB_HD (1 << 0),
- OFPPF_10MB_FD (1 << 1),
- OFPPF_100MB_HD (1 << 2),
- OFPPF_100MB_FD (1 << 3),
- OFPPF_1GB_HD (1 << 4),
- OFPPF_1GB_FD (1 << 5),
- OFPPF_10GB_FD (1 << 6),
- OFPPF_COPPER (1 << 7),
- OFPPF_FIBER (1 << 8),
- OFPPF_AUTONEG (1 << 9),
- OFPPF_PAUSE (1 << 10),
- OFPPF_PAUSE_ASYM (1 << 11);
-
- protected int value;
-
- private OFPortFeatures(int value) {
- this.value = value;
- }
-
- /**
- * @return the value
- */
- public int getValue() {
- return value;
- }
- }
-
- protected short portNumber;
- protected byte[] hardwareAddress;
- protected String name;
- protected int config;
- protected int state;
- protected int currentFeatures;
- protected int advertisedFeatures;
- protected int supportedFeatures;
- protected int peerFeatures;
-
- /**
- * @return the portNumber
- */
- public short getPortNumber() {
- return portNumber;
- }
-
- /**
- * @param portNumber the portNumber to set
- */
- public void setPortNumber(short portNumber) {
- this.portNumber = portNumber;
- }
-
- /**
- * @return the hardwareAddress
- */
- public byte[] getHardwareAddress() {
- return hardwareAddress;
- }
-
- /**
- * @param hardwareAddress the hardwareAddress to set
- */
- public void setHardwareAddress(byte[] hardwareAddress) {
- if (hardwareAddress.length != OFP_ETH_ALEN)
- throw new RuntimeException("Hardware address must have length "
- + OFP_ETH_ALEN);
- this.hardwareAddress = hardwareAddress;
- }
-
- /**
- * @return the name
- */
- public String getName() {
- return name;
- }
-
- /**
- * @param name the name to set
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /**
- * @return the config
- */
- public int getConfig() {
- return config;
- }
-
- /**
- * @param config the config to set
- */
- public void setConfig(int config) {
- this.config = config;
- }
-
- /**
- * @return the state
- */
- public int getState() {
- return state;
- }
-
- /**
- * @param state the state to set
- */
- public void setState(int state) {
- this.state = state;
- }
-
- /**
- * @return the currentFeatures
- */
- public int getCurrentFeatures() {
- return currentFeatures;
- }
-
- /**
- * @param currentFeatures the currentFeatures to set
- */
- public void setCurrentFeatures(int currentFeatures) {
- this.currentFeatures = currentFeatures;
- }
-
- /**
- * @return the advertisedFeatures
- */
- public int getAdvertisedFeatures() {
- return advertisedFeatures;
- }
-
- /**
- * @param advertisedFeatures the advertisedFeatures to set
- */
- public void setAdvertisedFeatures(int advertisedFeatures) {
- this.advertisedFeatures = advertisedFeatures;
- }
-
- /**
- * @return the supportedFeatures
- */
- public int getSupportedFeatures() {
- return supportedFeatures;
- }
-
- /**
- * @param supportedFeatures the supportedFeatures to set
- */
- public void setSupportedFeatures(int supportedFeatures) {
- this.supportedFeatures = supportedFeatures;
- }
-
- /**
- * @return the peerFeatures
- */
- public int getPeerFeatures() {
- return peerFeatures;
- }
-
- /**
- * @param peerFeatures the peerFeatures to set
- */
- public void setPeerFeatures(int peerFeatures) {
- this.peerFeatures = peerFeatures;
- }
-
- /**
- * Read this message off the wire from the specified ByteBuffer
- * @param data
- */
- public void readFrom(ByteBuffer data) {
- this.portNumber = data.getShort();
- if (this.hardwareAddress == null)
- this.hardwareAddress = new byte[OFP_ETH_ALEN];
- data.get(this.hardwareAddress);
- byte[] name = new byte[16];
- data.get(name);
- // find the first index of 0
- int index = 0;
- for (byte b : name) {
- if (0 == b)
- break;
- ++index;
- }
- this.name = new String(Arrays.copyOf(name, index),
- Charset.forName("ascii"));
- this.config = data.getInt();
- this.state = data.getInt();
- this.currentFeatures = data.getInt();
- this.advertisedFeatures = data.getInt();
- this.supportedFeatures = data.getInt();
- this.peerFeatures = data.getInt();
- }
-
- /**
- * Write this message's binary format to the specified ByteBuffer
- * @param data
- */
- public void writeTo(ByteBuffer data) {
- data.putShort(this.portNumber);
- data.put(hardwareAddress);
- try {
- byte[] name = this.name.getBytes("ASCII");
- if (name.length < 16) {
- data.put(name);
- for (int i = name.length; i < 16; ++i) {
- data.put((byte) 0);
- }
- } else {
- data.put(name, 0, 15);
- data.put((byte) 0);
- }
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException(e);
- }
- data.putInt(this.config);
- data.putInt(this.state);
- data.putInt(this.currentFeatures);
- data.putInt(this.advertisedFeatures);
- data.putInt(this.supportedFeatures);
- data.putInt(this.peerFeatures);
- }
-
- @Override
- public int hashCode() {
- final int prime = 307;
- int result = 1;
- result = prime * result + advertisedFeatures;
- result = prime * result + config;
- result = prime * result + currentFeatures;
- result = prime * result + Arrays.hashCode(hardwareAddress);
- result = prime * result + ((name == null) ? 0 : name.hashCode());
- result = prime * result + peerFeatures;
- result = prime * result + portNumber;
- result = prime * result + state;
- result = prime * result + supportedFeatures;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFPhysicalPort)) {
- return false;
- }
- OFPhysicalPort other = (OFPhysicalPort) obj;
- if (advertisedFeatures != other.advertisedFeatures) {
- return false;
- }
- if (config != other.config) {
- return false;
- }
- if (currentFeatures != other.currentFeatures) {
- return false;
- }
- if (!Arrays.equals(hardwareAddress, other.hardwareAddress)) {
- return false;
- }
- if (name == null) {
- if (other.name != null) {
- return false;
- }
- } else if (!name.equals(other.name)) {
- return false;
- }
- if (peerFeatures != other.peerFeatures) {
- return false;
- }
- if (portNumber != other.portNumber) {
- return false;
- }
- if (state != other.state) {
- return false;
- }
- if (supportedFeatures != other.supportedFeatures) {
- return false;
- }
- return true;
- }
-
- public OFPhysicalPort cloneOFPhysicalPort() {
- OFPhysicalPort p;
- try
- {
- p = (OFPhysicalPort) this.clone();
-
- }
- catch (CloneNotSupportedException e)
- {
- throw new AssertionError();
- }
- return p;
- }
-
-}
+++ /dev/null
-package org.openflow.protocol;
-
-public enum OFPort {
- OFPP_MAX ((short)0xff00),
- OFPP_IN_PORT ((short)0xfff8),
- OFPP_TABLE ((short)0xfff9),
- OFPP_NORMAL ((short)0xfffa),
- OFPP_FLOOD ((short)0xfffb),
- OFPP_ALL ((short)0xfffc),
- OFPP_CONTROLLER ((short)0xfffd),
- OFPP_LOCAL ((short)0xfffe),
- OFPP_NONE ((short)0xffff);
-
- protected short value;
-
- private OFPort(short value) {
- this.value = value;
- }
-
- /**
- * @return the value
- */
- public short getValue() {
- return value;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_port_mod message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFPortMod extends OFMessage {
- public static int MINIMUM_LENGTH = 32;
-
- protected short portNumber;
- protected byte[] hardwareAddress;
- protected int config;
- protected int mask;
- protected int advertise;
-
- public OFPortMod() {
- super();
- this.type = OFType.PORT_MOD;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the portNumber
- */
- public short getPortNumber() {
- return portNumber;
- }
-
- /**
- * @param portNumber the portNumber to set
- */
- public void setPortNumber(short portNumber) {
- this.portNumber = portNumber;
- }
-
- /**
- * @return the hardwareAddress
- */
- public byte[] getHardwareAddress() {
- return hardwareAddress;
- }
-
- /**
- * @param hardwareAddress the hardwareAddress to set
- */
- public void setHardwareAddress(byte[] hardwareAddress) {
- if (hardwareAddress.length != OFPhysicalPort.OFP_ETH_ALEN)
- throw new RuntimeException("Hardware address must have length "
- + OFPhysicalPort.OFP_ETH_ALEN);
- this.hardwareAddress = hardwareAddress;
- }
-
- /**
- * @return the config
- */
- public int getConfig() {
- return config;
- }
-
- /**
- * @param config the config to set
- */
- public void setConfig(int config) {
- this.config = config;
- }
-
- /**
- * @return the mask
- */
- public int getMask() {
- return mask;
- }
-
- /**
- * @param mask the mask to set
- */
- public void setMask(int mask) {
- this.mask = mask;
- }
-
- /**
- * @return the advertise
- */
- public int getAdvertise() {
- return advertise;
- }
-
- /**
- * @param advertise the advertise to set
- */
- public void setAdvertise(int advertise) {
- this.advertise = advertise;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.portNumber = data.getShort();
- if (this.hardwareAddress == null)
- this.hardwareAddress = new byte[OFPhysicalPort.OFP_ETH_ALEN];
- data.get(this.hardwareAddress);
- this.config = data.getInt();
- this.mask = data.getInt();
- this.advertise = data.getInt();
- data.getInt(); // pad
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.portNumber);
- data.put(this.hardwareAddress);
- data.putInt(this.config);
- data.putInt(this.mask);
- data.putInt(this.advertise);
- data.putInt(0); // pad
- }
-
- @Override
- public int hashCode() {
- final int prime = 311;
- int result = super.hashCode();
- result = prime * result + advertise;
- result = prime * result + config;
- result = prime * result + Arrays.hashCode(hardwareAddress);
- result = prime * result + mask;
- result = prime * result + portNumber;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFPortMod)) {
- return false;
- }
- OFPortMod other = (OFPortMod) obj;
- if (advertise != other.advertise) {
- return false;
- }
- if (config != other.config) {
- return false;
- }
- if (!Arrays.equals(hardwareAddress, other.hardwareAddress)) {
- return false;
- }
- if (mask != other.mask) {
- return false;
- }
- if (portNumber != other.portNumber) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_port_status message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFPortStatus extends OFMessage {
- public static int MINIMUM_LENGTH = 64;
-
- public enum OFPortReason {
- OFPPR_ADD,
- OFPPR_DELETE,
- OFPPR_MODIFY
- }
-
- protected byte reason;
- protected OFPhysicalPort desc;
-
- /**
- * @return the reason
- */
- public byte getReason() {
- return reason;
- }
-
- /**
- * @param reason the reason to set
- */
- public void setReason(byte reason) {
- this.reason = reason;
- }
-
- /**
- * @return the desc
- */
- public OFPhysicalPort getDesc() {
- return desc;
- }
-
- /**
- * @param desc the desc to set
- */
- public void setDesc(OFPhysicalPort desc) {
- this.desc = desc;
- }
-
- public OFPortStatus() {
- super();
- this.type = OFType.PORT_STATUS;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.reason = data.get();
- data.position(data.position() + 7); // skip 7 bytes of padding
- if (this.desc == null)
- this.desc = new OFPhysicalPort();
- this.desc.readFrom(data);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.put(this.reason);
- for (int i = 0; i < 7; ++i)
- data.put((byte) 0);
- this.desc.writeTo(data);
- }
-
- @Override
- public int hashCode() {
- final int prime = 313;
- int result = super.hashCode();
- result = prime * result + ((desc == null) ? 0 : desc.hashCode());
- result = prime * result + reason;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFPortStatus)) {
- return false;
- }
- OFPortStatus other = (OFPortStatus) obj;
- if (desc == null) {
- if (other.desc != null) {
- return false;
- }
- } else if (!desc.equals(other.desc)) {
- return false;
- }
- if (reason != other.reason) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.openflow.util.U16;
-import org.openflow.protocol.factory.OFQueuePropertyFactory;
-import org.openflow.protocol.factory.OFQueuePropertyFactoryAware;
-import org.openflow.protocol.queue.OFPacketQueue;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueueConfigReply extends OFMessage implements Cloneable, OFQueuePropertyFactoryAware {
- public static int MINIMUM_LENGTH = 16;
-
- protected OFQueuePropertyFactory queuePropertyFactory;
-
- protected short port;
- protected List<OFPacketQueue> queues;
-
- /**
- *
- */
- public OFQueueConfigReply() {
- super();
- this.type = OFType.QUEUE_CONFIG_REPLY;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the port
- */
- public short getPort() {
- return port;
- }
-
- /**
- * @param port the port to set
- */
- public OFQueueConfigReply setPort(short port) {
- this.port = port;
- return this;
- }
-
- /**
- * @return the queues
- */
- public List<OFPacketQueue> getQueues() {
- return queues;
- }
-
- /**
- * @param queues the queues to set
- */
- public void setQueues(List<OFPacketQueue> queues) {
- this.queues = queues;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.port = data.getShort();
- data.getShort(); // pad
- data.getInt(); // pad
- int remaining = this.getLengthU() - MINIMUM_LENGTH;
- if (data.remaining() < remaining)
- remaining = data.remaining();
- this.queues = new ArrayList<OFPacketQueue>();
- while (remaining >= OFPacketQueue.MINIMUM_LENGTH) {
- OFPacketQueue queue = new OFPacketQueue();
- queue.setQueuePropertyFactory(this.queuePropertyFactory);
- queue.readFrom(data);
- remaining -= U16.f(queue.getLength());
- this.queues.add(queue);
- }
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.port);
- data.putShort((short) 0); // pad
- data.putInt(0); // pad
- if (this.queues != null) {
- for (OFPacketQueue queue : this.queues) {
- queue.writeTo(data);
- }
- }
- }
-
- @Override
- public int hashCode() {
- final int prime = 4549;
- int result = super.hashCode();
- result = prime * result + port;
- result = prime * result + ((queues == null) ? 0 : queues.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (!(obj instanceof OFQueueConfigReply))
- return false;
- OFQueueConfigReply other = (OFQueueConfigReply) obj;
- if (port != other.port)
- return false;
- if (queues == null) {
- if (other.queues != null)
- return false;
- } else if (!queues.equals(other.queues))
- return false;
- return true;
- }
-
- @Override
- public void setQueuePropertyFactory(
- OFQueuePropertyFactory queuePropertyFactory) {
- this.queuePropertyFactory = queuePropertyFactory;
- }
-
- @Override
- public OFQueueConfigReply clone() {
- try {
- OFQueueConfigReply clone = (OFQueueConfigReply) super.clone();
- if (this.queues != null) {
- List<OFPacketQueue> queues = new ArrayList<OFPacketQueue>();
- for (OFPacketQueue queue : this.queues) {
- queues.add(queue.clone());
- }
- clone.setQueues(queues);
- }
- return clone;
- } catch (CloneNotSupportedException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public String toString() {
- return "OFQueueConfigReply [port=" + U16.f(port) + ", queues=" + queues
- + ", xid=" + xid + "]";
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueueConfigRequest extends OFMessage implements Cloneable {
- public static int MINIMUM_LENGTH = 12;
-
- protected short port;
-
- /**
- *
- */
- public OFQueueConfigRequest() {
- super();
- this.type = OFType.QUEUE_CONFIG_REQUEST;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the port
- */
- public short getPort() {
- return port;
- }
-
- /**
- * @param port the port to set
- */
- public void setPort(short port) {
- this.port = port;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.port = data.getShort();
- data.get(); // pad
- data.get(); // pad
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.port);
- data.putShort((short) 0); // pad
- }
-
- @Override
- public int hashCode() {
- final int prime = 7211;
- int result = super.hashCode();
- result = prime * result + port;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (!(obj instanceof OFQueueConfigRequest))
- return false;
- OFQueueConfigRequest other = (OFQueueConfigRequest) obj;
- if (port != other.port)
- return false;
- return true;
- }
-
- @Override
- public OFQueueConfigRequest clone() {
- try {
- return (OFQueueConfigRequest) super.clone();
- } catch (CloneNotSupportedException e) {
- throw new RuntimeException(e);
- }
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-/**
- * Represents an OFPT_SET_CONFIG type message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFSetConfig extends OFSwitchConfig {
- public OFSetConfig() {
- super();
- this.type = OFType.SET_CONFIG;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.factory.OFStatisticsFactory;
-import org.openflow.protocol.factory.OFStatisticsFactoryAware;
-import org.openflow.protocol.statistics.OFStatistics;
-import org.openflow.protocol.statistics.OFStatisticsType;
-
-
-/**
- * Base class for statistics requests/replies
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 27, 2010
- */
-public abstract class OFStatisticsMessageBase extends OFMessage implements
- OFStatisticsFactoryAware {
- public static int MINIMUM_LENGTH = 12;
-
- protected OFStatisticsFactory statisticsFactory;
- protected OFStatisticsType statisticType;
- protected short flags;
- protected List<OFStatistics> statistics;
-
- /**
- * @return the statisticType
- */
- public OFStatisticsType getStatisticType() {
- return statisticType;
- }
-
- /**
- * @param statisticType the statisticType to set
- */
- public void setStatisticType(OFStatisticsType statisticType) {
- this.statisticType = statisticType;
- }
-
- /**
- * @return the flags
- */
- public short getFlags() {
- return flags;
- }
-
- /**
- * @param flags the flags to set
- */
- public void setFlags(short flags) {
- this.flags = flags;
- }
-
- /**
- * @return the statistics
- */
- public List<OFStatistics> getStatistics() {
- return statistics;
- }
-
- /**
- * @param statistics the statistics to set
- */
- public void setStatistics(List<OFStatistics> statistics) {
- this.statistics = statistics;
- }
-
- @Override
- public void setStatisticsFactory(OFStatisticsFactory statisticsFactory) {
- this.statisticsFactory = statisticsFactory;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.statisticType = OFStatisticsType.valueOf(data.getShort(), this
- .getType());
- this.flags = data.getShort();
- if (this.statisticsFactory == null)
- throw new RuntimeException("OFStatisticsFactory not set");
- this.statistics = statisticsFactory.parseStatistics(this.getType(),
- this.statisticType, data, super.getLengthU() - MINIMUM_LENGTH);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.statisticType.getTypeValue());
- data.putShort(this.flags);
- if (this.statistics != null) {
- for (OFStatistics statistic : this.statistics) {
- statistic.writeTo(data);
- }
- }
- }
-
- @Override
- public int hashCode() {
- final int prime = 317;
- int result = super.hashCode();
- result = prime * result + flags;
- result = prime * result
- + ((statisticType == null) ? 0 : statisticType.hashCode());
- result = prime * result
- + ((statistics == null) ? 0 : statistics.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFStatisticsMessageBase)) {
- return false;
- }
- OFStatisticsMessageBase other = (OFStatisticsMessageBase) obj;
- if (flags != other.flags) {
- return false;
- }
- if (statisticType == null) {
- if (other.statisticType != null) {
- return false;
- }
- } else if (!statisticType.equals(other.statisticType)) {
- return false;
- }
- if (statistics == null) {
- if (other.statistics != null) {
- return false;
- }
- } else if (!statistics.equals(other.statistics)) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_stats_reply message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFStatisticsReply extends OFStatisticsMessageBase {
- public enum OFStatisticsReplyFlags {
- REPLY_MORE (1 << 0);
-
- protected short type;
-
- OFStatisticsReplyFlags(int type) {
- this.type = (short) type;
- }
-
- public short getTypeValue() {
- return type;
- }
- }
-
- public OFStatisticsReply() {
- super();
- this.type = OFType.STATS_REPLY;
- this.length = U16.t(OFStatisticsMessageBase.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_stats_request message
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFStatisticsRequest extends OFStatisticsMessageBase {
- public OFStatisticsRequest() {
- super();
- this.type = OFType.STATS_REQUEST;
- this.length = U16.t(OFStatisticsMessageBase.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-/**
- * Base class representing ofp_switch_config based messages
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public abstract class OFSwitchConfig extends OFMessage {
- public static int MINIMUM_LENGTH = 12;
-
- public enum OFConfigFlags {
- OFPC_FRAG_NORMAL,
- OFPC_FRAG_DROP,
- OFPC_FRAG_REASM,
- OFPC_FRAG_MASK
- }
-
- protected short flags;
- protected short missSendLength;
-
- public OFSwitchConfig() {
- super();
- super.setLengthU(MINIMUM_LENGTH);
- }
-
- /**
- * @return the flags
- */
- public short getFlags() {
- return flags;
- }
-
- /**
- * @param flags the flags to set
- */
- public OFSwitchConfig setFlags(short flags) {
- this.flags = flags;
- return this;
- }
-
- /**
- * @return the missSendLength
- */
- public short getMissSendLength() {
- return missSendLength;
- }
-
- /**
- * @param missSendLength the missSendLength to set
- */
- public OFSwitchConfig setMissSendLength(short missSendLength) {
- this.missSendLength = missSendLength;
- return this;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.flags = data.getShort();
- this.missSendLength = data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.flags);
- data.putShort(this.missSendLength);
- }
-
- @Override
- public int hashCode() {
- final int prime = 331;
- int result = super.hashCode();
- result = prime * result + flags;
- result = prime * result + missSendLength;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFSwitchConfig)) {
- return false;
- }
- OFSwitchConfig other = (OFSwitchConfig) obj;
- if (flags != other.flags) {
- return false;
- }
- if (missSendLength != other.missSendLength) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.lang.reflect.Constructor;
-
-/**
- * List of OpenFlow types and mappings to wire protocol value and derived
- * classes
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- * @author David Erickson (daviderickson@cs.stanford.edu)
- *
- */
-public enum OFType {
- HELLO (0, OFHello.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFHello();
- }}),
- ERROR (1, OFError.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFError();
- }}),
- ECHO_REQUEST (2, OFEchoRequest.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFEchoRequest();
- }}),
- ECHO_REPLY (3, OFEchoReply.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFEchoReply();
- }}),
- VENDOR (4, OFVendor.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFVendor();
- }}),
- FEATURES_REQUEST (5, OFFeaturesRequest.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFFeaturesRequest();
- }}),
- FEATURES_REPLY (6, OFFeaturesReply.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFFeaturesReply();
- }}),
- GET_CONFIG_REQUEST (7, OFGetConfigRequest.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFGetConfigRequest();
- }}),
- GET_CONFIG_REPLY (8, OFGetConfigReply.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFGetConfigReply();
- }}),
- SET_CONFIG (9, OFSetConfig.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFSetConfig();
- }}),
- PACKET_IN (10, OFPacketIn.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFPacketIn();
- }}),
- FLOW_REMOVED (11, OFFlowRemoved.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFFlowRemoved();
- }}),
- PORT_STATUS (12, OFPortStatus.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFPortStatus();
- }}),
- PACKET_OUT (13, OFPacketOut.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFPacketOut();
- }}),
- FLOW_MOD (14, OFFlowMod.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFFlowMod();
- }}),
- PORT_MOD (15, OFPortMod.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFPortMod();
- }}),
- STATS_REQUEST (16, OFStatisticsRequest.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFStatisticsRequest();
- }}),
- STATS_REPLY (17, OFStatisticsReply.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFStatisticsReply();
- }}),
- BARRIER_REQUEST (18, OFBarrierRequest.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFBarrierRequest();
- }}),
- BARRIER_REPLY (19, OFBarrierReply.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFBarrierReply();
- }}),
- QUEUE_CONFIG_REQUEST (20, OFMessage.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFQueueConfigRequest();
- }}),
- QUEUE_CONFIG_REPLY (21, OFMessage.class, new Instantiable<OFMessage>() {
- @Override
- public OFMessage instantiate() {
- return new OFQueueConfigReply();
- }});
-
- static OFType[] mapping;
-
- protected Class<? extends OFMessage> clazz;
- protected Constructor<? extends OFMessage> constructor;
- protected Instantiable<OFMessage> instantiable;
- protected byte type;
-
- /**
- * Store some information about the OpenFlow type, including wire protocol
- * type number, length, and derived class
- *
- * @param type Wire protocol number associated with this OFType
- * @param requestClass The Java class corresponding to this type of OpenFlow
- * message
- * @param instantiator An Instantiator<OFMessage> implementation that creates an
- * instance of the specified OFMessage
- */
- OFType(int type, Class<? extends OFMessage> clazz, Instantiable<OFMessage> instantiator) {
- this.type = (byte) type;
- this.clazz = clazz;
- this.instantiable = instantiator;
- try {
- this.constructor = clazz.getConstructor(new Class[]{});
- } catch (Exception e) {
- throw new RuntimeException(
- "Failure getting constructor for class: " + clazz, e);
- }
- OFType.addMapping(this.type, this);
- }
-
- /**
- * Adds a mapping from type value to OFType enum
- *
- * @param i OpenFlow wire protocol type
- * @param t type
- */
- public static void addMapping(byte i, OFType t) {
- if (mapping == null)
- mapping = new OFType[32];
- OFType.mapping[i] = t;
- }
-
- /**
- * Remove a mapping from type value to OFType enum
- *
- * @param i OpenFlow wire protocol type
- */
- public static void removeMapping(byte i) {
- OFType.mapping[i] = null;
- }
-
- /**
- * Given a wire protocol OpenFlow type number, return the OFType associated
- * with it
- *
- * @param i wire protocol number
- * @return OFType enum type
- */
-
- public static OFType valueOf(Byte i) {
- return OFType.mapping[i];
- }
-
- /**
- * @return Returns the wire protocol value corresponding to this OFType
- */
- public byte getTypeValue() {
- return this.type;
- }
-
- /**
- * @return return the OFMessage subclass corresponding to this OFType
- */
- public Class<? extends OFMessage> toClass() {
- return clazz;
- }
-
- /**
- * Returns the no-argument Constructor of the implementation class for
- * this OFType
- * @return the constructor
- */
- public Constructor<? extends OFMessage> getConstructor() {
- return constructor;
- }
-
- /**
- * Returns a new instance of the OFMessage represented by this OFType
- * @return the new object
- */
- public OFMessage newInstance() {
- return instantiable.instantiate();
- }
-
- /**
- * @return the instantiable
- */
- public Instantiable<OFMessage> getInstantiable() {
- return instantiable;
- }
-
- /**
- * @param instantiable the instantiable to set
- */
- public void setInstantiable(Instantiable<OFMessage> instantiable) {
- this.instantiable = instantiable;
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import org.openflow.util.U16;
-
-/**
- * Represents ofp_vendor_header
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFVendor extends OFMessage {
- public static int MINIMUM_LENGTH = 12;
-
- protected int vendor;
- protected byte[] data;
-
- public OFVendor() {
- super();
- this.type = OFType.VENDOR;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the vendor
- */
- public int getVendor() {
- return vendor;
- }
-
- /**
- * @param vendor the vendor to set
- */
- public void setVendor(int vendor) {
- this.vendor = vendor;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.vendor = data.getInt();
- if (this.length > MINIMUM_LENGTH) {
- this.data = new byte[this.length - MINIMUM_LENGTH];
- data.get(this.data);
- }
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putInt(this.vendor);
- if (this.data != null)
- data.put(this.data);
- }
-
- /**
- * @return the data
- */
- public byte[] getData() {
- return data;
- }
-
- /**
- * @param data the data to set
- */
- public void setData(byte[] data) {
- this.data = data;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
- final int prime = 337;
- int result = super.hashCode();
- result = prime * result + Arrays.hashCode(data);
- result = prime * result + vendor;
- return result;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (getClass() != obj.getClass())
- return false;
- OFVendor other = (OFVendor) obj;
- if (!Arrays.equals(data, other.data))
- return false;
- if (vendor != other.vendor)
- return false;
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.action;
-
- import java.net.Inet4Address;
- import java.net.InetAddress;
- import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-
-import org.openflow.util.HexString;
-
-
- public class ActionVendorOutputNextHop extends OFActionVendor {
- private static final long serialVersionUID = 1L;
- private static int VENDOR_CISCO = 0xC;
- private enum ONHLength {
- ONH_LEN_MIN(16),
- ONH_LEN_P2P(16),
- ONH_LEN_IPV4(24),
- ONH_LEN_MAC(32),
- ONH_LEN_IPV6(40);
- private int value;
- private ONHLength(int value) {
- this.value = value;
- }
- public int getValue() {
- return this.value;
- }
- }
- private enum ONHActionType {
- ONH_ACTION_NONE(0),
- ONH_ACTION_OUTPUT_NH(1),
- ONH_ACTION_NETFLOW(2);
- private int value;
- private ONHActionType(int value) {
- this.value = value;
- }
- public int getValue() {
- return this.value;
- }
- }
- private enum ONHAddressType {
- ONH_ADDRTYPE_NONE(0),
- ONH_ADDRTYPE_P2P(1),
- ONH_ADDRTYPE_IPV4(2),
- ONH_ADDRTYPE_IPV6(3),
- ONH_ADDRTYPE_MAC48(4);
- private int value;
- private ONHAddressType(int value) {
- this.value = value;
- }
- public int getValue() {
- return this.value;
- }
- }
- private enum ONHXAddressType {
- ONH_XADDRTYPE_NONE(0),
- ONH_XADDRTYPE_PORT(1),
- ONH_XADDRTYPE_VPNID(2);
- private int value;
- private ONHXAddressType(int value) {
- this.value = value;
- }
- public int getValue() {
- return this.value;
- }
- }
- protected InetAddress address;
-
- public ActionVendorOutputNextHop() {
- super();
- super.setLength((short)ONHLength.ONH_LEN_MIN.getValue());
- super.setVendor(VENDOR_CISCO);
- this.address = null;
- }
-
- public void setNextHop(InetAddress address) {
- short actionLen;
- if (address instanceof Inet4Address) {
- actionLen = (short)ONHLength.ONH_LEN_IPV4.getValue();
- } else {
- actionLen = (short)ONHLength.ONH_LEN_IPV6.getValue();
- }
- super.setLength(actionLen);
- this.address = address;
- }
- public InetAddress getNextHop() {
- return this.address;
- }
- @Override
- public void readFrom(ByteBuffer data) {
- /*
- * For now, only contains the next hop address
- */
- //super.readFrom(data); don't need this
-
- if (data.remaining() < super.getLength()-8) {
- /*
- * malformed element, skip over
- */
- data.position(data.remaining());
- return;
- }
- if ((super.getLength() != (short)ONHLength.ONH_LEN_IPV4.getValue()) &&
- (super.getLength() != (short)ONHLength.ONH_LEN_IPV6.getValue())) {
- /*
- * mal-formed element, skip over
- */
- data.position(super.getLength());
- return;
- }
- data.getShort(); // skip the ONH_ACTION_OUTPUT_NH
- data.getShort(); // skip address and xtraaddress types
- data.getInt(); // skip the extra address (8 bytes)
- data.getInt();
- byte[] a;
- if (super.getLength() == (short)ONHLength.ONH_LEN_IPV4.getValue()) {
- a = new byte[4];
- data.get(a);
- } else {
- a = new byte[16];
- data.get(a);
- data.getInt(); //4 bytes pad
- }
- try {
- this.address = InetAddress.getByAddress(a);
- } catch (UnknownHostException e) {
- e.printStackTrace();
- }
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- byte atype = (byte)(ONHAddressType.ONH_ADDRTYPE_NONE.getValue());
- byte xatype = (byte)(ONHXAddressType.ONH_XADDRTYPE_NONE.getValue());
- if (address instanceof Inet4Address)
- atype = (byte)(ONHAddressType.ONH_ADDRTYPE_IPV4.getValue());
- else
- atype = (byte)(ONHAddressType.ONH_ADDRTYPE_IPV6.getValue());
- super.writeTo(data); // this writes the standard 8byte ofp_action_vendor_header
- data.putShort((short)(ONHActionType.ONH_ACTION_OUTPUT_NH.getValue()));
- data.put(atype);
- data.put(xatype);
- /*
- * write the xtra address. For now it is all 0
- */
- data.putInt(0); // 8-byte pad
- data.putInt(0);
- /*
- * write the address only when address type is not P2P
- */
- if (atype == (byte)(ONHAddressType.ONH_ADDRTYPE_IPV4.getValue())) {
- data.put(address.getAddress()); // no need to pad
- //avnh.put(address.getAddress());
- } else if (atype == (byte)(ONHAddressType.ONH_ADDRTYPE_IPV6.getValue())) {
- data.put(address.getAddress());
- //avnh.put(address.getAddress());
- data.putInt(0); // 4-byte pad
- //avnh.putInt(0);
- }
- ActionVendorOutputNextHop a = new ActionVendorOutputNextHop();
- a.setLength((short)24);
- }
-
- @Override
- public int hashCode() {
- final int prime = 347;
- int result = super.hashCode();
- result = prime * result + address.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof ActionVendorOutputNextHop)) {
- return false;
- }
- ActionVendorOutputNextHop other = (ActionVendorOutputNextHop) obj;
- if (!other.address.equals(this.address))
- return false;
- return true;
- }
-
- public String toString() {
- return ("OutputNextHop: " + address.getHostAddress());
- }
-
- }
-
+++ /dev/null
-package org.openflow.protocol.action;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * The base class for all OpenFlow Actions.
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public class OFAction implements Cloneable, Serializable{
- /**
- * Note the true minimum length for this header is 8 including a pad to 64
- * bit alignment, however as this base class is used for demuxing an
- * incoming Action, it is only necessary to read the first 4 bytes. All
- * Actions extending this class are responsible for reading/writing the
- * first 8 bytes, including the pad if necessary.
- */
- public static int MINIMUM_LENGTH = 4;
- public static int OFFSET_LENGTH = 2;
- public static int OFFSET_TYPE = 0;
-
- protected OFActionType type;
- protected short length;
-
- /**
- * Get the length of this message
- *
- * @return
- */
- public short getLength() {
- return length;
- }
-
- /**
- * Get the length of this message, unsigned
- *
- * @return
- */
- public int getLengthU() {
- return U16.f(length);
- }
-
- /**
- * Set the length of this message
- *
- * @param length
- */
- public OFAction setLength(short length) {
- this.length = length;
- return this;
- }
-
- /**
- * Get the type of this message
- *
- * @return OFActionType enum
- */
- public OFActionType getType() {
- return this.type;
- }
-
- /**
- * Set the type of this message
- *
- * @param type
- */
- public void setType(OFActionType type) {
- this.type = type;
- }
-
- /**
- * Returns a summary of the message
- * @return "ofmsg=v=$version;t=$type:l=$len:xid=$xid"
- */
- public String toString() {
- return "ofaction" +
- ";t=" + this.getType() +
- ";l=" + this.getLength();
- }
-
- /**
- * Given the output from toString(),
- * create a new OFAction
- * @param val
- * @return
- */
- public static OFAction fromString(String val) {
- String tokens[] = val.split(";");
- if (!tokens[0].equals("ofaction"))
- throw new IllegalArgumentException("expected 'ofaction' but got '" +
- tokens[0] + "'");
- String type_tokens[] = tokens[1].split("=");
- String len_tokens[] = tokens[2].split("=");
- OFAction action = new OFAction();
- action.setLength(Short.valueOf(len_tokens[1]));
- action.setType(OFActionType.valueOf(type_tokens[1]));
- return action;
- }
-
- public void readFrom(ByteBuffer data) {
- this.type = OFActionType.valueOf(data.getShort());
- this.length = data.getShort();
- // Note missing PAD, see MINIMUM_LENGTH comment for details
- }
-
- public void writeTo(ByteBuffer data) {
- data.putShort(type.getTypeValue());
- data.putShort(length);
- // Note missing PAD, see MINIMUM_LENGTH comment for details
- }
-
- @Override
- public int hashCode() {
- final int prime = 347;
- int result = 1;
- result = prime * result + length;
- result = prime * result + ((type == null) ? 0 : type.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFAction)) {
- return false;
- }
- OFAction other = (OFAction) obj;
- if (length != other.length) {
- return false;
- }
- if (type == null) {
- if (other.type != null) {
- return false;
- }
- } else if (!type.equals(other.type)) {
- return false;
- }
- return true;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#clone()
- */
- @Override
- public OFAction clone() throws CloneNotSupportedException {
- return (OFAction) super.clone();
- }
-
-}
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import org.openflow.protocol.OFPhysicalPort;
-
-/**
- * Represents an ofp_action_dl_addr
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public abstract class OFActionDataLayer extends OFAction {
- public static int MINIMUM_LENGTH = 16;
-
- protected byte[] dataLayerAddress;
-
- /**
- * @return the dataLayerAddress
- */
- public byte[] getDataLayerAddress() {
- return dataLayerAddress;
- }
-
- /**
- * @param dataLayerAddress the dataLayerAddress to set
- */
- public void setDataLayerAddress(byte[] dataLayerAddress) {
- this.dataLayerAddress = dataLayerAddress;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- if (this.dataLayerAddress == null)
- this.dataLayerAddress = new byte[OFPhysicalPort.OFP_ETH_ALEN];
- data.get(this.dataLayerAddress);
- data.getInt();
- data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.put(this.dataLayerAddress, 0, OFPhysicalPort.OFP_ETH_ALEN);
- data.putInt(0);
- data.putShort((short) 0);
- }
-
- @Override
- public int hashCode() {
- final int prime = 347;
- int result = super.hashCode();
- result = prime * result + Arrays.hashCode(dataLayerAddress);
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionDataLayer)) {
- return false;
- }
- OFActionDataLayer other = (OFActionDataLayer) obj;
- if (!Arrays.equals(dataLayerAddress, other.dataLayerAddress)) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.action;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionDataLayerDestination extends OFActionDataLayer {
- public OFActionDataLayerDestination() {
- super();
- super.setType(OFActionType.SET_DL_DST);
- super.setLength((short) OFActionDataLayer.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol.action;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionDataLayerSource extends OFActionDataLayer {
- public OFActionDataLayerSource() {
- super();
- super.setType(OFActionType.SET_DL_SRC);
- super.setLength((short) OFActionDataLayer.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_action_enqueue
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public class OFActionEnqueue extends OFAction {
- public static int MINIMUM_LENGTH = 16;
-
- protected short port;
- protected int queueId;
-
- public OFActionEnqueue() {
- super.setType(OFActionType.OPAQUE_ENQUEUE);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- /**
- * Get the output port
- * @return
- */
- public short getPort() {
- return this.port;
- }
-
- /**
- * Set the output port
- * @param port
- */
- public void setPort(short port) {
- this.port = port;
- }
-
- /**
- * @return the queueId
- */
- public int getQueueId() {
- return queueId;
- }
-
- /**
- * @param queueId the queueId to set
- */
- public void setQueueId(int queueId) {
- this.queueId = queueId;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.port = data.getShort();
- data.getShort();
- data.getInt();
- this.queueId = data.getInt();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.port);
- data.putShort((short) 0);
- data.putInt(0);
- data.putInt(this.queueId);
- }
-
- @Override
- public int hashCode() {
- final int prime = 349;
- int result = super.hashCode();
- result = prime * result + port;
- result = prime * result + queueId;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionEnqueue)) {
- return false;
- }
- OFActionEnqueue other = (OFActionEnqueue) obj;
- if (port != other.port) {
- return false;
- }
- if (queueId != other.queueId) {
- return false;
- }
- return true;
- }
-}
\ No newline at end of file
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_action_nw_addr
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public abstract class OFActionNetworkLayerAddress extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- protected int networkAddress;
-
- /**
- * @return the networkAddress
- */
- public int getNetworkAddress() {
- return networkAddress;
- }
-
- /**
- * @param networkAddress the networkAddress to set
- */
- public void setNetworkAddress(int networkAddress) {
- this.networkAddress = networkAddress;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.networkAddress = data.getInt();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putInt(this.networkAddress);
- }
-
- @Override
- public int hashCode() {
- final int prime = 353;
- int result = super.hashCode();
- result = prime * result + networkAddress;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionNetworkLayerAddress)) {
- return false;
- }
- OFActionNetworkLayerAddress other = (OFActionNetworkLayerAddress) obj;
- if (networkAddress != other.networkAddress) {
- return false;
- }
- return true;
- }
-}
\ No newline at end of file
+++ /dev/null
-package org.openflow.protocol.action;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionNetworkLayerDestination extends OFActionNetworkLayerAddress {
- public OFActionNetworkLayerDestination() {
- super();
- super.setType(OFActionType.SET_NW_DST);
- super.setLength((short) OFActionNetworkLayerAddress.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol.action;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionNetworkLayerSource extends OFActionNetworkLayerAddress {
- public OFActionNetworkLayerSource() {
- super();
- super.setType(OFActionType.SET_NW_SRC);
- super.setLength((short) OFActionNetworkLayerAddress.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_action_enqueue
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public class OFActionNetworkTypeOfService extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- protected byte networkTypeOfService;
-
- public OFActionNetworkTypeOfService() {
- super.setType(OFActionType.SET_NW_TOS);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- /**
- * @return the networkTypeOfService
- */
- public byte getNetworkTypeOfService() {
- return networkTypeOfService;
- }
-
- /**
- * @param networkTypeOfService the networkTypeOfService to set
- */
- public void setNetworkTypeOfService(byte networkTypeOfService) {
- this.networkTypeOfService = networkTypeOfService;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.networkTypeOfService = data.get();
- data.getShort();
- data.get();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.put(this.networkTypeOfService);
- data.putShort((short) 0);
- data.put((byte) 0);
- }
-
- @Override
- public int hashCode() {
- final int prime = 359;
- int result = super.hashCode();
- result = prime * result + networkTypeOfService;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionNetworkTypeOfService)) {
- return false;
- }
- OFActionNetworkTypeOfService other = (OFActionNetworkTypeOfService) obj;
- if (networkTypeOfService != other.networkTypeOfService) {
- return false;
- }
- return true;
- }
-}
\ No newline at end of file
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- */
-public class OFActionOutput extends OFAction implements Cloneable {
- public static int MINIMUM_LENGTH = 8;
-
- protected short port;
- protected short maxLength;
-
- public OFActionOutput() {
- super.setType(OFActionType.OUTPUT);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- public OFActionOutput(short port, short maxLength) {
- super();
- super.setType(OFActionType.OUTPUT);
- super.setLength((short) MINIMUM_LENGTH);
- this.port = port;
- this.maxLength = maxLength;
- }
-
- /**
- * Get the output port
- * @return
- */
- public short getPort() {
- return this.port;
- }
-
- /**
- * Set the output port
- * @param port
- */
- public OFActionOutput setPort(short port) {
- this.port = port;
- return this;
- }
-
- /**
- * Get the max length to send to the controller
- * @return
- */
- public short getMaxLength() {
- return this.maxLength;
- }
-
- /**
- * Set the max length to send to the controller
- * @param maxLength
- */
- public OFActionOutput setMaxLength(short maxLength) {
- this.maxLength = maxLength;
- return this;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.port = data.getShort();
- this.maxLength = data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(port);
- data.putShort(maxLength);
- }
-
- @Override
- public int hashCode() {
- final int prime = 367;
- int result = super.hashCode();
- result = prime * result + maxLength;
- result = prime * result + port;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionOutput)) {
- return false;
- }
- OFActionOutput other = (OFActionOutput) obj;
- if (maxLength != other.maxLength) {
- return false;
- }
- if (port != other.port) {
- return false;
- }
- return true;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return "OFActionOutput [maxLength=" + maxLength + ", port=" + U16.f(port)
- + ", length=" + length + ", type=" + type + "]";
- }
-}
\ No newline at end of file
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-
-/**
- * Represents an ofp_action_strip_vlan
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public class OFActionStripVirtualLan extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- public OFActionStripVirtualLan() {
- super();
- super.setType(OFActionType.STRIP_VLAN);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- // PAD
- data.getInt();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- // PAD
- data.putInt(0);
- }
-}
\ No newline at end of file
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_action_tp_port
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public abstract class OFActionTransportLayer extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- protected short transportPort;
-
- /**
- * @return the transportPort
- */
- public short getTransportPort() {
- return transportPort;
- }
-
- /**
- * @param transportPort the transportPort to set
- */
- public void setTransportPort(short transportPort) {
- this.transportPort = transportPort;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.transportPort = data.getShort();
- data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.transportPort);
- data.putShort((short) 0);
- }
-
- @Override
- public int hashCode() {
- final int prime = 373;
- int result = super.hashCode();
- result = prime * result + transportPort;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionTransportLayer)) {
- return false;
- }
- OFActionTransportLayer other = (OFActionTransportLayer) obj;
- if (transportPort != other.transportPort) {
- return false;
- }
- return true;
- }
-}
\ No newline at end of file
+++ /dev/null
-package org.openflow.protocol.action;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionTransportLayerDestination extends OFActionTransportLayer {
- public OFActionTransportLayerDestination() {
- super();
- super.setType(OFActionType.SET_TP_DST);
- super.setLength((short) OFActionTransportLayer.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-package org.openflow.protocol.action;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionTransportLayerSource extends OFActionTransportLayer {
- public OFActionTransportLayerSource() {
- super();
- super.setType(OFActionType.SET_TP_SRC);
- super.setLength((short) OFActionTransportLayer.MINIMUM_LENGTH);
- }
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.protocol.action;
-
-import java.io.Serializable;
-import java.lang.reflect.Constructor;
-
-import org.openflow.protocol.Instantiable;
-
-/**
- * List of OpenFlow Action types and mappings to wire protocol value and
- * derived classes
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public enum OFActionType implements Serializable{
- OUTPUT (0, OFActionOutput.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionOutput();
- }}),
- SET_VLAN_VID (1, OFActionVirtualLanIdentifier.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionVirtualLanIdentifier();
- }}),
- SET_VLAN_PCP (2, OFActionVirtualLanPriorityCodePoint.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionVirtualLanPriorityCodePoint();
- }}),
- STRIP_VLAN (3, OFActionStripVirtualLan.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionStripVirtualLan();
- }}),
- SET_DL_SRC (4, OFActionDataLayerSource.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionDataLayerSource();
- }}),
- SET_DL_DST (5, OFActionDataLayerDestination.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionDataLayerDestination();
- }}),
- SET_NW_SRC (6, OFActionNetworkLayerSource.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionNetworkLayerSource();
- }}),
- SET_NW_DST (7, OFActionNetworkLayerDestination.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionNetworkLayerDestination();
- }}),
- SET_NW_TOS (8, OFActionNetworkTypeOfService.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionNetworkTypeOfService();
- }}),
- SET_TP_SRC (9, OFActionTransportLayerSource.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionTransportLayerSource();
- }}),
- SET_TP_DST (10, OFActionTransportLayerDestination.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionTransportLayerDestination();
- }}),
- OPAQUE_ENQUEUE (11, OFActionEnqueue.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionEnqueue();
- }}),
- VENDOR (0xffff, OFActionVendor.class, new Instantiable<OFAction>() {
- @Override
- public OFAction instantiate() {
- return new OFActionVendor();
- }});
-
- protected static OFActionType[] mapping;
-
- protected Class<? extends OFAction> clazz;
- protected Constructor<? extends OFAction> constructor;
- protected Instantiable<OFAction> instantiable;
- protected int minLen;
- protected short type;
-
- /**
- * Store some information about the OpenFlow Action type, including wire
- * protocol type number, length, and derrived class
- *
- * @param type Wire protocol number associated with this OFType
- * @param clazz The Java class corresponding to this type of OpenFlow Action
- * @param instantiable the instantiable for the OFAction this type represents
- */
- OFActionType(int type, Class<? extends OFAction> clazz, Instantiable<OFAction> instantiable) {
- this.type = (short) type;
- this.clazz = clazz;
- this.instantiable = instantiable;
- try {
- this.constructor = clazz.getConstructor(new Class[]{});
- } catch (Exception e) {
- throw new RuntimeException(
- "Failure getting constructor for class: " + clazz, e);
- }
- OFActionType.addMapping(this.type, this);
- }
-
- /**
- * Adds a mapping from type value to OFActionType enum
- *
- * @param i OpenFlow wire protocol Action type value
- * @param t type
- */
- static public void addMapping(short i, OFActionType t) {
- if (mapping == null)
- mapping = new OFActionType[16];
- // bring higher mappings down to the edge of our array
- if (i < 0)
- i = (short) (16 + i);
- OFActionType.mapping[i] = t;
- }
-
- /**
- * Given a wire protocol OpenFlow type number, return the OFType associated
- * with it
- *
- * @param i wire protocol number
- * @return OFType enum type
- */
-
- static public OFActionType valueOf(short i) {
- if (i < 0)
- i = (short) (16+i);
- return OFActionType.mapping[i];
- }
-
- /**
- * @return Returns the wire protocol value corresponding to this
- * OFActionType
- */
- public short getTypeValue() {
- return this.type;
- }
-
- /**
- * @return return the OFAction subclass corresponding to this OFActionType
- */
- public Class<? extends OFAction> toClass() {
- return clazz;
- }
-
- /**
- * Returns the no-argument Constructor of the implementation class for
- * this OFActionType
- * @return the constructor
- */
- public Constructor<? extends OFAction> getConstructor() {
- return constructor;
- }
-
- /**
- * Returns a new instance of the OFAction represented by this OFActionType
- * @return the new object
- */
- public OFAction newInstance() {
- return instantiable.instantiate();
- }
-
- /**
- * @return the instantiable
- */
- public Instantiable<OFAction> getInstantiable() {
- return instantiable;
- }
-
- /**
- * @param instantiable the instantiable to set
- */
- public void setInstantiable(Instantiable<OFAction> instantiable) {
- this.instantiable = instantiable;
- }
-}
+++ /dev/null
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFActionVendor extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- protected int vendor;
-
- public enum ActionVendorID {
- AVI_CISCO(0xC);
- private int value;
- private ActionVendorID(int value) {
- this.value = value;
- }
- public int getValue() {
- return this.value;
- }
- }
-
- public OFActionVendor() {
- super();
- super.setType(OFActionType.VENDOR);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- /**
- * @return the vendor
- */
- public int getVendor() {
- return vendor;
- }
-
- /**
- * @param vendor the vendor to set
- */
- public void setVendor(int vendor) {
- this.vendor = vendor;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.vendor = data.getInt();
- if (this.vendor == ActionVendorID.AVI_CISCO.getValue()) {
- ActionVendorOutputNextHop nh = new ActionVendorOutputNextHop();
- nh.readFrom(data);
- }
-
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putInt(this.vendor);
- }
-
- @Override
- public int hashCode() {
- final int prime = 379;
- int result = super.hashCode();
- result = prime * result + vendor;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionVendor)) {
- return false;
- }
- OFActionVendor other = (OFActionVendor) obj;
- if (vendor != other.vendor) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_action_vlan_vid
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public class OFActionVirtualLanIdentifier extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- protected short virtualLanIdentifier;
-
- public OFActionVirtualLanIdentifier() {
- super.setType(OFActionType.SET_VLAN_VID);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- /**
- * @return the virtualLanIdentifier
- */
- public short getVirtualLanIdentifier() {
- return virtualLanIdentifier;
- }
-
- /**
- * @param virtualLanIdentifier the virtualLanIdentifier to set
- */
- public void setVirtualLanIdentifier(short virtualLanIdentifier) {
- this.virtualLanIdentifier = virtualLanIdentifier;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.virtualLanIdentifier = data.getShort();
- data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.virtualLanIdentifier);
- data.putShort((short) 0);
- }
-
- @Override
- public int hashCode() {
- final int prime = 383;
- int result = super.hashCode();
- result = prime * result + virtualLanIdentifier;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionVirtualLanIdentifier)) {
- return false;
- }
- OFActionVirtualLanIdentifier other = (OFActionVirtualLanIdentifier) obj;
- if (virtualLanIdentifier != other.virtualLanIdentifier) {
- return false;
- }
- return true;
- }
-}
\ No newline at end of file
+++ /dev/null
-/**
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-package org.openflow.protocol.action;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_action_vlan_pcp
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public class OFActionVirtualLanPriorityCodePoint extends OFAction {
- public static int MINIMUM_LENGTH = 8;
-
- protected byte virtualLanPriorityCodePoint;
-
- public OFActionVirtualLanPriorityCodePoint() {
- super.setType(OFActionType.SET_VLAN_PCP);
- super.setLength((short) MINIMUM_LENGTH);
- }
-
- /**
- * @return the virtualLanPriorityCodePoint
- */
- public byte getVirtualLanPriorityCodePoint() {
- return virtualLanPriorityCodePoint;
- }
-
- /**
- * @param virtualLanPriorityCodePoint the virtualLanPriorityCodePoint to set
- */
- public void setVirtualLanPriorityCodePoint(byte virtualLanPriorityCodePoint) {
- this.virtualLanPriorityCodePoint = virtualLanPriorityCodePoint;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.virtualLanPriorityCodePoint = data.get();
- data.getShort(); // pad
- data.get(); // pad
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.put(this.virtualLanPriorityCodePoint);
- data.putShort((short) 0);
- data.put((byte) 0);
- }
-
- @Override
- public int hashCode() {
- final int prime = 389;
- int result = super.hashCode();
- result = prime * result + virtualLanPriorityCodePoint;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (!(obj instanceof OFActionVirtualLanPriorityCodePoint)) {
- return false;
- }
- OFActionVirtualLanPriorityCodePoint other = (OFActionVirtualLanPriorityCodePoint) obj;
- if (virtualLanPriorityCodePoint != other.virtualLanPriorityCodePoint) {
- return false;
- }
- return true;
- }
-}
\ No newline at end of file
+++ /dev/null
-package org.openflow.protocol.factory;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionType;
-import org.openflow.protocol.queue.OFQueueProperty;
-import org.openflow.protocol.queue.OFQueuePropertyType;
-import org.openflow.protocol.statistics.OFStatistics;
-import org.openflow.protocol.statistics.OFStatisticsType;
-import org.openflow.protocol.statistics.OFVendorStatistics;
-
-
-/**
- * A basic OpenFlow factory that supports naive creation of both Messages and
- * Actions.
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public class BasicFactory implements OFMessageFactory, OFActionFactory,
- OFQueuePropertyFactory, OFStatisticsFactory {
- @Override
- public OFMessage getMessage(OFType t) {
- return t.newInstance();
- }
-
- @Override
- public List<OFMessage> parseMessages(ByteBuffer data) {
- return parseMessages(data, 0);
- }
-
- @Override
- public List<OFMessage> parseMessages(ByteBuffer data, int limit) {
- List<OFMessage> results = new ArrayList<OFMessage>();
- OFMessage demux = new OFMessage();
- OFMessage ofm;
-
- while (limit == 0 || results.size() <= limit) {
- if (data.remaining() < OFMessage.MINIMUM_LENGTH)
- return results;
-
- data.mark();
- demux.readFrom(data);
- data.reset();
-
- if (demux.getLengthU() > data.remaining())
- return results;
-
- ofm = getMessage(demux.getType());
- if (ofm instanceof OFActionFactoryAware) {
- ((OFActionFactoryAware)ofm).setActionFactory(this);
- }
- if (ofm instanceof OFMessageFactoryAware) {
- ((OFMessageFactoryAware)ofm).setMessageFactory(this);
- }
- if (ofm instanceof OFQueuePropertyFactoryAware) {
- ((OFQueuePropertyFactoryAware)ofm).setQueuePropertyFactory(this);
- }
- if (ofm instanceof OFStatisticsFactoryAware) {
- ((OFStatisticsFactoryAware)ofm).setStatisticsFactory(this);
- }
- ofm.readFrom(data);
- if (OFMessage.class.equals(ofm.getClass())) {
- // advance the position for un-implemented messages
- data.position(data.position()+(ofm.getLengthU() -
- OFMessage.MINIMUM_LENGTH));
- }
- results.add(ofm);
- }
-
- return results;
- }
-
- @Override
- public OFAction getAction(OFActionType t) {
- return t.newInstance();
- }
-
- @Override
- public List<OFAction> parseActions(ByteBuffer data, int length) {
- return parseActions(data, length, 0);
- }
-
- @Override
- public List<OFAction> parseActions(ByteBuffer data, int length, int limit) {
- List<OFAction> results = new ArrayList<OFAction>();
- OFAction demux = new OFAction();
- OFAction ofa;
- int end = data.position() + length;
-
- while (limit == 0 || results.size() <= limit) {
- if (data.remaining() < OFAction.MINIMUM_LENGTH ||
- (data.position() + OFAction.MINIMUM_LENGTH) > end)
- return results;
-
- data.mark();
- demux.readFrom(data);
- data.reset();
-
- if (demux.getLengthU() > data.remaining() ||
- (data.position() + demux.getLengthU()) > end)
- return results;
-
- ofa = getAction(demux.getType());
- ofa.readFrom(data);
- if (OFAction.class.equals(ofa.getClass())) {
- // advance the position for un-implemented messages
- data.position(data.position()+(ofa.getLengthU() -
- OFAction.MINIMUM_LENGTH));
- }
- results.add(ofa);
- }
-
- return results;
- }
-
- @Override
- public OFActionFactory getActionFactory() {
- return this;
- }
-
- @Override
- public OFStatistics getStatistics(OFType t, OFStatisticsType st) {
- return st.newInstance(t);
- }
-
- @Override
- public List<OFStatistics> parseStatistics(OFType t, OFStatisticsType st,
- ByteBuffer data, int length) {
- return parseStatistics(t, st, data, length, 0);
- }
-
- /**
- * @param t
- * OFMessage type: should be one of stats_request or stats_reply
- * @param st
- * statistics type of this message, e.g., DESC, TABLE
- * @param data
- * buffer to read from
- * @param length
- * length of statistics
- * @param limit
- * number of statistics to grab; 0 == all
- *
- * @return list of statistics
- */
-
- @Override
- public List<OFStatistics> parseStatistics(OFType t, OFStatisticsType st,
- ByteBuffer data, int length, int limit) {
- List<OFStatistics> results = new ArrayList<OFStatistics>();
- OFStatistics statistics = getStatistics(t, st);
-
- int start = data.position();
- int count = 0;
-
- while (limit == 0 || results.size() <= limit) {
- // TODO Create a separate MUX/DEMUX path for vendor stats
- if (statistics instanceof OFVendorStatistics)
- ((OFVendorStatistics)statistics).setLength(length);
-
- /**
- * can't use data.remaining() here, b/c there could be other data
- * buffered past this message
- */
- if ((length - count) >= statistics.getLength()) {
- if (statistics instanceof OFActionFactoryAware)
- ((OFActionFactoryAware)statistics).setActionFactory(this);
- statistics.readFrom(data);
- results.add(statistics);
- count += statistics.getLength();
- statistics = getStatistics(t, st);
- } else {
- if (count < length) {
- /**
- * Nasty case: partial/incomplete statistic found even
- * though we have a full message. Found when NOX sent
- * agg_stats request with wrong agg statistics length (52
- * instead of 56)
- *
- * just throw the rest away, or we will break framing
- */
- data.position(start + length);
- }
- return results;
- }
- }
- return results; // empty; no statistics at all
- }
-
- @Override
- public OFQueueProperty getQueueProperty(OFQueuePropertyType t) {
- return t.newInstance();
- }
-
- @Override
- public List<OFQueueProperty> parseQueueProperties(ByteBuffer data,
- int length) {
- return parseQueueProperties(data, length, 0);
- }
-
- @Override
- public List<OFQueueProperty> parseQueueProperties(ByteBuffer data,
- int length, int limit) {
- List<OFQueueProperty> results = new ArrayList<OFQueueProperty>();
- OFQueueProperty demux = new OFQueueProperty();
- OFQueueProperty ofqp;
- int end = data.position() + length;
-
- while (limit == 0 || results.size() <= limit) {
- if (data.remaining() < OFQueueProperty.MINIMUM_LENGTH ||
- (data.position() + OFQueueProperty.MINIMUM_LENGTH) > end)
- return results;
-
- data.mark();
- demux.readFrom(data);
- data.reset();
-
- if (demux.getLengthU() > data.remaining() ||
- (data.position() + demux.getLengthU()) > end)
- return results;
-
- ofqp = getQueueProperty(demux.getType());
- ofqp.readFrom(data);
- if (OFQueueProperty.class.equals(ofqp.getClass())) {
- // advance the position for un-implemented messages
- data.position(data.position()+(ofqp.getLengthU() -
- OFQueueProperty.MINIMUM_LENGTH));
- }
- results.add(ofqp);
- }
-
- return results;
- }
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.action.OFActionType;
-
-
-/**
- * The interface to factories used for retrieving OFAction instances. All
- * methods are expected to be thread-safe.
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFActionFactory {
- /**
- * Retrieves an OFAction instance corresponding to the specified
- * OFActionType
- * @param t the type of the OFAction to be retrieved
- * @return an OFAction instance
- */
- public OFAction getAction(OFActionType t);
-
- /**
- * Attempts to parse and return all OFActions contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at
- * position+length.
- * @param data the ByteBuffer to parse for OpenFlow actions
- * @param length the number of Bytes to examine for OpenFlow actions
- * @return a list of OFAction instances
- */
- public List<OFAction> parseActions(ByteBuffer data, int length);
-
- /**
- * Attempts to parse and return all OFActions contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at
- * position+length.
- * @param data the ByteBuffer to parse for OpenFlow actions
- * @param length the number of Bytes to examine for OpenFlow actions
- * @param limit the maximum number of messages to return, 0 means no limit
- * @return a list of OFAction instances
- */
- public List<OFAction> parseActions(ByteBuffer data, int length, int limit);
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-/**
- * Objects implementing this interface are expected to be instantiated with an
- * instance of an OFActionFactory
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFActionFactoryAware {
- /**
- * Sets the OFActionFactory
- * @param actionFactory
- */
- public void setActionFactory(OFActionFactory actionFactory);
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.OFMessage;
-import org.openflow.protocol.OFType;
-
-
-/**
- * The interface to factories used for retrieving OFMessage instances. All
- * methods are expected to be thread-safe.
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFMessageFactory {
- /**
- * Retrieves an OFMessage instance corresponding to the specified OFType
- * @param t the type of the OFMessage to be retrieved
- * @return an OFMessage instance
- */
- public OFMessage getMessage(OFType t);
-
- /**
- * Attempts to parse and return all OFMessages contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at the
- * ByteBuffer's limit.
- * @param data the ByteBuffer to parse for an OpenFlow message
- * @return a list of OFMessage instances
- */
- public List<OFMessage> parseMessages(ByteBuffer data);
-
- /**
- * Attempts to parse and return all OFMessages contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at the
- * ByteBuffer's limit.
- * @param data the ByteBuffer to parse for an OpenFlow message
- * @param limit the maximum number of messages to return, 0 means no limit
- * @return a list of OFMessage instances
- */
- public List<OFMessage> parseMessages(ByteBuffer data, int limit);
-
- /**
- * Retrieves an OFActionFactory
- * @return an OFActionFactory
- */
- public OFActionFactory getActionFactory();
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.protocol.factory;
-
-/**
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public interface OFMessageFactoryAware {
-
- /**
- * Sets the message factory for this object
- *
- * @param factory
- */
- void setMessageFactory(OFMessageFactory factory);
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.queue.OFQueueProperty;
-import org.openflow.protocol.queue.OFQueuePropertyType;
-
-
-/**
- * The interface to factories used for retrieving OFQueueProperty instances. All
- * methods are expected to be thread-safe.
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFQueuePropertyFactory {
- /**
- * Retrieves an OFQueueProperty instance corresponding to the specified
- * OFQueuePropertyType
- * @param t the type of the OFQueueProperty to be retrieved
- * @return an OFQueueProperty instance
- */
- public OFQueueProperty getQueueProperty(OFQueuePropertyType t);
-
- /**
- * Attempts to parse and return all OFQueueProperties contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at
- * position+length.
- * @param data the ByteBuffer to parse for OpenFlow OFQueueProperties
- * @param length the number of Bytes to examine for OpenFlow OFQueueProperties
- * @return a list of OFQueueProperty instances
- */
- public List<OFQueueProperty> parseQueueProperties(ByteBuffer data, int length);
-
- /**
- * Attempts to parse and return all OFQueueProperties contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at
- * position+length.
- * @param data the ByteBuffer to parse for OpenFlow OFQueueProperties
- * @param length the number of Bytes to examine for OpenFlow OFQueueProperties
- * @param limit the maximum number of OFQueueProperties to return, 0 means no limit
- * @return a list of OFQueueProperty instances
- */
- public List<OFQueueProperty> parseQueueProperties(ByteBuffer data, int length, int limit);
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-/**
- * Objects implementing this interface are expected to be instantiated with an
- * instance of an OFQueuePropertyFactory
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFQueuePropertyFactoryAware {
- /**
- * Sets the OFQueuePropertyFactory
- * @param queuePropertyFactory
- */
- public void setQueuePropertyFactory(OFQueuePropertyFactory queuePropertyFactory);
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.OFType;
-import org.openflow.protocol.statistics.OFStatistics;
-import org.openflow.protocol.statistics.OFStatisticsType;
-
-
-/**
- * The interface to factories used for retrieving OFStatistics instances. All
- * methods are expected to be thread-safe.
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFStatisticsFactory {
- /**
- * Retrieves an OFStatistics instance corresponding to the specified
- * OFStatisticsType
- * @param t the type of the containing OFMessage, only accepts statistics
- * request or reply
- * @param st the type of the OFStatistics to be retrieved
- * @return an OFStatistics instance
- */
- public OFStatistics getStatistics(OFType t, OFStatisticsType st);
-
- /**
- * Attempts to parse and return all OFStatistics contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at
- * position+length.
- * @param t the type of the containing OFMessage, only accepts statistics
- * request or reply
- * @param st the type of the OFStatistics to be retrieved
- * @param data the ByteBuffer to parse for OpenFlow Statistics
- * @param length the number of Bytes to examine for OpenFlow Statistics
- * @return a list of OFStatistics instances
- */
- public List<OFStatistics> parseStatistics(OFType t,
- OFStatisticsType st, ByteBuffer data, int length);
-
- /**
- * Attempts to parse and return all OFStatistics contained in the given
- * ByteBuffer, beginning at the ByteBuffer's position, and ending at
- * position+length.
- * @param t the type of the containing OFMessage, only accepts statistics
- * request or reply
- * @param st the type of the OFStatistics to be retrieved
- * @param data the ByteBuffer to parse for OpenFlow Statistics
- * @param length the number of Bytes to examine for OpenFlow Statistics
- * @param limit the maximum number of messages to return, 0 means no limit
- * @return a list of OFStatistics instances
- */
- public List<OFStatistics> parseStatistics(OFType t,
- OFStatisticsType st, ByteBuffer data, int length, int limit);
-}
+++ /dev/null
-package org.openflow.protocol.factory;
-
-/**
- * Objects implementing this interface are expected to be instantiated with an
- * instance of an OFStatisticsFactory
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public interface OFStatisticsFactoryAware {
- /**
- * Sets the OFStatisticsFactory
- * @param statisticsFactory
- */
- public void setStatisticsFactory(OFStatisticsFactory statisticsFactory);
-}
+++ /dev/null
-package org.openflow.protocol.queue;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.openflow.protocol.factory.OFQueuePropertyFactory;
-import org.openflow.protocol.factory.OFQueuePropertyFactoryAware;
-import org.openflow.util.U16;
-
-/**
- * Corresponds to the struct ofp_packet_queue OpenFlow structure
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFPacketQueue implements Cloneable, OFQueuePropertyFactoryAware {
- public static int MINIMUM_LENGTH = 8;
-
- protected OFQueuePropertyFactory queuePropertyFactory;
-
- protected int queueId;
- protected short length;
- protected List<OFQueueProperty> properties;
-
- /**
- * @return the queueId
- */
- public int getQueueId() {
- return queueId;
- }
-
- /**
- * @param queueId the queueId to set
- */
- public OFPacketQueue setQueueId(int queueId) {
- this.queueId = queueId;
- return this;
- }
-
- /**
- * @return the length
- */
- public short getLength() {
- return length;
- }
-
- /**
- * @param length the length to set
- */
- public void setLength(short length) {
- this.length = length;
- }
-
- /**
- * @return the properties
- */
- public List<OFQueueProperty> getProperties() {
- return properties;
- }
-
- /**
- * @param properties the properties to set
- */
- public OFPacketQueue setProperties(List<OFQueueProperty> properties) {
- this.properties = properties;
- return this;
- }
-
- public void readFrom(ByteBuffer data) {
- this.queueId = data.getInt();
- this.length = data.getShort();
- data.getShort(); // pad
- if (this.queuePropertyFactory == null)
- throw new RuntimeException("OFQueuePropertyFactory not set");
- this.properties = queuePropertyFactory.parseQueueProperties(data,
- U16.f(this.length) - MINIMUM_LENGTH);
- }
-
- public void writeTo(ByteBuffer data) {
- data.putInt(this.queueId);
- data.putShort(this.length);
- data.putShort((short) 0); // pad
- if (this.properties != null) {
- for (OFQueueProperty queueProperty : this.properties) {
- queueProperty.writeTo(data);
- }
- }
- }
-
- @Override
- public int hashCode() {
- final int prime = 6367;
- int result = 1;
- result = prime * result + length;
- result = prime * result
- + ((properties == null) ? 0 : properties.hashCode());
- result = prime * result + queueId;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (!(obj instanceof OFPacketQueue))
- return false;
- OFPacketQueue other = (OFPacketQueue) obj;
- if (length != other.length)
- return false;
- if (properties == null) {
- if (other.properties != null)
- return false;
- } else if (!properties.equals(other.properties))
- return false;
- if (queueId != other.queueId)
- return false;
- return true;
- }
-
- @Override
- public OFPacketQueue clone() {
- try {
- OFPacketQueue clone = (OFPacketQueue) super.clone();
- if (this.properties != null) {
- List<OFQueueProperty> queueProps = new ArrayList<OFQueueProperty>();
- for (OFQueueProperty prop : this.properties) {
- queueProps.add(prop.clone());
- }
- clone.setProperties(queueProps);
- }
- return clone;
- } catch (CloneNotSupportedException e) {
- throw new RuntimeException(e);
- }
- }
-
- @Override
- public void setQueuePropertyFactory(
- OFQueuePropertyFactory queuePropertyFactory) {
- this.queuePropertyFactory = queuePropertyFactory;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return "OFPacketQueue [queueId=" + queueId + ", properties="
- + properties + "]";
- }
-}
+++ /dev/null
-package org.openflow.protocol.queue;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * Corresponds to the struct ofp_queue_prop_header OpenFlow structure
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueueProperty implements Cloneable {
- public static int MINIMUM_LENGTH = 8;
-
- protected OFQueuePropertyType type;
- protected short length;
-
- /**
- * @return the type
- */
- public OFQueuePropertyType getType() {
- return type;
- }
-
- /**
- * @param type the type to set
- */
- public void setType(OFQueuePropertyType type) {
- this.type = type;
- }
-
- /**
- * @return the length
- */
- public short getLength() {
- return length;
- }
-
- /**
- * Returns the unsigned length
- *
- * @return the length
- */
- public int getLengthU() {
- return U16.f(length);
- }
-
- /**
- * @param length the length to set
- */
- public void setLength(short length) {
- this.length = length;
- }
-
- public void readFrom(ByteBuffer data) {
- this.type = OFQueuePropertyType.valueOf(data.getShort());
- this.length = data.getShort();
- data.getInt(); // pad
- }
-
- public void writeTo(ByteBuffer data) {
- data.putShort(this.type.getTypeValue());
- data.putShort(this.length);
- data.putInt(0); // pad
- }
-
- @Override
- public int hashCode() {
- final int prime = 2777;
- int result = 1;
- result = prime * result + length;
- result = prime * result + ((type == null) ? 0 : type.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (!(obj instanceof OFQueueProperty))
- return false;
- OFQueueProperty other = (OFQueueProperty) obj;
- if (length != other.length)
- return false;
- if (type != other.type)
- return false;
- return true;
- }
-
- @Override
- protected OFQueueProperty clone() {
- try {
- return (OFQueueProperty) super.clone();
- } catch (CloneNotSupportedException e) {
- throw new RuntimeException(e);
- }
- }
-}
+++ /dev/null
-package org.openflow.protocol.queue;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.util.U16;
-
-/**
- * Corresponds to the struct struct ofp_queue_prop_min_rate OpenFlow structure
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueuePropertyMinRate extends OFQueueProperty {
- public static int MINIMUM_LENGTH = 16;
-
- protected short rate;
-
- /**
- *
- */
- public OFQueuePropertyMinRate() {
- super();
- this.type = OFQueuePropertyType.MIN_RATE;
- this.length = U16.t(MINIMUM_LENGTH);
- }
-
- /**
- * @return the rate
- */
- public short getRate() {
- return rate;
- }
-
- /**
- * @param rate the rate to set
- */
- public OFQueuePropertyMinRate setRate(short rate) {
- this.rate = rate;
- return this;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- super.readFrom(data);
- this.rate = data.getShort();
- data.getInt(); // pad
- data.getShort(); // pad
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- super.writeTo(data);
- data.putShort(this.rate);
- data.putInt(0); // pad
- data.putShort((short) 0); // pad
- }
-
- @Override
- public int hashCode() {
- final int prime = 3259;
- int result = super.hashCode();
- result = prime * result + rate;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (!(obj instanceof OFQueuePropertyMinRate))
- return false;
- OFQueuePropertyMinRate other = (OFQueuePropertyMinRate) obj;
- if (rate != other.rate)
- return false;
- return true;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- return "OFQueuePropertyMinRate [type=" + type + ", rate=" + U16.f(rate) + "]";
- }
-
-}
+++ /dev/null
-/**
- *
- */
-package org.openflow.protocol.queue;
-
-import java.lang.reflect.Constructor;
-
-import org.openflow.protocol.Instantiable;
-
-/**
- * List of OpenFlow Queue Property types and mappings to wire protocol value and
- * derived classes
- *
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueuePropertyType {
- public static OFQueuePropertyType NONE = new OFQueuePropertyType(0, "NONE",
- OFQueueProperty.class, new Instantiable<OFQueueProperty>() {
- @Override
- public OFQueueProperty instantiate() {
- return new OFQueueProperty();
- }
- });
-
- public static OFQueuePropertyType MIN_RATE = new OFQueuePropertyType(1, "MIN_RATE",
- OFQueuePropertyMinRate.class, new Instantiable<OFQueueProperty>() {
- @Override
- public OFQueueProperty instantiate() {
- return new OFQueuePropertyMinRate();
- }
- });
-
- protected static OFQueuePropertyType[] mapping;
-
- protected Class<? extends OFQueueProperty> clazz;
- protected Constructor<? extends OFQueueProperty> constructor;
- protected Instantiable<OFQueueProperty> instantiable;
- protected int minLen;
- protected String name;
- protected short type;
-
- /**
- * Store some information about the OpenFlow Queue Property type, including wire
- * protocol type number, length, and derived class
- *
- * @param type Wire protocol number associated with this OFQueuePropertyType
- * @param name The name of this type
- * @param clazz The Java class corresponding to this type of OpenFlow Queue Property
- * @param instantiable the instantiable for the OFQueueProperty this type represents
- */
- OFQueuePropertyType(int type, String name, Class<? extends OFQueueProperty> clazz, Instantiable<OFQueueProperty> instantiable) {
- this.type = (short) type;
- this.name = name;
- this.clazz = clazz;
- this.instantiable = instantiable;
- try {
- this.constructor = clazz.getConstructor(new Class[]{});
- } catch (Exception e) {
- throw new RuntimeException(
- "Failure getting constructor for class: " + clazz, e);
- }
- OFQueuePropertyType.addMapping(this.type, this);
- }
-
- /**
- * Adds a mapping from type value to OFQueuePropertyType enum
- *
- * @param i OpenFlow wire protocol Action type value
- * @param t type
- */
- static public void addMapping(short i, OFQueuePropertyType t) {
- if (mapping == null)
- mapping = new OFQueuePropertyType[16];
- OFQueuePropertyType.mapping[i] = t;
- }
-
- /**
- * Given a wire protocol OpenFlow type number, return the OFType associated
- * with it
- *
- * @param i wire protocol number
- * @return OFType enum type
- */
-
- static public OFQueuePropertyType valueOf(short i) {
- return OFQueuePropertyType.mapping[i];
- }
-
- /**
- * @return Returns the wire protocol value corresponding to this
- * OFQueuePropertyType
- */
- public short getTypeValue() {
- return this.type;
- }
-
- /**
- * @return return the OFQueueProperty subclass corresponding to this OFQueuePropertyType
- */
- public Class<? extends OFQueueProperty> toClass() {
- return clazz;
- }
-
- /**
- * Returns the no-argument Constructor of the implementation class for
- * this OFQueuePropertyType
- * @return the constructor
- */
- public Constructor<? extends OFQueueProperty> getConstructor() {
- return constructor;
- }
-
- /**
- * Returns a new instance of the OFQueueProperty represented by this OFQueuePropertyType
- * @return the new object
- */
- public OFQueueProperty newInstance() {
- return instantiable.instantiate();
- }
-
- /**
- * @return the instantiable
- */
- public Instantiable<OFQueueProperty> getInstantiable() {
- return instantiable;
- }
-
- /**
- * @param instantiable the instantiable to set
- */
- public void setInstantiable(Instantiable<OFQueueProperty> instantiable) {
- this.instantiable = instantiable;
- }
-
- public String getName() {
- return this.name;
- }
-
- @Override
- public String toString() {
- return this.name;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_aggregate_stats_reply structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFAggregateStatisticsReply implements OFStatistics {
- protected long packetCount;
- protected long byteCount;
- protected int flowCount;
-
- /**
- * @return the packetCount
- */
- public long getPacketCount() {
- return packetCount;
- }
-
- /**
- * @param packetCount the packetCount to set
- */
- public void setPacketCount(long packetCount) {
- this.packetCount = packetCount;
- }
-
- /**
- * @return the byteCount
- */
- public long getByteCount() {
- return byteCount;
- }
-
- /**
- * @param byteCount the byteCount to set
- */
- public void setByteCount(long byteCount) {
- this.byteCount = byteCount;
- }
-
- /**
- * @return the flowCount
- */
- public int getFlowCount() {
- return flowCount;
- }
-
- /**
- * @param flowCount the flowCount to set
- */
- public void setFlowCount(int flowCount) {
- this.flowCount = flowCount;
- }
-
- @Override
- public int getLength() {
- return 24;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.packetCount = data.getLong();
- this.byteCount = data.getLong();
- this.flowCount = data.getInt();
- data.getInt(); // pad
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putLong(this.packetCount);
- data.putLong(this.byteCount);
- data.putInt(this.flowCount);
- data.putInt(0); // pad
- }
-
- @Override
- public int hashCode() {
- final int prime = 397;
- int result = 1;
- result = prime * result + (int) (byteCount ^ (byteCount >>> 32));
- result = prime * result + flowCount;
- result = prime * result + (int) (packetCount ^ (packetCount >>> 32));
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFAggregateStatisticsReply)) {
- return false;
- }
- OFAggregateStatisticsReply other = (OFAggregateStatisticsReply) obj;
- if (byteCount != other.byteCount) {
- return false;
- }
- if (flowCount != other.flowCount) {
- return false;
- }
- if (packetCount != other.packetCount) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-import org.openflow.protocol.OFMatch;
-
-/**
- * Represents an ofp_aggregate_stats_request structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFAggregateStatisticsRequest implements OFStatistics {
- protected OFMatch match;
- protected byte tableId;
- protected short outPort;
-
- /**
- * @return the match
- */
- public OFMatch getMatch() {
- return match;
- }
-
- /**
- * @param match the match to set
- */
- public void setMatch(OFMatch match) {
- this.match = match;
- }
-
- /**
- * @return the tableId
- */
- public byte getTableId() {
- return tableId;
- }
-
- /**
- * @param tableId the tableId to set
- */
- public void setTableId(byte tableId) {
- this.tableId = tableId;
- }
-
- /**
- * @return the outPort
- */
- public short getOutPort() {
- return outPort;
- }
-
- /**
- * @param outPort the outPort to set
- */
- public void setOutPort(short outPort) {
- this.outPort = outPort;
- }
-
- @Override
- public int getLength() {
- return 44;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- if (this.match == null)
- this.match = new OFMatch();
- this.match.readFrom(data);
- this.tableId = data.get();
- data.get(); // pad
- this.outPort = data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- this.match.writeTo(data);
- data.put(this.tableId);
- data.put((byte) 0);
- data.putShort(this.outPort);
- }
-
- @Override
- public int hashCode() {
- final int prime = 401;
- int result = 1;
- result = prime * result + ((match == null) ? 0 : match.hashCode());
- result = prime * result + outPort;
- result = prime * result + tableId;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFAggregateStatisticsRequest)) {
- return false;
- }
- OFAggregateStatisticsRequest other = (OFAggregateStatisticsRequest) obj;
- if (match == null) {
- if (other.match != null) {
- return false;
- }
- } else if (!match.equals(other.match)) {
- return false;
- }
- if (outPort != other.outPort) {
- return false;
- }
- if (tableId != other.tableId) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-import org.openflow.util.StringByteSerializer;
-
-/**
- * Represents an ofp_desc_stats structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFDescriptionStatistics implements OFStatistics, Serializable {
- public static int DESCRIPTION_STRING_LENGTH = 256;
- public static int SERIAL_NUMBER_LENGTH = 32;
-
- protected String manufacturerDescription;
- protected String hardwareDescription;
- protected String softwareDescription;
- protected String serialNumber;
- protected String datapathDescription;
-
- /**
- * @return the manufacturerDescription
- */
- public String getManufacturerDescription() {
- return manufacturerDescription;
- }
-
- /**
- * @param manufacturerDescription the manufacturerDescription to set
- */
- public void setManufacturerDescription(String manufacturerDescription) {
- this.manufacturerDescription = manufacturerDescription;
- }
-
- /**
- * @return the hardwareDescription
- */
- public String getHardwareDescription() {
- return hardwareDescription;
- }
-
- /**
- * @param hardwareDescription the hardwareDescription to set
- */
- public void setHardwareDescription(String hardwareDescription) {
- this.hardwareDescription = hardwareDescription;
- }
-
- /**
- * @return the softwareDescription
- */
- public String getSoftwareDescription() {
- return softwareDescription;
- }
-
- /**
- * @param softwareDescription the softwareDescription to set
- */
- public void setSoftwareDescription(String softwareDescription) {
- this.softwareDescription = softwareDescription;
- }
-
- /**
- * @return the serialNumber
- */
- public String getSerialNumber() {
- if (serialNumber.equals("None"))
- return "";
- return serialNumber;
- }
-
- /**
- * @param serialNumber the serialNumber to set
- */
- public void setSerialNumber(String serialNumber) {
- this.serialNumber = serialNumber;
- }
-
- /**
- * @return the datapathDescription
- */
- public String getDatapathDescription() {
- return datapathDescription;
- }
-
- /**
- * @param datapathDescription the datapathDescription to set
- */
- public void setDatapathDescription(String datapathDescription) {
- this.datapathDescription = datapathDescription;
- }
-
- @Override
- public int getLength() {
- return 1056;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.manufacturerDescription = StringByteSerializer.readFrom(data,
- DESCRIPTION_STRING_LENGTH);
- this.hardwareDescription = StringByteSerializer.readFrom(data,
- DESCRIPTION_STRING_LENGTH);
- this.softwareDescription = StringByteSerializer.readFrom(data,
- DESCRIPTION_STRING_LENGTH);
- this.serialNumber = StringByteSerializer.readFrom(data,
- SERIAL_NUMBER_LENGTH);
- this.datapathDescription = StringByteSerializer.readFrom(data,
- DESCRIPTION_STRING_LENGTH);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- StringByteSerializer.writeTo(data, DESCRIPTION_STRING_LENGTH,
- this.manufacturerDescription);
- StringByteSerializer.writeTo(data, DESCRIPTION_STRING_LENGTH,
- this.hardwareDescription);
- StringByteSerializer.writeTo(data, DESCRIPTION_STRING_LENGTH,
- this.softwareDescription);
- StringByteSerializer.writeTo(data, SERIAL_NUMBER_LENGTH,
- this.serialNumber);
- StringByteSerializer.writeTo(data, DESCRIPTION_STRING_LENGTH,
- this.datapathDescription);
- }
-
- @Override
- public int hashCode() {
- final int prime = 409;
- int result = 1;
- result = prime
- * result
- + ((datapathDescription == null) ? 0 : datapathDescription
- .hashCode());
- result = prime
- * result
- + ((hardwareDescription == null) ? 0 : hardwareDescription
- .hashCode());
- result = prime
- * result
- + ((manufacturerDescription == null) ? 0
- : manufacturerDescription.hashCode());
- result = prime * result
- + ((serialNumber == null) ? 0 : serialNumber.hashCode());
- result = prime
- * result
- + ((softwareDescription == null) ? 0 : softwareDescription
- .hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFDescriptionStatistics)) {
- return false;
- }
- OFDescriptionStatistics other = (OFDescriptionStatistics) obj;
- if (datapathDescription == null) {
- if (other.datapathDescription != null) {
- return false;
- }
- } else if (!datapathDescription.equals(other.datapathDescription)) {
- return false;
- }
- if (hardwareDescription == null) {
- if (other.hardwareDescription != null) {
- return false;
- }
- } else if (!hardwareDescription.equals(other.hardwareDescription)) {
- return false;
- }
- if (manufacturerDescription == null) {
- if (other.manufacturerDescription != null) {
- return false;
- }
- } else if (!manufacturerDescription
- .equals(other.manufacturerDescription)) {
- return false;
- }
- if (serialNumber == null) {
- if (other.serialNumber != null) {
- return false;
- }
- } else if (!serialNumber.equals(other.serialNumber)) {
- return false;
- }
- if (softwareDescription == null) {
- if (other.softwareDescription != null) {
- return false;
- }
- } else if (!softwareDescription.equals(other.softwareDescription)) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.OFMatch;
-import org.openflow.protocol.action.OFAction;
-import org.openflow.protocol.factory.OFActionFactory;
-import org.openflow.protocol.factory.OFActionFactoryAware;
-import org.openflow.util.U16;
-
-/**
- * Represents an ofp_flow_stats structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFFlowStatisticsReply implements OFStatistics, OFActionFactoryAware, Serializable {
- public static int MINIMUM_LENGTH = 88;
-
- protected transient OFActionFactory actionFactory;
- protected short length = (short) MINIMUM_LENGTH;
- protected byte tableId;
- protected OFMatch match;
- protected int durationSeconds;
- protected int durationNanoseconds;
- protected short priority;
- protected short idleTimeout;
- protected short hardTimeout;
- protected long cookie;
- protected long packetCount;
- protected long byteCount;
- protected List<OFAction> actions;
-
- /**
- * @return the tableId
- */
- public byte getTableId() {
- return tableId;
- }
-
- /**
- * @param tableId the tableId to set
- */
- public void setTableId(byte tableId) {
- this.tableId = tableId;
- }
-
- /**
- * @return the match
- */
- public OFMatch getMatch() {
- return match;
- }
-
- /**
- * @param match the match to set
- */
- public void setMatch(OFMatch match) {
- this.match = match;
- }
-
- /**
- * @return the durationSeconds
- */
- public int getDurationSeconds() {
- return durationSeconds;
- }
-
- /**
- * @param durationSeconds the durationSeconds to set
- */
- public void setDurationSeconds(int durationSeconds) {
- this.durationSeconds = durationSeconds;
- }
-
- /**
- * @return the durationNanoseconds
- */
- public int getDurationNanoseconds() {
- return durationNanoseconds;
- }
-
- /**
- * @param durationNanoseconds the durationNanoseconds to set
- */
- public void setDurationNanoseconds(int durationNanoseconds) {
- this.durationNanoseconds = durationNanoseconds;
- }
-
- /**
- * @return the priority
- */
- public short getPriority() {
- return priority;
- }
-
- /**
- * @param priority the priority to set
- */
- public void setPriority(short priority) {
- this.priority = priority;
- }
-
- /**
- * @return the idleTimeout
- */
- public short getIdleTimeout() {
- return idleTimeout;
- }
-
- /**
- * @param idleTimeout the idleTimeout to set
- */
- public void setIdleTimeout(short idleTimeout) {
- this.idleTimeout = idleTimeout;
- }
-
- /**
- * @return the hardTimeout
- */
- public short getHardTimeout() {
- return hardTimeout;
- }
-
- /**
- * @param hardTimeout the hardTimeout to set
- */
- public void setHardTimeout(short hardTimeout) {
- this.hardTimeout = hardTimeout;
- }
-
- /**
- * @return the cookie
- */
- public long getCookie() {
- return cookie;
- }
-
- /**
- * @param cookie the cookie to set
- */
- public void setCookie(long cookie) {
- this.cookie = cookie;
- }
-
- /**
- * @return the packetCount
- */
- public long getPacketCount() {
- return packetCount;
- }
-
- /**
- * @param packetCount the packetCount to set
- */
- public void setPacketCount(long packetCount) {
- this.packetCount = packetCount;
- }
-
- /**
- * @return the byteCount
- */
- public long getByteCount() {
- return byteCount;
- }
-
- /**
- * @param byteCount the byteCount to set
- */
- public void setByteCount(long byteCount) {
- this.byteCount = byteCount;
- }
-
- /**
- * @param length the length to set
- */
- public void setLength(short length) {
- this.length = length;
- }
-
- @Override
- public int getLength() {
- return U16.f(length);
- }
-
- /**
- * @param actionFactory the actionFactory to set
- */
- @Override
- public void setActionFactory(OFActionFactory actionFactory) {
- this.actionFactory = actionFactory;
- }
-
- /**
- * @return the actions
- */
- public List<OFAction> getActions() {
- return actions;
- }
-
- /**
- * @param actions the actions to set
- */
- public void setActions(List<OFAction> actions) {
- this.actions = actions;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.length = data.getShort();
- this.tableId = data.get();
- data.get(); // pad
- if (this.match == null)
- this.match = new OFMatch();
- this.match.readFrom(data);
- this.durationSeconds = data.getInt();
- this.durationNanoseconds = data.getInt();
- this.priority = data.getShort();
- this.idleTimeout = data.getShort();
- this.hardTimeout = data.getShort();
- data.getInt(); // pad
- data.getShort(); // pad
- this.cookie = data.getLong();
- this.packetCount = data.getLong();
- this.byteCount = data.getLong();
- if (this.actionFactory == null)
- throw new RuntimeException("OFActionFactory not set");
- this.actions = this.actionFactory.parseActions(data, getLength() -
- MINIMUM_LENGTH);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putShort(this.length);
- data.put(this.tableId);
- data.put((byte) 0);
- this.match.writeTo(data);
- data.putInt(this.durationSeconds);
- data.putInt(this.durationNanoseconds);
- data.putShort(this.priority);
- data.putShort(this.idleTimeout);
- data.putShort(this.hardTimeout);
- data.getInt(); // pad
- data.getShort(); // pad
- data.putLong(this.cookie);
- data.putLong(this.packetCount);
- data.putLong(this.byteCount);
- if (actions != null) {
- for (OFAction action : actions) {
- action.writeTo(data);
- }
- }
- }
-
- @Override
- public int hashCode() {
- final int prime = 419;
- int result = 1;
- result = prime * result + (int) (byteCount ^ (byteCount >>> 32));
- result = prime * result + (int) (cookie ^ (cookie >>> 32));
- result = prime * result + durationNanoseconds;
- result = prime * result + durationSeconds;
- result = prime * result + hardTimeout;
- result = prime * result + idleTimeout;
- result = prime * result + length;
- result = prime * result + ((match == null) ? 0 : match.hashCode());
- result = prime * result + (int) (packetCount ^ (packetCount >>> 32));
- result = prime * result + priority;
- result = prime * result + tableId;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFFlowStatisticsReply)) {
- return false;
- }
- OFFlowStatisticsReply other = (OFFlowStatisticsReply) obj;
- if (byteCount != other.byteCount) {
- return false;
- }
- if (cookie != other.cookie) {
- return false;
- }
- if (durationNanoseconds != other.durationNanoseconds) {
- return false;
- }
- if (durationSeconds != other.durationSeconds) {
- return false;
- }
- if (hardTimeout != other.hardTimeout) {
- return false;
- }
- if (idleTimeout != other.idleTimeout) {
- return false;
- }
- if (length != other.length) {
- return false;
- }
- if (match == null) {
- if (other.match != null) {
- return false;
- }
- } else if (!match.equals(other.match)) {
- return false;
- }
- if (packetCount != other.packetCount) {
- return false;
- }
- if (priority != other.priority) {
- return false;
- }
- if (tableId != other.tableId) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-import org.openflow.protocol.OFMatch;
-
-/**
- * Represents an ofp_flow_stats_request structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFFlowStatisticsRequest implements OFStatistics, Serializable {
- protected OFMatch match;
- protected byte tableId;
- protected short outPort;
-
- /**
- * @return the match
- */
- public OFMatch getMatch() {
- return match;
- }
-
- /**
- * @param match the match to set
- */
- public void setMatch(OFMatch match) {
- this.match = match;
- }
-
- /**
- * @return the tableId
- */
- public byte getTableId() {
- return tableId;
- }
-
- /**
- * @param tableId the tableId to set
- */
- public void setTableId(byte tableId) {
- this.tableId = tableId;
- }
-
- /**
- * @return the outPort
- */
- public short getOutPort() {
- return outPort;
- }
-
- /**
- * @param outPort the outPort to set
- */
- public void setOutPort(short outPort) {
- this.outPort = outPort;
- }
-
- @Override
- public int getLength() {
- return 44;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- if (this.match == null)
- this.match = new OFMatch();
- this.match.readFrom(data);
- this.tableId = data.get();
- data.get(); // pad
- this.outPort = data.getShort();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- this.match.writeTo(data);
- data.put(this.tableId);
- data.put((byte) 0);
- data.putShort(this.outPort);
- }
-
- @Override
- public int hashCode() {
- final int prime = 421;
- int result = 1;
- result = prime * result + ((match == null) ? 0 : match.hashCode());
- result = prime * result + outPort;
- result = prime * result + tableId;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFFlowStatisticsRequest)) {
- return false;
- }
- OFFlowStatisticsRequest other = (OFFlowStatisticsRequest) obj;
- if (match == null) {
- if (other.match != null) {
- return false;
- }
- } else if (!match.equals(other.match)) {
- return false;
- }
- if (outPort != other.outPort) {
- return false;
- }
- if (tableId != other.tableId) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_port_stats structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFPortStatisticsReply implements OFStatistics {
- protected short portNumber;
- protected long receivePackets;
- protected long transmitPackets;
- protected long receiveBytes;
- protected long transmitBytes;
- protected long receiveDropped;
- protected long transmitDropped;
- protected long receiveErrors;
- protected long transmitErrors;
- protected long receiveFrameErrors;
- protected long receiveOverrunErrors;
- protected long receiveCRCErrors;
- protected long collisions;
-
- /**
- * @return the portNumber
- */
- public short getPortNumber() {
- return portNumber;
- }
-
- /**
- * @param portNumber the portNumber to set
- */
- public void setPortNumber(short portNumber) {
- this.portNumber = portNumber;
- }
-
- /**
- * @return the receivePackets
- */
- public long getreceivePackets() {
- return receivePackets;
- }
-
- /**
- * @param receivePackets the receivePackets to set
- */
- public void setreceivePackets(long receivePackets) {
- this.receivePackets = receivePackets;
- }
-
- /**
- * @return the transmitPackets
- */
- public long getTransmitPackets() {
- return transmitPackets;
- }
-
- /**
- * @param transmitPackets the transmitPackets to set
- */
- public void setTransmitPackets(long transmitPackets) {
- this.transmitPackets = transmitPackets;
- }
-
- /**
- * @return the receiveBytes
- */
- public long getReceiveBytes() {
- return receiveBytes;
- }
-
- /**
- * @param receiveBytes the receiveBytes to set
- */
- public void setReceiveBytes(long receiveBytes) {
- this.receiveBytes = receiveBytes;
- }
-
- /**
- * @return the transmitBytes
- */
- public long getTransmitBytes() {
- return transmitBytes;
- }
-
- /**
- * @param transmitBytes the transmitBytes to set
- */
- public void setTransmitBytes(long transmitBytes) {
- this.transmitBytes = transmitBytes;
- }
-
- /**
- * @return the receiveDropped
- */
- public long getReceiveDropped() {
- return receiveDropped;
- }
-
- /**
- * @param receiveDropped the receiveDropped to set
- */
- public void setReceiveDropped(long receiveDropped) {
- this.receiveDropped = receiveDropped;
- }
-
- /**
- * @return the transmitDropped
- */
- public long getTransmitDropped() {
- return transmitDropped;
- }
-
- /**
- * @param transmitDropped the transmitDropped to set
- */
- public void setTransmitDropped(long transmitDropped) {
- this.transmitDropped = transmitDropped;
- }
-
- /**
- * @return the receiveErrors
- */
- public long getreceiveErrors() {
- return receiveErrors;
- }
-
- /**
- * @param receiveErrors the receiveErrors to set
- */
- public void setreceiveErrors(long receiveErrors) {
- this.receiveErrors = receiveErrors;
- }
-
- /**
- * @return the transmitErrors
- */
- public long getTransmitErrors() {
- return transmitErrors;
- }
-
- /**
- * @param transmitErrors the transmitErrors to set
- */
- public void setTransmitErrors(long transmitErrors) {
- this.transmitErrors = transmitErrors;
- }
-
- /**
- * @return the receiveFrameErrors
- */
- public long getReceiveFrameErrors() {
- return receiveFrameErrors;
- }
-
- /**
- * @param receiveFrameErrors the receiveFrameErrors to set
- */
- public void setReceiveFrameErrors(long receiveFrameErrors) {
- this.receiveFrameErrors = receiveFrameErrors;
- }
-
- /**
- * @return the receiveOverrunErrors
- */
- public long getReceiveOverrunErrors() {
- return receiveOverrunErrors;
- }
-
- /**
- * @param receiveOverrunErrors the receiveOverrunErrors to set
- */
- public void setReceiveOverrunErrors(long receiveOverrunErrors) {
- this.receiveOverrunErrors = receiveOverrunErrors;
- }
-
- /**
- * @return the receiveCRCErrors
- */
- public long getReceiveCRCErrors() {
- return receiveCRCErrors;
- }
-
- /**
- * @param receiveCRCErrors the receiveCRCErrors to set
- */
- public void setReceiveCRCErrors(long receiveCRCErrors) {
- this.receiveCRCErrors = receiveCRCErrors;
- }
-
- /**
- * @return the collisions
- */
- public long getCollisions() {
- return collisions;
- }
-
- /**
- * @param collisions the collisions to set
- */
- public void setCollisions(long collisions) {
- this.collisions = collisions;
- }
-
- @Override
- public int getLength() {
- return 104;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.portNumber = data.getShort();
- data.getShort(); // pad
- data.getInt(); // pad
- this.receivePackets = data.getLong();
- this.transmitPackets = data.getLong();
- this.receiveBytes = data.getLong();
- this.transmitBytes = data.getLong();
- this.receiveDropped = data.getLong();
- this.transmitDropped = data.getLong();
- this.receiveErrors = data.getLong();
- this.transmitErrors = data.getLong();
- this.receiveFrameErrors = data.getLong();
- this.receiveOverrunErrors = data.getLong();
- this.receiveCRCErrors = data.getLong();
- this.collisions = data.getLong();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putShort(this.portNumber);
- data.putShort((short) 0); // pad
- data.putInt(0); // pad
- data.putLong(this.receivePackets);
- data.putLong(this.transmitPackets);
- data.putLong(this.receiveBytes);
- data.putLong(this.transmitBytes);
- data.putLong(this.receiveDropped);
- data.putLong(this.transmitDropped);
- data.putLong(this.receiveErrors);
- data.putLong(this.transmitErrors);
- data.putLong(this.receiveFrameErrors);
- data.putLong(this.receiveOverrunErrors);
- data.putLong(this.receiveCRCErrors);
- data.putLong(this.collisions);
- }
-
- @Override
- public int hashCode() {
- final int prime = 431;
- int result = 1;
- result = prime * result + (int) (collisions ^ (collisions >>> 32));
- result = prime * result + portNumber;
- result = prime * result
- + (int) (receivePackets ^ (receivePackets >>> 32));
- result = prime * result + (int) (receiveBytes ^ (receiveBytes >>> 32));
- result = prime * result
- + (int) (receiveCRCErrors ^ (receiveCRCErrors >>> 32));
- result = prime * result
- + (int) (receiveDropped ^ (receiveDropped >>> 32));
- result = prime * result
- + (int) (receiveFrameErrors ^ (receiveFrameErrors >>> 32));
- result = prime * result
- + (int) (receiveOverrunErrors ^ (receiveOverrunErrors >>> 32));
- result = prime * result
- + (int) (receiveErrors ^ (receiveErrors >>> 32));
- result = prime * result
- + (int) (transmitBytes ^ (transmitBytes >>> 32));
- result = prime * result
- + (int) (transmitDropped ^ (transmitDropped >>> 32));
- result = prime * result
- + (int) (transmitErrors ^ (transmitErrors >>> 32));
- result = prime * result
- + (int) (transmitPackets ^ (transmitPackets >>> 32));
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFPortStatisticsReply)) {
- return false;
- }
- OFPortStatisticsReply other = (OFPortStatisticsReply) obj;
- if (collisions != other.collisions) {
- return false;
- }
- if (portNumber != other.portNumber) {
- return false;
- }
- if (receivePackets != other.receivePackets) {
- return false;
- }
- if (receiveBytes != other.receiveBytes) {
- return false;
- }
- if (receiveCRCErrors != other.receiveCRCErrors) {
- return false;
- }
- if (receiveDropped != other.receiveDropped) {
- return false;
- }
- if (receiveFrameErrors != other.receiveFrameErrors) {
- return false;
- }
- if (receiveOverrunErrors != other.receiveOverrunErrors) {
- return false;
- }
- if (receiveErrors != other.receiveErrors) {
- return false;
- }
- if (transmitBytes != other.transmitBytes) {
- return false;
- }
- if (transmitDropped != other.transmitDropped) {
- return false;
- }
- if (transmitErrors != other.transmitErrors) {
- return false;
- }
- if (transmitPackets != other.transmitPackets) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_port_stats_request structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFPortStatisticsRequest implements OFStatistics {
- protected short portNumber;
-
- /**
- * @return the portNumber
- */
- public short getPortNumber() {
- return portNumber;
- }
-
- /**
- * @param portNumber the portNumber to set
- */
- public void setPortNumber(short portNumber) {
- this.portNumber = portNumber;
- }
-
- @Override
- public int getLength() {
- return 8;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.portNumber = data.getShort();
- data.getShort(); // pad
- data.getInt(); // pad
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putShort(this.portNumber);
- data.putShort((short) 0); // pad
- data.putInt(0); // pad
- }
-
- @Override
- public int hashCode() {
- final int prime = 433;
- int result = 1;
- result = prime * result + portNumber;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFPortStatisticsRequest)) {
- return false;
- }
- OFPortStatisticsRequest other = (OFPortStatisticsRequest) obj;
- if (portNumber != other.portNumber) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_queue_stats structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueueStatisticsReply implements OFStatistics {
- protected short portNumber;
- protected int queueId;
- protected long transmitBytes;
- protected long transmitPackets;
- protected long transmitErrors;
-
- /**
- * @return the portNumber
- */
- public short getPortNumber() {
- return portNumber;
- }
-
- /**
- * @param portNumber the portNumber to set
- */
- public void setPortNumber(short portNumber) {
- this.portNumber = portNumber;
- }
-
- /**
- * @return the queueId
- */
- public int getQueueId() {
- return queueId;
- }
-
- /**
- * @param queueId the queueId to set
- */
- public void setQueueId(int queueId) {
- this.queueId = queueId;
- }
-
- /**
- * @return the transmitBytes
- */
- public long getTransmitBytes() {
- return transmitBytes;
- }
-
- /**
- * @param transmitBytes the transmitBytes to set
- */
- public void setTransmitBytes(long transmitBytes) {
- this.transmitBytes = transmitBytes;
- }
-
- /**
- * @return the transmitPackets
- */
- public long getTransmitPackets() {
- return transmitPackets;
- }
-
- /**
- * @param transmitPackets the transmitPackets to set
- */
- public void setTransmitPackets(long transmitPackets) {
- this.transmitPackets = transmitPackets;
- }
-
- /**
- * @return the transmitErrors
- */
- public long getTransmitErrors() {
- return transmitErrors;
- }
-
- /**
- * @param transmitErrors the transmitErrors to set
- */
- public void setTransmitErrors(long transmitErrors) {
- this.transmitErrors = transmitErrors;
- }
-
- @Override
- public int getLength() {
- return 32;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.portNumber = data.getShort();
- data.getShort(); // pad
- this.queueId = data.getInt();
- this.transmitBytes = data.getLong();
- this.transmitPackets = data.getLong();
- this.transmitErrors = data.getLong();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putShort(this.portNumber);
- data.putShort((short) 0); // pad
- data.putInt(this.queueId);
- data.putLong(this.transmitBytes);
- data.putLong(this.transmitPackets);
- data.putLong(this.transmitErrors);
- }
-
- @Override
- public int hashCode() {
- final int prime = 439;
- int result = 1;
- result = prime * result + portNumber;
- result = prime * result + queueId;
- result = prime * result
- + (int) (transmitBytes ^ (transmitBytes >>> 32));
- result = prime * result
- + (int) (transmitErrors ^ (transmitErrors >>> 32));
- result = prime * result
- + (int) (transmitPackets ^ (transmitPackets >>> 32));
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFQueueStatisticsReply)) {
- return false;
- }
- OFQueueStatisticsReply other = (OFQueueStatisticsReply) obj;
- if (portNumber != other.portNumber) {
- return false;
- }
- if (queueId != other.queueId) {
- return false;
- }
- if (transmitBytes != other.transmitBytes) {
- return false;
- }
- if (transmitErrors != other.transmitErrors) {
- return false;
- }
- if (transmitPackets != other.transmitPackets) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents an ofp_queue_stats_request structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFQueueStatisticsRequest implements OFStatistics {
- protected short portNumber;
- protected int queueId;
-
- /**
- * @return the portNumber
- */
- public short getPortNumber() {
- return portNumber;
- }
-
- /**
- * @param portNumber the portNumber to set
- */
- public void setPortNumber(short portNumber) {
- this.portNumber = portNumber;
- }
-
- /**
- * @return the queueId
- */
- public int getQueueId() {
- return queueId;
- }
-
- /**
- * @param queueId the queueId to set
- */
- public void setQueueId(int queueId) {
- this.queueId = queueId;
- }
-
- @Override
- public int getLength() {
- return 8;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.portNumber = data.getShort();
- data.getShort(); // pad
- this.queueId = data.getInt();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putShort(this.portNumber);
- data.putShort((short) 0); // pad
- data.putInt(this.queueId);
- }
-
- @Override
- public int hashCode() {
- final int prime = 443;
- int result = 1;
- result = prime * result + portNumber;
- result = prime * result + queueId;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFQueueStatisticsRequest)) {
- return false;
- }
- OFQueueStatisticsRequest other = (OFQueueStatisticsRequest) obj;
- if (portNumber != other.portNumber) {
- return false;
- }
- if (queueId != other.queueId) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.nio.ByteBuffer;
-
-/**
- * The base class for all OpenFlow statistics.
- *
- * @author David Erickson (daviderickson@cs.stanford.edu) - Mar 11, 2010
- */
-public interface OFStatistics {
- /**
- * Returns the wire length of this message in bytes
- * @return the length
- */
- public int getLength();
-
- /**
- * Read this message off the wire from the specified ByteBuffer
- * @param data
- */
- public void readFrom(ByteBuffer data);
-
- /**
- * Write this message's binary format to the specified ByteBuffer
- * @param data
- */
- public void writeTo(ByteBuffer data);
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.lang.reflect.Constructor;
-
-import org.openflow.protocol.Instantiable;
-import org.openflow.protocol.OFType;
-
-public enum OFStatisticsType {
- DESC (0, OFDescriptionStatistics.class, OFDescriptionStatistics.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFDescriptionStatistics();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFDescriptionStatistics();
- }
- }),
- FLOW (1, OFFlowStatisticsRequest.class, OFFlowStatisticsReply.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFFlowStatisticsRequest();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFFlowStatisticsReply();
- }
- }),
- AGGREGATE (2, OFAggregateStatisticsRequest.class, OFAggregateStatisticsReply.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFAggregateStatisticsRequest();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFAggregateStatisticsReply();
- }
- }),
- TABLE (3, OFTableStatistics.class, OFTableStatistics.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFTableStatistics();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFTableStatistics();
- }
- }),
- PORT (4, OFPortStatisticsRequest.class, OFPortStatisticsReply.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFPortStatisticsRequest();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFPortStatisticsReply();
- }
- }),
- QUEUE (5, OFQueueStatisticsRequest.class, OFQueueStatisticsReply.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFQueueStatisticsRequest();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFQueueStatisticsReply();
- }
- }),
- VENDOR (0xffff, OFVendorStatistics.class, OFVendorStatistics.class,
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFVendorStatistics();
- }
- },
- new Instantiable<OFStatistics>() {
- @Override
- public OFStatistics instantiate() {
- return new OFVendorStatistics();
- }
- });
-
- static OFStatisticsType[] requestMapping;
- static OFStatisticsType[] replyMapping;
-
- protected Class<? extends OFStatistics> requestClass;
- protected Constructor<? extends OFStatistics> requestConstructor;
- protected Instantiable<OFStatistics> requestInstantiable;
- protected Class<? extends OFStatistics> replyClass;
- protected Constructor<? extends OFStatistics> replyConstructor;
- protected Instantiable<OFStatistics> replyInstantiable;
- protected short type;
-
- /**
- * Store some information about the OpenFlow Statistic type, including wire
- * protocol type number, and derived class
- *
- * @param type Wire protocol number associated with this OFStatisticsType
- * @param requestClass The Statistics Java class to return when the
- * containing OFType is STATS_REQUEST
- * @param replyClass The Statistics Java class to return when the
- * containing OFType is STATS_REPLY
- */
- OFStatisticsType(int type, Class<? extends OFStatistics> requestClass,
- Class<? extends OFStatistics> replyClass,
- Instantiable<OFStatistics> requestInstantiable,
- Instantiable<OFStatistics> replyInstantiable) {
- this.type = (short) type;
- this.requestClass = requestClass;
- try {
- this.requestConstructor = requestClass.getConstructor(new Class[]{});
- } catch (Exception e) {
- throw new RuntimeException(
- "Failure getting constructor for class: " + requestClass, e);
- }
-
- this.replyClass = replyClass;
- try {
- this.replyConstructor = replyClass.getConstructor(new Class[]{});
- } catch (Exception e) {
- throw new RuntimeException(
- "Failure getting constructor for class: " + replyClass, e);
- }
- this.requestInstantiable = requestInstantiable;
- this.replyInstantiable = replyInstantiable;
- OFStatisticsType.addMapping(this.type, OFType.STATS_REQUEST, this);
- OFStatisticsType.addMapping(this.type, OFType.STATS_REPLY, this);
- }
-
- /**
- * Adds a mapping from type value to OFStatisticsType enum
- *
- * @param i OpenFlow wire protocol type
- * @param t type of containing OFMessage, only accepts STATS_REQUEST or
- * STATS_REPLY
- * @param st type
- */
- static public void addMapping(short i, OFType t, OFStatisticsType st) {
- if (i < 0)
- i = (short) (16+i);
- if (t == OFType.STATS_REQUEST) {
- if (requestMapping == null)
- requestMapping = new OFStatisticsType[16];
- OFStatisticsType.requestMapping[i] = st;
- } else if (t == OFType.STATS_REPLY){
- if (replyMapping == null)
- replyMapping = new OFStatisticsType[16];
- OFStatisticsType.replyMapping[i] = st;
- } else {
- throw new RuntimeException(t.toString() + " is an invalid OFType");
- }
- }
-
- /**
- * Remove a mapping from type value to OFStatisticsType enum
- *
- * @param i OpenFlow wire protocol type
- * @param t type of containing OFMessage, only accepts STATS_REQUEST or
- * STATS_REPLY
- */
- static public void removeMapping(short i, OFType t) {
- if (i < 0)
- i = (short) (16+i);
- if (t == OFType.STATS_REQUEST) {
- requestMapping[i] = null;
- } else if (t == OFType.STATS_REPLY){
- replyMapping[i] = null;
- } else {
- throw new RuntimeException(t.toString() + " is an invalid OFType");
- }
- }
-
- /**
- * Given a wire protocol OpenFlow type number, return the OFStatisticsType
- * associated with it
- *
- * @param i wire protocol number
- * @param t type of containing OFMessage, only accepts STATS_REQUEST or
- * STATS_REPLY
- * @return OFStatisticsType enum type
- */
- static public OFStatisticsType valueOf(short i, OFType t) {
- if (i < 0)
- i = (short) (16+i);
- if (t == OFType.STATS_REQUEST) {
- return requestMapping[i];
- } else if (t == OFType.STATS_REPLY){
- return replyMapping[i];
- } else {
- throw new RuntimeException(t.toString() + " is an invalid OFType");
- }
- }
-
- /**
- * @return Returns the wire protocol value corresponding to this
- * OFStatisticsType
- */
- public short getTypeValue() {
- return this.type;
- }
-
- /**
- * @param t type of containing OFMessage, only accepts STATS_REQUEST or
- * STATS_REPLY
- * @return return the OFMessage subclass corresponding to this
- * OFStatisticsType
- */
- public Class<? extends OFStatistics> toClass(OFType t) {
- if (t == OFType.STATS_REQUEST) {
- return requestClass;
- } else if (t == OFType.STATS_REPLY){
- return replyClass;
- } else {
- throw new RuntimeException(t.toString() + " is an invalid OFType");
- }
- }
-
- /**
- * Returns the no-argument Constructor of the implementation class for
- * this OFStatisticsType, either request or reply based on the supplied
- * OFType
- *
- * @param t
- * @return
- */
- public Constructor<? extends OFStatistics> getConstructor(OFType t) {
- if (t == OFType.STATS_REQUEST) {
- return requestConstructor;
- } else if (t == OFType.STATS_REPLY) {
- return replyConstructor;
- } else {
- throw new RuntimeException(t.toString() + " is an invalid OFType");
- }
- }
-
- /**
- * @return the requestInstantiable
- */
- public Instantiable<OFStatistics> getRequestInstantiable() {
- return requestInstantiable;
- }
-
- /**
- * @param requestInstantiable the requestInstantiable to set
- */
- public void setRequestInstantiable(
- Instantiable<OFStatistics> requestInstantiable) {
- this.requestInstantiable = requestInstantiable;
- }
-
- /**
- * @return the replyInstantiable
- */
- public Instantiable<OFStatistics> getReplyInstantiable() {
- return replyInstantiable;
- }
-
- /**
- * @param replyInstantiable the replyInstantiable to set
- */
- public void setReplyInstantiable(Instantiable<OFStatistics> replyInstantiable) {
- this.replyInstantiable = replyInstantiable;
- }
-
- /**
- * Returns a new instance of the implementation class for
- * this OFStatisticsType, either request or reply based on the supplied
- * OFType
- *
- * @param t
- * @return
- */
- public OFStatistics newInstance(OFType t) {
- if (t == OFType.STATS_REQUEST) {
- return requestInstantiable.instantiate();
- } else if (t == OFType.STATS_REPLY) {
- return replyInstantiable.instantiate();
- } else {
- throw new RuntimeException(t.toString() + " is an invalid OFType");
- }
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-import org.openflow.util.StringByteSerializer;
-
-/**
- * Represents an ofp_table_stats structure
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFTableStatistics implements OFStatistics, Serializable {
- public static int MAX_TABLE_NAME_LEN = 32;
-
- protected byte tableId;
- protected String name;
- protected int wildcards;
- protected int maximumEntries;
- protected int activeCount;
- protected long lookupCount;
- protected long matchedCount;
-
- /**
- * @return the tableId
- */
- public byte getTableId() {
- return tableId;
- }
-
- /**
- * @param tableId the tableId to set
- */
- public void setTableId(byte tableId) {
- this.tableId = tableId;
- }
-
- /**
- * @return the name
- */
- public String getName() {
- return name;
- }
-
- /**
- * @param name the name to set
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /**
- * @return the wildcards
- */
- public int getWildcards() {
- return wildcards;
- }
-
- /**
- * @param wildcards the wildcards to set
- */
- public void setWildcards(int wildcards) {
- this.wildcards = wildcards;
- }
-
- /**
- * @return the maximumEntries
- */
- public int getMaximumEntries() {
- return maximumEntries;
- }
-
- /**
- * @param maximumEntries the maximumEntries to set
- */
- public void setMaximumEntries(int maximumEntries) {
- this.maximumEntries = maximumEntries;
- }
-
- /**
- * @return the activeCount
- */
- public int getActiveCount() {
- return activeCount;
- }
-
- /**
- * @param activeCount the activeCount to set
- */
- public void setActiveCount(int activeCount) {
- this.activeCount = activeCount;
- }
-
- /**
- * @return the lookupCount
- */
- public long getLookupCount() {
- return lookupCount;
- }
-
- /**
- * @param lookupCount the lookupCount to set
- */
- public void setLookupCount(long lookupCount) {
- this.lookupCount = lookupCount;
- }
-
- /**
- * @return the matchedCount
- */
- public long getMatchedCount() {
- return matchedCount;
- }
-
- /**
- * @param matchedCount the matchedCount to set
- */
- public void setMatchedCount(long matchedCount) {
- this.matchedCount = matchedCount;
- }
-
- @Override
- public int getLength() {
- return 64;
- }
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.tableId = data.get();
- data.get(); // pad
- data.get(); // pad
- data.get(); // pad
- this.name = StringByteSerializer.readFrom(data, MAX_TABLE_NAME_LEN);
- this.wildcards = data.getInt();
- this.maximumEntries = data.getInt();
- this.activeCount = data.getInt();
- this.lookupCount = data.getLong();
- this.matchedCount = data.getLong();
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.put(this.tableId);
- data.put((byte) 0); // pad
- data.put((byte) 0); // pad
- data.put((byte) 0); // pad
- StringByteSerializer.writeTo(data, MAX_TABLE_NAME_LEN, this.name);
- data.putInt(this.wildcards);
- data.putInt(this.maximumEntries);
- data.putInt(this.activeCount);
- data.putLong(this.lookupCount);
- data.putLong(this.matchedCount);
- }
-
- @Override
- public int hashCode() {
- final int prime = 449;
- int result = 1;
- result = prime * result + activeCount;
- result = prime * result + (int) (lookupCount ^ (lookupCount >>> 32));
- result = prime * result + (int) (matchedCount ^ (matchedCount >>> 32));
- result = prime * result + maximumEntries;
- result = prime * result + ((name == null) ? 0 : name.hashCode());
- result = prime * result + tableId;
- result = prime * result + wildcards;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFTableStatistics)) {
- return false;
- }
- OFTableStatistics other = (OFTableStatistics) obj;
- if (activeCount != other.activeCount) {
- return false;
- }
- if (lookupCount != other.lookupCount) {
- return false;
- }
- if (matchedCount != other.matchedCount) {
- return false;
- }
- if (maximumEntries != other.maximumEntries) {
- return false;
- }
- if (name == null) {
- if (other.name != null) {
- return false;
- }
- } else if (!name.equals(other.name)) {
- return false;
- }
- if (tableId != other.tableId) {
- return false;
- }
- if (wildcards != other.wildcards) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-package org.openflow.protocol.statistics;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-import org.openflow.protocol.factory.OFActionFactory;
-import org.openflow.protocol.factory.OFActionFactoryAware;
-
-/**
- * The base class for vendor implemented statistics
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-public class OFVendorStatistics implements OFStatistics, OFActionFactoryAware, Serializable {
- protected transient OFActionFactory actionFactory;
- protected int vendor;
- protected byte[] body;
-
- // non-message fields
- protected int length = 0;
-
- @Override
- public void readFrom(ByteBuffer data) {
- this.vendor = data.getInt();
- if (body == null)
- body = new byte[length - 4];
- data.get(body);
- }
-
- @Override
- public void writeTo(ByteBuffer data) {
- data.putInt(this.vendor);
- if (body != null)
- data.put(body);
- }
-
- @Override
- public int hashCode() {
- final int prime = 457;
- int result = 1;
- result = prime * result + vendor;
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof OFVendorStatistics)) {
- return false;
- }
- OFVendorStatistics other = (OFVendorStatistics) obj;
- if (vendor != other.vendor) {
- return false;
- }
- return true;
- }
-
- @Override
- public int getLength() {
- return length;
- }
-
- public void setLength(int length) {
- this.length = length;
- }
-
- /**
- * @param actionFactory the actionFactory to set
- */
- @Override
- public void setActionFactory(OFActionFactory actionFactory) {
- this.actionFactory = actionFactory;
- }
-
- public OFActionFactory getActionFactory() {
- return this.actionFactory;
- }
-
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.math.BigInteger;
-
-public class HexString {
- /**
- * Convert a string of bytes to a ':' separated hex string
- * @param bytes
- * @return "0f:ca:fe:de:ad:be:ef"
- */
- public static String toHexString(byte[] bytes) {
- int i;
- String ret = "";
- String tmp;
- for(i=0; i< bytes.length; i++) {
- if(i> 0)
- ret += ":";
- tmp = Integer.toHexString(U8.f(bytes[i]));
- if (tmp.length() == 1)
- ret += "0";
- ret += tmp;
- }
- return ret;
- }
-
- public static String toHexString(long val) {
- char arr[] = Long.toHexString(val).toCharArray();
- String ret = "";
- // prepend the right number of leading zeros
- int i = 0;
- for (; i < (16 - arr.length); i++) {
- ret += "0";
- if ((i % 2) == 1)
- ret += ":";
- }
- for (int j = 0; j < arr.length; j++) {
- ret += arr[j];
- if ((((i + j) % 2) == 1) && (j < (arr.length - 1)))
- ret += ":";
- }
- return ret;
- }
-
-
- /**
- * Convert a string of hex values into a string of bytes
- * @param values "0f:ca:fe:de:ad:be:ef"
- * @return [15, 5 ,2, 5, 17]
- */
-
- public static byte[] fromHexString(String values) {
- String[] octets = values.split(":");
- byte[] ret = new byte[octets.length];
- int i;
-
- for(i=0;i<octets.length; i++)
- ret[i] = Integer.valueOf(octets[i], 16).byteValue();
- return ret;
- }
-
- public static long toLong(String values) {
- long value = new BigInteger(values.replaceAll(":", ""), 16).longValue();
- return value;
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.util.LinkedHashMap;
-
-public class LRULinkedHashMap<K, V> extends LinkedHashMap<K, V> {
- private static final long serialVersionUID = -2964986094089626647L;
- protected int maximumCapacity;
-
- public LRULinkedHashMap(int initialCapacity, int maximumCapacity) {
- super(initialCapacity, 0.75f, true);
- this.maximumCapacity = maximumCapacity;
- }
-
- public LRULinkedHashMap(int maximumCapacity) {
- super(16, 0.75f, true);
- this.maximumCapacity = maximumCapacity;
- }
-
- @Override
- protected boolean removeEldestEntry(java.util.Map.Entry<K, V> eldest) {
- if (this.size() > maximumCapacity)
- return true;
- return false;
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.Arrays;
-
-public class StringByteSerializer {
- public static String readFrom(ByteBuffer data, int length) {
- byte[] stringBytes = new byte[length];
- data.get(stringBytes);
- // find the first index of 0
- int index = 0;
- for (byte b : stringBytes) {
- if (0 == b)
- break;
- ++index;
- }
- return new String(Arrays.copyOf(stringBytes, index),
- Charset.forName("ascii"));
- }
-
- public static void writeTo(ByteBuffer data, int length, String value) {
- try {
- byte[] name = value.getBytes("ASCII");
- if (name.length < length) {
- data.put(name);
- for (int i = name.length; i < length; ++i) {
- data.put((byte) 0);
- }
- } else {
- data.put(name, 0, length-1);
- data.put((byte) 0);
- }
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException(e);
- }
-
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-public class U16 {
- public static int f(short i) {
- return (int)i & 0xffff;
- }
-
- public static short t(int l) {
- return (short) l;
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-public class U32 {
- public static long f(int i) {
- return (long)i & 0xffffffffL;
- }
-
- public static int t(long l) {
- return (int) l;
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.math.BigInteger;
-
-public class U64 {
- public static BigInteger f(long i) {
- return new BigInteger(Long.toBinaryString(i), 2);
- }
-
- public static long t(BigInteger l) {
- return l.longValue();
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-public class U8 {
- public static short f(byte i) {
- return (short) ((short)i & 0xff);
- }
-
- public static byte t(short l) {
- return (byte) l;
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-
-/*****
- * A util library class for dealing with the lack of unsigned datatypes in Java
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- * @author David Erickson (daviderickson@cs.stanford.edu)
- */
-
-public class Unsigned {
- /**
- * Get an unsigned byte from the current position of the ByteBuffer
- *
- * @param bb ByteBuffer to get the byte from
- * @return an unsigned byte contained in a short
- */
- public static short getUnsignedByte(ByteBuffer bb) {
- return ((short) (bb.get() & (short) 0xff));
- }
-
- /**
- * Get an unsigned byte from the specified offset in the ByteBuffer
- *
- * @param bb ByteBuffer to get the byte from
- * @param offset the offset to get the byte from
- * @return an unsigned byte contained in a short
- */
- public static short getUnsignedByte(ByteBuffer bb, int offset) {
- return ((short) (bb.get(offset) & (short) 0xff));
- }
-
- /**
- * Put an unsigned byte into the specified ByteBuffer at the current
- * position
- *
- * @param bb ByteBuffer to put the byte into
- * @param v the short containing the unsigned byte
- */
- public static void putUnsignedByte(ByteBuffer bb, short v) {
- bb.put((byte) (v & 0xff));
- }
-
- /**
- * Put an unsigned byte into the specified ByteBuffer at the specified
- * offset
- *
- * @param bb ByteBuffer to put the byte into
- * @param v the short containing the unsigned byte
- * @param offset the offset to insert the unsigned byte at
- */
- public static void putUnsignedByte(ByteBuffer bb, short v, int offset) {
- bb.put(offset, (byte) (v & 0xff));
- }
-
- /**
- * Get an unsigned short from the current position of the ByteBuffer
- *
- * @param bb ByteBuffer to get the byte from
- * @return an unsigned short contained in a int
- */
- public static int getUnsignedShort(ByteBuffer bb) {
- return (bb.getShort() & 0xffff);
- }
-
- /**
- * Get an unsigned short from the specified offset in the ByteBuffer
- *
- * @param bb ByteBuffer to get the short from
- * @param offset the offset to get the short from
- * @return an unsigned short contained in a int
- */
- public static int getUnsignedShort(ByteBuffer bb, int offset) {
- return (bb.getShort(offset) & 0xffff);
- }
-
- /**
- * Put an unsigned short into the specified ByteBuffer at the current
- * position
- *
- * @param bb ByteBuffer to put the short into
- * @param v the int containing the unsigned short
- */
- public static void putUnsignedShort(ByteBuffer bb, int v) {
- bb.putShort((short) (v & 0xffff));
- }
-
- /**
- * Put an unsigned short into the specified ByteBuffer at the specified
- * offset
- *
- * @param bb ByteBuffer to put the short into
- * @param v the int containing the unsigned short
- * @param offset the offset to insert the unsigned short at
- */
- public static void putUnsignedShort(ByteBuffer bb, int v, int offset) {
- bb.putShort(offset, (short) (v & 0xffff));
- }
-
- /**
- * Get an unsigned int from the current position of the ByteBuffer
- *
- * @param bb ByteBuffer to get the int from
- * @return an unsigned int contained in a long
- */
- public static long getUnsignedInt(ByteBuffer bb) {
- return ((long) bb.getInt() & 0xffffffffL);
- }
-
- /**
- * Get an unsigned int from the specified offset in the ByteBuffer
- *
- * @param bb ByteBuffer to get the int from
- * @param offset the offset to get the int from
- * @return an unsigned int contained in a long
- */
- public static long getUnsignedInt(ByteBuffer bb, int offset) {
- return ((long) bb.getInt(offset) & 0xffffffffL);
- }
-
- /**
- * Put an unsigned int into the specified ByteBuffer at the current position
- *
- * @param bb ByteBuffer to put the int into
- * @param v the long containing the unsigned int
- */
- public static void putUnsignedInt(ByteBuffer bb, long v) {
- bb.putInt((int) (v & 0xffffffffL));
- }
-
- /**
- * Put an unsigned int into the specified ByteBuffer at the specified offset
- *
- * @param bb ByteBuffer to put the int into
- * @param v the long containing the unsigned int
- * @param offset the offset to insert the unsigned int at
- */
- public static void putUnsignedInt(ByteBuffer bb, long v, int offset) {
- bb.putInt(offset, (int) (v & 0xffffffffL));
- }
-
- /**
- * Get an unsigned long from the current position of the ByteBuffer
- *
- * @param bb ByteBuffer to get the long from
- * @return an unsigned long contained in a BigInteger
- */
- public static BigInteger getUnsignedLong(ByteBuffer bb) {
- byte[] v = new byte[8];
- for (int i = 0; i < 8; ++i) {
- v[i] = bb.get(i);
- }
- return new BigInteger(1, v);
- }
-
- /**
- * Get an unsigned long from the specified offset in the ByteBuffer
- *
- * @param bb ByteBuffer to get the long from
- * @param offset the offset to get the long from
- * @return an unsigned long contained in a BigInteger
- */
- public static BigInteger getUnsignedLong(ByteBuffer bb, int offset) {
- byte[] v = new byte[8];
- for (int i = 0; i < 8; ++i) {
- v[i] = bb.get(offset+i);
- }
- return new BigInteger(1, v);
- }
-
- /**
- * Put an unsigned long into the specified ByteBuffer at the current
- * position
- *
- * @param bb ByteBuffer to put the long into
- * @param v the BigInteger containing the unsigned long
- */
- public static void putUnsignedLong(ByteBuffer bb, BigInteger v) {
- bb.putLong(v.longValue());
- }
-
- /**
- * Put an unsigned long into the specified ByteBuffer at the specified
- * offset
- *
- * @param bb ByteBuffer to put the long into
- * @param v the BigInteger containing the unsigned long
- * @param offset the offset to insert the unsigned long at
- */
- public static void putUnsignedLong(ByteBuffer bb, BigInteger v, int offset) {
- bb.putLong(offset, v.longValue());
- }
-}
+++ /dev/null
-package org.openflow.io;
-
-import org.openflow.protocol.*;
-import org.openflow.protocol.factory.BasicFactory;
-
-import java.util.*;
-import java.nio.channels.*;
-import java.net.InetSocketAddress;
-
-import org.junit.Assert;
-
-import org.junit.Test;
-
-
-/**
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-public class OFMessageAsyncStreamTest {
- @Test
- public void testMarshalling() throws Exception {
- OFMessage h = new OFHello();
-
- ServerSocketChannel serverSC = ServerSocketChannel.open();
- serverSC.socket().bind(new java.net.InetSocketAddress(0));
- serverSC.configureBlocking(false);
-
- SocketChannel client = SocketChannel.open(
- new InetSocketAddress("localhost",
- serverSC.socket().getLocalPort())
- );
- SocketChannel server = serverSC.accept();
- OFMessageAsyncStream clientStream = new OFMessageAsyncStream(client, new BasicFactory());
- OFMessageAsyncStream serverStream = new OFMessageAsyncStream(server, new BasicFactory());
-
- clientStream.write(h);
- while(clientStream.needsFlush()) {
- clientStream.flush();
- }
- List<OFMessage> l = serverStream.read();
- Assert.assertEquals(l.size(), 1);
- OFMessage m = l.get(0);
- Assert.assertEquals(m.getLength(),h.getLength());
- Assert.assertEquals(m.getVersion(), h.getVersion());
- Assert.assertEquals(m.getType(), h.getType());
- Assert.assertEquals(m.getType(), h.getType());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.util.U16;
-
-import junit.framework.TestCase;
-
-
-
-public class BasicFactoryTest extends TestCase {
- public void testCreateAndParse() {
- BasicFactory factory = new BasicFactory();
- OFMessage m = factory.getMessage(OFType.HELLO);
- m.setVersion((byte) 1);
- m.setType(OFType.ECHO_REQUEST);
- m.setLength(U16.t(8));
- m.setXid(0xdeadbeef);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- m.writeTo(bb);
- bb.flip();
- bb.limit(bb.limit()-1);
- TestCase.assertEquals(0, factory.parseMessages(bb).size());
- bb.limit(bb.limit()+1);
- List<OFMessage> messages = factory.parseMessages(bb);
- TestCase.assertEquals(1, messages.size());
- TestCase.assertTrue(messages.get(0).getType() == OFType.ECHO_REQUEST);
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-
-import org.junit.Test;
-import org.openflow.protocol.action.OFActionType;
-
-import junit.framework.TestCase;
-
-
-public class OFActionTypeTest extends TestCase {
- @Test
- public void testMapping() throws Exception {
- TestCase.assertEquals(OFActionType.OUTPUT,
- OFActionType.valueOf((short) 0));
- TestCase.assertEquals(OFActionType.OPAQUE_ENQUEUE,
- OFActionType.valueOf((short) 11));
- TestCase.assertEquals(OFActionType.VENDOR,
- OFActionType.valueOf((short) 0xffff));
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFBarrierReplyTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFBarrierReply msg = (OFBarrierReply) messageFactory
- .getMessage(OFType.BARRIER_REPLY);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.BARRIER_REPLY, msg.getType());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFBarrierRequestTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFBarrierRequest msg = (OFBarrierRequest) messageFactory
- .getMessage(OFType.BARRIER_REQUEST);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.BARRIER_REQUEST, msg.getType());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.openflow.protocol.OFError.OFErrorType;
-import org.openflow.protocol.OFError.OFHelloFailedCode;
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.protocol.factory.OFMessageFactory;
-import org.openflow.util.OFTestCase;
-
-public class OFErrorTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFError msg = (OFError) messageFactory.getMessage(OFType.ERROR);
- msg.setMessageFactory(messageFactory);
- msg.setErrorType((short) OFErrorType.OFPET_HELLO_FAILED.ordinal());
- msg.setErrorCode((short) OFHelloFailedCode.OFPHFC_INCOMPATIBLE
- .ordinal());
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals((short) OFErrorType.OFPET_HELLO_FAILED.ordinal(),
- msg.getErrorType());
- TestCase.assertEquals((short) OFHelloFailedCode.OFPHFC_INCOMPATIBLE
- .ordinal(), msg.getErrorType());
- TestCase.assertNull(msg.getOffendingMsg());
-
- msg.setOffendingMsg(new OFHello());
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals((short) OFErrorType.OFPET_HELLO_FAILED.ordinal(),
- msg.getErrorType());
- TestCase.assertEquals((short) OFHelloFailedCode.OFPHFC_INCOMPATIBLE
- .ordinal(), msg.getErrorType());
- TestCase.assertNotNull(msg.getOffendingMsg());
- TestCase.assertEquals(OFHello.MINIMUM_LENGTH,
- msg.getOffendingMsg().length);
- }
-
- public void testGarbageAtEnd() {
- // This is a OFError msg (12 bytes), that encaps a OFVendor msg (24
- // bytes)
- // AND some zeros at the end (40 bytes) for a total of 76 bytes
- // THIS is what an NEC sends in reply to Nox's VENDOR request
- byte[] oferrorRaw = { 0x01, 0x01, 0x00, 0x4c, 0x00, 0x00, 0x10,
- (byte) 0xcc, 0x00, 0x01, 0x00, 0x01, 0x01, 0x04, 0x00, 0x18,
- 0x00, 0x00, 0x10, (byte) 0xcc, 0x00, 0x00, 0x23, 0x20, 0x00,
- 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00 };
- OFMessageFactory factory = new BasicFactory();
- ByteBuffer oferrorBuf = ByteBuffer.wrap(oferrorRaw);
- List<OFMessage> msgs = factory.parseMessages(oferrorBuf,
- oferrorRaw.length);
- TestCase.assertEquals(1, msgs.size());
- OFMessage msg = msgs.get(0);
- TestCase.assertEquals(76, msg.getLengthU());
- ByteBuffer out = ByteBuffer.allocate(1024);
- msg.writeTo(out);
- TestCase.assertEquals(76, out.position());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-
-public class OFFeaturesReplyTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFFeaturesReply ofr = (OFFeaturesReply) messageFactory
- .getMessage(OFType.FEATURES_REPLY);
- List<OFPhysicalPort> ports = new ArrayList<OFPhysicalPort>();
- OFPhysicalPort port = new OFPhysicalPort();
- port.setHardwareAddress(new byte[6]);
- port.setName("eth0");
- ports.add(port);
- ofr.setPorts(ports);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- ofr.writeTo(bb);
- bb.flip();
- ofr.readFrom(bb);
- TestCase.assertEquals(1, ofr.getPorts().size());
- TestCase.assertEquals("eth0", ofr.getPorts().get(0).getName());
-
- // test a 15 character name
- ofr.getPorts().get(0).setName("012345678901234");
- bb.clear();
- ofr.writeTo(bb);
- bb.flip();
- ofr.readFrom(bb);
- TestCase.assertEquals("012345678901234", ofr.getPorts().get(0).getName());
-
- // test a 16 character name getting truncated
- ofr.getPorts().get(0).setName("0123456789012345");
- bb.clear();
- ofr.writeTo(bb);
- bb.flip();
- ofr.readFrom(bb);
- TestCase.assertEquals("012345678901234", ofr.getPorts().get(0).getName());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.protocol.OFFlowRemoved.OFFlowRemovedReason;
-import org.openflow.util.OFTestCase;
-
-public class OFFlowRemovedTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFFlowRemoved msg = (OFFlowRemoved) messageFactory
- .getMessage(OFType.FLOW_REMOVED);
- msg.setMatch(new OFMatch());
- byte[] hwAddr = new byte[6];
- msg.getMatch().setDataLayerDestination(hwAddr);
- msg.getMatch().setDataLayerSource(hwAddr);
- msg.setReason(OFFlowRemovedReason.OFPRR_DELETE);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.FLOW_REMOVED, msg.getType());
- TestCase.assertEquals(OFFlowRemovedReason.OFPRR_DELETE, msg.getReason());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFGetConfigReplyTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFSetConfig msg = (OFSetConfig) messageFactory
- .getMessage(OFType.SET_CONFIG);
- msg.setFlags((short) 1);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.SET_CONFIG, msg.getType());
- TestCase.assertEquals((short)1, msg.getFlags());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFGetConfigRequestTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFGetConfigRequest msg = (OFGetConfigRequest) messageFactory
- .getMessage(OFType.GET_CONFIG_REQUEST);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.GET_CONFIG_REQUEST, msg.getType());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import junit.framework.TestCase;
-
-public class OFMatchTest extends TestCase {
- public void testFromString() {
- OFMatch correct = new OFMatch();
- OFMatch tester = new OFMatch();
-
- // Various combinations of "all"/"any"
- tester.fromString("OFMatch[]");
- // correct is already wildcarded
- TestCase.assertEquals(correct, tester);
- tester.fromString("all");
- TestCase.assertEquals(correct, tester);
- tester.fromString("ANY");
- TestCase.assertEquals(correct, tester);
- tester.fromString("");
- TestCase.assertEquals(correct, tester);
- tester.fromString("[]");
- TestCase.assertEquals(correct, tester);
-
- // ip_src
- correct.setWildcards(~OFMatch.OFPFW_NW_SRC_MASK);
- correct.setNetworkSource(0x01010203);
- tester.fromString("nw_src=1.1.2.3");
- TestCase.assertEquals(correct.getNetworkSourceMaskLen(), tester
- .getNetworkSourceMaskLen());
- TestCase.assertEquals(correct, tester);
- tester.fromString("IP_sRc=1.1.2.3");
- TestCase.assertEquals(correct.getNetworkSourceMaskLen(), tester
- .getNetworkSourceMaskLen());
- TestCase.assertEquals(correct, tester);
- }
-
- public void testToString() {
- OFMatch match = new OFMatch();
- match.fromString("nw_dst=3.4.5.6/8");
- TestCase.assertEquals(8, match.getNetworkDestinationMaskLen());
- String correct = "OFMatch[nw_dst=3.0.0.0/8]";
- String tester = match.toString();
-
- TestCase.assertEquals(correct, tester);
- tester = "OFMatch[dl_type=35020]";
- correct = "OFMatch[dl_type=0x88cc]";
- match = new OFMatch();
- match.fromString(tester);
- TestCase.assertEquals(correct, match.toString());
- OFMatch match2 = new OFMatch();
- match2.fromString(correct);
- TestCase.assertEquals(match, match2);
- }
-
- public void testClone() {
- OFMatch match1 = new OFMatch();
- OFMatch match2 = match1.clone();
- TestCase.assertEquals(match1, match2);
- match2.setNetworkProtocol((byte) 4);
- match2.setWildcards(match2.getWildcards() & ~OFMatch.OFPFW_NW_PROTO);
- TestCase.assertNotSame(match1, match2);
- }
-
- public void testIpToString() {
- String test = OFMatch.ipToString(-1);
- TestCase.assertEquals("255.255.255.255", test);
- }
-
- public void testReverse() {
- OFMatch match1 = new OFMatch();
- OFMatch match2 = match1.reverse((short)0, true);
- TestCase.assertEquals(match1, match2);
-
- match1.fromString("dl_dst=00:11:22:33:44:55");
- match2 = match1.reverse((short)0, true);
- OFMatch match3 = new OFMatch();
- match3.fromString("dl_src=00:11:22:33:44:55");
- TestCase.assertEquals(match2, match3);
-
- match1.fromString("nw_dst=192.168.0.0/24");
- match2 = match1.reverse((short)0, true);
- match3.fromString("nw_src=192.168.0.0/24");
- TestCase.assertEquals(match2, match3);
-
- match1.fromString("in_port=1");
- match2 = match1.reverse((short)2, false);
- match3.fromString("in_port=2");
- TestCase.assertEquals(match2, match3);
- }
-
- public void testSubsumes() {
- OFMatch match1 = new OFMatch();
- OFMatch match2 = new OFMatch();
- match2.fromString("dl_dst=00:11:22:33:44:55");
- TestCase.assertTrue(match1.subsumes(match2));
- TestCase.assertFalse(match2.subsumes(match1));
-
- match1.fromString("nw_dst=192.168.0.0/16");
- match2.fromString("nw_dst=192.168.0.0/24");
- TestCase.assertTrue(match1.subsumes(match2));
- TestCase.assertFalse(match2.subsumes(match1));
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFPortConfigTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFPortMod msg = (OFPortMod) messageFactory
- .getMessage(OFType.PORT_MOD);
- msg.setHardwareAddress(new byte[6]);
- msg.portNumber = 1;
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.PORT_MOD, msg.getType());
- TestCase.assertEquals(1, msg.getPortNumber());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.protocol.OFPortStatus.OFPortReason;
-import org.openflow.util.OFTestCase;
-
-public class OFPortStatusTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFPortStatus msg = (OFPortStatus) messageFactory
- .getMessage(OFType.PORT_STATUS);
- msg.setDesc(new OFPhysicalPort());
- msg.getDesc().setHardwareAddress(new byte[6]);
- msg.getDesc().setName("eth0");
- msg.setReason((byte) OFPortReason.OFPPR_ADD.ordinal());
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.PORT_STATUS, msg.getType());
- TestCase.assertEquals((byte) OFPortReason.OFPPR_ADD.ordinal(), msg
- .getReason());
- TestCase.assertNotNull(msg.getDesc());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.protocol.queue.OFPacketQueue;
-import org.openflow.protocol.queue.OFQueueProperty;
-import org.openflow.protocol.queue.OFQueuePropertyMinRate;
-import org.openflow.protocol.queue.OFQueuePropertyType;
-import org.openflow.util.OFTestCase;
-
-public class OFQueueConfigTest extends OFTestCase {
- public void testRequest() throws Exception {
- OFQueueConfigRequest req = new OFQueueConfigRequest();
- req.setPort((short) 5);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- req.writeTo(bb);
- bb.flip();
-
- OFQueueConfigRequest req2 = new OFQueueConfigRequest();
- req2.readFrom(bb);
- TestCase.assertEquals(req, req2);
- }
-
- public void testReply() throws Exception {
- OFQueueConfigReply reply = new OFQueueConfigReply();
- reply.setPort((short) 5);
-
- OFPacketQueue queue = new OFPacketQueue();
- queue.setQueueId(1);
- List<OFQueueProperty> properties = new ArrayList<OFQueueProperty>();
- properties.add(new OFQueuePropertyMinRate().setRate((short) 1));
- queue.setProperties(properties);
- queue.setLength((short) (OFPacketQueue.MINIMUM_LENGTH + OFQueuePropertyMinRate.MINIMUM_LENGTH));
-
- List<OFPacketQueue> queues = new ArrayList<OFPacketQueue>();
- queues.add(queue);
- reply.setQueues(queues);
- reply.setLengthU(OFQueueConfigReply.MINIMUM_LENGTH + queue.getLength());
-
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- reply.writeTo(bb);
- bb.flip();
-
- OFQueueConfigReply reply2 = new OFQueueConfigReply();
- reply2.setQueuePropertyFactory(new BasicFactory());
- reply2.readFrom(bb);
- TestCase.assertEquals(reply, reply2);
- TestCase.assertEquals(1, reply2.getQueues().size());
- TestCase.assertEquals(1, reply2.getQueues().get(0).getProperties().size());
- TestCase.assertTrue(reply2.getQueues().get(0).getProperties().get(0) instanceof OFQueuePropertyMinRate);
- TestCase.assertEquals(OFQueuePropertyType.MIN_RATE, reply2.getQueues().get(0).getProperties().get(0).getType());
-
- reply.getQueues().add(queue.clone());
- reply.setLengthU(reply.getLengthU() + queue.getLength());
- bb.clear();
- reply.writeTo(bb);
- bb.flip();
- reply2 = new OFQueueConfigReply();
- reply2.setQueuePropertyFactory(new BasicFactory());
- reply2.readFrom(bb);
- TestCase.assertEquals(reply, reply2);
- TestCase.assertEquals(2, reply2.getQueues().size());
-
- queue.getProperties().add(new OFQueuePropertyMinRate().setRate((short) 2));
- queue.setLength((short) (queue.getLength() + OFQueuePropertyMinRate.MINIMUM_LENGTH));
- reply.setLengthU(reply.getLengthU() + OFQueuePropertyMinRate.MINIMUM_LENGTH);
- bb.clear();
- reply.writeTo(bb);
- bb.flip();
- reply2 = new OFQueueConfigReply();
- reply2.setQueuePropertyFactory(new BasicFactory());
- reply2.readFrom(bb);
- TestCase.assertEquals(reply, reply2);
- TestCase.assertEquals(2, reply2.getQueues().size());
- TestCase.assertEquals(2, reply2.getQueues().get(0).getProperties().size());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFSetConfigTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFGetConfigReply msg = (OFGetConfigReply) messageFactory
- .getMessage(OFType.GET_CONFIG_REPLY);
- msg.setFlags((short) 1);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(OFType.GET_CONFIG_REPLY, msg.getType());
- TestCase.assertEquals((short)1, msg.getFlags());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.protocol.factory.OFMessageFactory;
-import org.openflow.protocol.statistics.OFStatisticsType;
-import org.openflow.util.OFTestCase;
-
-public class OFStatisticsReplyTest extends OFTestCase {
- public void testOFFlowStatisticsReply() throws Exception {
- byte[] packet = new byte[] { 0x01, 0x11, 0x01, 0x2c, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, (byte) 0xff,
- (byte) 0xff, 0x00, 0x00, 0x08, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x0a, 0x00, 0x00, 0x03, 0x0a, 0x00, 0x00, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, (byte) 0xa6,
- (byte) 0xa6, 0x00, (byte) 0xff, (byte) 0xff, 0x00, 0x05, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- (byte) 0xc4, 0x00, 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x02, (byte) 0xff, (byte) 0xff, 0x00, 0x00, 0x08, 0x06,
- 0x00, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x03, 0x0a, 0x00,
- 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x3b, 0x2f, (byte) 0xfa, 0x40, (byte) 0xff, (byte) 0xff, 0x00,
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x01, 0x00,
- 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x03, (byte) 0xff, (byte) 0xff, 0x00, 0x62, 0x08,
- 0x00, 0x00, 0x01, 0x62, 0x37, 0x0a, 0x00, 0x00, 0x02, 0x0a,
- 0x00, 0x00, 0x03, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x3a, (byte) 0xc5, 0x2a, (byte) 0x80, (byte) 0xff,
- (byte) 0xff, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, (byte) 0xc4, 0x00, 0x00, 0x00,
- 0x08, 0x00, 0x02, 0x00, 0x00 };
-
- OFMessageFactory factory = new BasicFactory();
- ByteBuffer packetBuf = ByteBuffer.wrap(packet);
- List<OFMessage> msgs = factory.parseMessages(packetBuf, packet.length);
- TestCase.assertEquals(1, msgs.size());
- TestCase.assertTrue(msgs.get(0) instanceof OFStatisticsReply);
- OFStatisticsReply sr = (OFStatisticsReply) msgs.get(0);
- TestCase.assertEquals(OFStatisticsType.FLOW, sr.getStatisticType());
- TestCase.assertEquals(3, sr.getStatistics().size());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.protocol.factory.OFMessageFactory;
-import org.openflow.protocol.statistics.OFFlowStatisticsRequest;
-import org.openflow.protocol.statistics.OFStatisticsType;
-import org.openflow.protocol.statistics.OFVendorStatistics;
-import org.openflow.util.OFTestCase;
-
-public class OFStatisticsRequestTest extends OFTestCase {
- public void testOFFlowStatisticsRequest() throws Exception {
- byte[] packet = new byte[] { 0x01, 0x10, 0x00, 0x38, 0x00, 0x00, 0x00,
- 0x16, 0x00, 0x01, 0x00, 0x00, (byte) 0xff, (byte) 0xff,
- (byte) 0xff, (byte) 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- (byte) 0xff, 0x00, (byte) 0xff, (byte) 0xff };
-
- OFMessageFactory factory = new BasicFactory();
- ByteBuffer packetBuf = ByteBuffer.wrap(packet);
- List<OFMessage> msgs = factory.parseMessages(packetBuf, packet.length);
- TestCase.assertEquals(1, msgs.size());
- TestCase.assertTrue(msgs.get(0) instanceof OFStatisticsRequest);
- OFStatisticsRequest sr = (OFStatisticsRequest) msgs.get(0);
- TestCase.assertEquals(OFStatisticsType.FLOW, sr.getStatisticType());
- TestCase.assertEquals(1, sr.getStatistics().size());
- TestCase.assertTrue(sr.getStatistics().get(0) instanceof OFFlowStatisticsRequest);
- }
-
- public void testOFStatisticsRequestVendor() throws Exception {
- byte[] packet = new byte[] { 0x01, 0x10, 0x00, 0x50, 0x00, 0x00, 0x00,
- 0x63, (byte) 0xff, (byte) 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x4c, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x20,
- (byte) 0xe0, 0x00, 0x11, 0x00, 0x0c, 0x29, (byte) 0xc5,
- (byte) 0x95, 0x57, 0x02, 0x25, 0x5c, (byte) 0xca, 0x00, 0x02,
- (byte) 0xff, (byte) 0xff, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x50, 0x04,
- 0x00, 0x00, 0x00, 0x00, (byte) 0xff, 0x00, 0x00, 0x00,
- (byte) 0xff, (byte) 0xff, 0x4e, 0x20 };
-
- OFMessageFactory factory = new BasicFactory();
- ByteBuffer packetBuf = ByteBuffer.wrap(packet);
- List<OFMessage> msgs = factory.parseMessages(packetBuf, packet.length);
- TestCase.assertEquals(1, msgs.size());
- TestCase.assertTrue(msgs.get(0) instanceof OFStatisticsRequest);
- OFStatisticsRequest sr = (OFStatisticsRequest) msgs.get(0);
- TestCase.assertEquals(OFStatisticsType.VENDOR, sr.getStatisticType());
- TestCase.assertEquals(1, sr.getStatistics().size());
- TestCase.assertTrue(sr.getStatistics().get(0) instanceof OFVendorStatistics);
- TestCase.assertEquals(68, ((OFVendorStatistics)sr.getStatistics().get(0)).getLength());
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-
-import junit.framework.TestCase;
-
-import org.junit.Test;
-import org.openflow.protocol.statistics.OFStatisticsType;
-
-
-public class OFStatisticsTypeTest extends TestCase {
- @Test
- public void testMapping() throws Exception {
- TestCase.assertEquals(OFStatisticsType.DESC,
- OFStatisticsType.valueOf((short) 0, OFType.STATS_REQUEST));
- TestCase.assertEquals(OFStatisticsType.QUEUE,
- OFStatisticsType.valueOf((short) 5, OFType.STATS_REQUEST));
- TestCase.assertEquals(OFStatisticsType.VENDOR,
- OFStatisticsType.valueOf((short) 0xffff, OFType.STATS_REQUEST));
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-
-import junit.framework.TestCase;
-
-import org.junit.Test;
-
-
-public class OFTypeTest extends TestCase {
-
- public void testOFTypeCreate() throws Exception {
- OFType foo = OFType.HELLO;
- Class<? extends OFMessage> c = foo.toClass();
- TestCase.assertEquals(c, OFHello.class);
- }
-
- @Test
- public void testMapping() throws Exception {
- TestCase.assertEquals(OFType.HELLO, OFType.valueOf((byte) 0));
- TestCase.assertEquals(OFType.BARRIER_REPLY, OFType.valueOf((byte) 19));
- }
-}
+++ /dev/null
-package org.openflow.protocol;
-
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-import org.openflow.util.OFTestCase;
-
-public class OFVendorTest extends OFTestCase {
- public void testWriteRead() throws Exception {
- OFVendor msg = (OFVendor) messageFactory.getMessage(OFType.VENDOR);
- msg.setVendor(1);
- ByteBuffer bb = ByteBuffer.allocate(1024);
- bb.clear();
- msg.writeTo(bb);
- bb.flip();
- msg.readFrom(bb);
- TestCase.assertEquals(1, msg.getVendor());
- }
-}
+++ /dev/null
-package org.openflow.protocol.queue;
-
-
-import junit.framework.TestCase;
-
-import org.junit.Test;
-
-
-public class OFQueuePropertyTypeTest extends TestCase {
- @Test
- public void testMapping() throws Exception {
- TestCase.assertEquals(OFQueuePropertyType.NONE,
- OFQueuePropertyType.valueOf((short) 0));
- TestCase.assertEquals(OFQueuePropertyType.MIN_RATE,
- OFQueuePropertyType.valueOf((short) 1));
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import junit.framework.TestCase;
-
-/**
- * Does hexstring conversion work?
- *
- * @author Rob Sherwood (rob.sherwood@stanford.edu)
- *
- */
-
-public class HexStringTest extends TestCase {
-
- public void testMarshalling() throws Exception {
- String dpidStr = "00:00:00:23:20:2d:16:71";
- long dpid = HexString.toLong(dpidStr);
- String testStr = HexString.toHexString(dpid);
- TestCase.assertEquals(dpidStr, testStr);
- }
-
- public void testToStringBytes() {
- byte[] dpid = { 0, 0, 0, 0, 0, 0, 0, -1 };
- String valid = "00:00:00:00:00:00:00:ff";
- String testString = HexString.toHexString(dpid);
- TestCase.assertEquals(valid, testString);
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import org.openflow.protocol.factory.BasicFactory;
-import org.openflow.protocol.factory.OFMessageFactory;
-
-import junit.framework.TestCase;
-
-public class OFTestCase extends TestCase {
- public OFMessageFactory messageFactory;
-
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- messageFactory = new BasicFactory();
- }
-
- public void test() throws Exception {
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import junit.framework.TestCase;
-
-public class U16Test extends TestCase {
- /**
- * Tests that we correctly translate unsigned values in and out of a short
- * @throws Exception
- */
- public void test() throws Exception {
- int val = 0xffff;
- TestCase.assertEquals((short)-1, U16.t(val));
- TestCase.assertEquals((short)32767, U16.t(0x7fff));
- TestCase.assertEquals(val, U16.f((short)-1));
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import junit.framework.TestCase;
-
-public class U32Test extends TestCase {
- /**
- * Tests that we correctly translate unsigned values in and out of an int
- * @throws Exception
- */
- public void test() throws Exception {
- long val = 0xffffffffL;
- TestCase.assertEquals(-1, U32.t(val));
- TestCase.assertEquals(val, U32.f(-1));
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.math.BigInteger;
-
-import junit.framework.TestCase;
-
-public class U64Test extends TestCase {
- /**
- * Tests that we correctly translate unsigned values in and out of a long
- * @throws Exception
- */
- public void test() throws Exception {
- BigInteger val = new BigInteger("ffffffffffffffff", 16);
- TestCase.assertEquals(-1, U64.t(val));
- TestCase.assertEquals(val, U64.f(-1));
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import junit.framework.TestCase;
-
-public class U8Test extends TestCase {
- /**
- * Tests that we correctly translate unsigned values in and out of a byte
- * @throws Exception
- */
- public void test() throws Exception {
- short val = 0xff;
- TestCase.assertEquals(-1, U8.t(val));
- TestCase.assertEquals(val, U8.f((byte)-1));
- }
-}
+++ /dev/null
-package org.openflow.util;
-
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-
-import junit.framework.TestCase;
-
-public class UnsignedTest extends TestCase {
- public static String ULONG_MAX = "18446744073709551615";
-
- /**
- * Tests that we correctly extract an unsigned long into a BigInteger
- * @throws Exception
- */
- public void testGetUnsignedLong() throws Exception {
- ByteBuffer bb = ByteBuffer.allocate(8);
- bb.put((byte)0xff).put((byte)0xff).put((byte)0xff).put((byte)0xff);
- bb.put((byte)0xff).put((byte)0xff).put((byte)0xff).put((byte)0xff);
- bb.position(0);
- bb.limit(8);
- BigInteger bi = Unsigned.getUnsignedLong(bb);
- BigInteger uLongMax = new BigInteger(ULONG_MAX);
- for (int i = 0; i < uLongMax.bitCount(); ++i) {
- TestCase.assertTrue("Bit: " + i + " should be: " + uLongMax.testBit(i),
- uLongMax.testBit(i) == bi.testBit(i));
- }
- TestCase.assertEquals(ULONG_MAX, bi.toString());
-
- bb = ByteBuffer.allocate(10);
- bb.put((byte)0x00);
- bb.put((byte)0xff).put((byte)0xff).put((byte)0xff).put((byte)0xff);
- bb.put((byte)0xff).put((byte)0xff).put((byte)0xff).put((byte)0xff);
- bb.put((byte)0x00);
- bb.position(0);
- bb.limit(10);
- bi = Unsigned.getUnsignedLong(bb, 1);
- uLongMax = new BigInteger(ULONG_MAX);
- for (int i = 0; i < uLongMax.bitCount(); ++i) {
- TestCase.assertTrue("Bit: " + i + " should be: " + uLongMax.testBit(i),
- uLongMax.testBit(i) == bi.testBit(i));
- }
- TestCase.assertEquals(ULONG_MAX, bi.toString());
- }
-
- /**
- * Tests that we correctly put an unsigned long into a ByteBuffer
- * @throws Exception
- */
- public void testPutUnsignedLong() throws Exception {
- ByteBuffer bb = ByteBuffer.allocate(8);
- BigInteger uLongMax = new BigInteger(ULONG_MAX);
- Unsigned.putUnsignedLong(bb, uLongMax);
- for (int i = 0; i < 8; ++i) {
- TestCase.assertTrue("Byte: " + i + " should be 0xff, was: " + bb.get(i),
- (bb.get(i) & (short)0xff) == 0xff);
- }
-
- bb = ByteBuffer.allocate(10);
- Unsigned.putUnsignedLong(bb, uLongMax, 1);
- int offset = 1;
- for (int i = 0; i < 8; ++i) {
- TestCase.assertTrue("Byte: " + i + " should be 0xff, was: " +
- bb.get(offset+i), (bb.get(offset+i) & (short)0xff) == 0xff);
- }
- }
-}
+++ /dev/null
-See: http://tomcat.apache.org/tomcat-7.0-doc/config/filter.html#CORS_Filter
-And: http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
-This is done to allow a web page using javascript to be able to make calls
-to our REST APIs even though it does not originate in our domain.
-
-This bundle just rolls up org.apache.catalina.filters.CorsFilter and adds it as a
-fragment to the org.apache.catalina bundle.
-
-The reason this is necessary is because the CorsFilter class was originally added
-at Tomcat 7.0.42, and we are using 7.0.32. As the CorsFilter class is a simple one,
-with very few dependencies, this seemed the best way to bring it in.
-
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <!-- Get some common settings for the project we are using it in -->
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <relativePath>../commons/thirdparty</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
- <modelVersion>4.0.0</modelVersion>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>org.apache.catalina.filters.CorsFilter</artifactId>
- <version>7.1.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.3.6</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Fragment-Host>
- org.apache.catalina
- </Fragment-Host>
- <Import-Package>
- javax.servlet,
- javax.servlet.http,
- org.apache.catalina.filters,
- org.apache.tomcat.util.res,
- org.apache.catalina.comet,
- org.apache.tomcat.util
- </Import-Package>
- <Export-Package>
- org.apache.juli.*
- </Export-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <dependencies>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>javax.servlet</artifactId>
- <version>3.0.0.v201112011016</version>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.juli.extras</artifactId>
- <version>7.0.32.v201211081135</version>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.tomcat.util</artifactId>
- <version>7.0.32.v201211201952</version>
- </dependency>
- <dependency>
- <groupId>orbit</groupId>
- <artifactId>org.apache.catalina</artifactId>
- <version>7.0.32.v201211201336</version>
- </dependency>
- </dependencies>
-</project>
+++ /dev/null
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.catalina.filters;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.catalina.filters.Constants;
-import org.apache.juli.logging.Log;
-import org.apache.juli.logging.LogFactory;
-import org.apache.tomcat.util.res.StringManager;
-
-/**
- * <p>
- * A {@link Filter} that enable client-side cross-origin requests by
- * implementing W3C's CORS (<b>C</b>ross-<b>O</b>rigin <b>R</b>esource
- * <b>S</b>haring) specification for resources. Each {@link HttpServletRequest}
- * request is inspected as per specification, and appropriate response headers
- * are added to {@link HttpServletResponse}.
- * </p>
- *
- * <p>
- * By default, it also sets following request attributes, that help to
- * determine the nature of the request downstream.
- * <ul>
- * <li><b>cors.isCorsRequest:</b> Flag to determine if the request is a CORS
- * request. Set to <code>true</code> if a CORS request; <code>false</code>
- * otherwise.</li>
- * <li><b>cors.request.origin:</b> The Origin URL, i.e. the URL of the page from
- * where the request is originated.</li>
- * <li>
- * <b>cors.request.type:</b> Type of request. Possible values:
- * <ul>
- * <li>SIMPLE: A request which is not preceded by a pre-flight request.</li>
- * <li>ACTUAL: A request which is preceded by a pre-flight request.</li>
- * <li>PRE_FLIGHT: A pre-flight request.</li>
- * <li>NOT_CORS: A normal same-origin request.</li>
- * <li>INVALID_CORS: A cross-origin request which is invalid.</li>
- * </ul>
- * </li>
- * <li><b>cors.request.headers:</b> Request headers sent as
- * 'Access-Control-Request-Headers' header, for pre-flight request.</li>
- * </ul>
- * </p>
- *
- * @see <a href="http://www.w3.org/TR/cors/">CORS specification</a>
- *
- */
-public final class CorsFilter implements Filter {
-
- private static final Log log = LogFactory.getLog(CorsFilter.class);
-
- private static final StringManager sm =
- StringManager.getManager(Constants.Package);
-
-
- /**
- * A {@link Collection} of origins consisting of zero or more origins that
- * are allowed access to the resource.
- */
- private final Collection<String> allowedOrigins;
-
- /**
- * Determines if any origin is allowed to make request.
- */
- private boolean anyOriginAllowed;
-
- /**
- * A {@link Collection} of methods consisting of zero or more methods that
- * are supported by the resource.
- */
- private final Collection<String> allowedHttpMethods;
-
- /**
- * A {@link Collection} of headers consisting of zero or more header field
- * names that are supported by the resource.
- */
- private final Collection<String> allowedHttpHeaders;
-
- /**
- * A {@link Collection} of exposed headers consisting of zero or more header
- * field names of headers other than the simple response headers that the
- * resource might use and can be exposed.
- */
- private final Collection<String> exposedHeaders;
-
- /**
- * A supports credentials flag that indicates whether the resource supports
- * user credentials in the request. It is true when the resource does and
- * false otherwise.
- */
- private boolean supportsCredentials;
-
- /**
- * Indicates (in seconds) how long the results of a pre-flight request can
- * be cached in a pre-flight result cache.
- */
- private long preflightMaxAge;
-
- /**
- * Determines if the request should be decorated or not.
- */
- private boolean decorateRequest;
-
-
- public CorsFilter() {
- this.allowedOrigins = new HashSet<String>();
- this.allowedHttpMethods = new HashSet<String>();
- this.allowedHttpHeaders = new HashSet<String>();
- this.exposedHeaders = new HashSet<String>();
- }
-
-
- @Override
- public void doFilter(final ServletRequest servletRequest,
- final ServletResponse servletResponse, final FilterChain filterChain)
- throws IOException, ServletException {
- if (!(servletRequest instanceof HttpServletRequest) ||
- !(servletResponse instanceof HttpServletResponse)) {
- throw new ServletException(sm.getString("corsFilter.onlyHttp"));
- }
-
- // Safe to downcast at this point.
- HttpServletRequest request = (HttpServletRequest) servletRequest;
- HttpServletResponse response = (HttpServletResponse) servletResponse;
-
- // Determines the CORS request type.
- CorsFilter.CORSRequestType requestType = checkRequestType(request);
-
- // Adds CORS specific attributes to request.
- if (decorateRequest) {
- CorsFilter.decorateCORSProperties(request, requestType);
- }
- switch (requestType) {
- case SIMPLE:
- // Handles a Simple CORS request.
- this.handleSimpleCORS(request, response, filterChain);
- break;
- case ACTUAL:
- // Handles an Actual CORS request.
- this.handleSimpleCORS(request, response, filterChain);
- break;
- case PRE_FLIGHT:
- // Handles a Pre-flight CORS request.
- this.handlePreflightCORS(request, response, filterChain);
- break;
- case NOT_CORS:
- // Handles a Normal request that is not a cross-origin request.
- this.handleNonCORS(request, response, filterChain);
- break;
- default:
- // Handles a CORS request that violates specification.
- this.handleInvalidCORS(request, response, filterChain);
- break;
- }
- }
-
-
- @Override
- public void init(final FilterConfig filterConfig) throws ServletException {
- // Initialize defaults
- parseAndStore(DEFAULT_ALLOWED_ORIGINS, DEFAULT_ALLOWED_HTTP_METHODS,
- DEFAULT_ALLOWED_HTTP_HEADERS, DEFAULT_EXPOSED_HEADERS,
- DEFAULT_SUPPORTS_CREDENTIALS, DEFAULT_PREFLIGHT_MAXAGE,
- DEFAULT_DECORATE_REQUEST);
-
- if (filterConfig != null) {
- String configAllowedOrigins = filterConfig
- .getInitParameter(PARAM_CORS_ALLOWED_ORIGINS);
- String configAllowedHttpMethods = filterConfig
- .getInitParameter(PARAM_CORS_ALLOWED_METHODS);
- String configAllowedHttpHeaders = filterConfig
- .getInitParameter(PARAM_CORS_ALLOWED_HEADERS);
- String configExposedHeaders = filterConfig
- .getInitParameter(PARAM_CORS_EXPOSED_HEADERS);
- String configSupportsCredentials = filterConfig
- .getInitParameter(PARAM_CORS_SUPPORT_CREDENTIALS);
- String configPreflightMaxAge = filterConfig
- .getInitParameter(PARAM_CORS_PREFLIGHT_MAXAGE);
- String configDecorateRequest = filterConfig
- .getInitParameter(PARAM_CORS_REQUEST_DECORATE);
-
- parseAndStore(configAllowedOrigins, configAllowedHttpMethods,
- configAllowedHttpHeaders, configExposedHeaders,
- configSupportsCredentials, configPreflightMaxAge,
- configDecorateRequest);
- }
- }
-
-
- /**
- * Handles a CORS request of type {@link CORSRequestType}.SIMPLE.
- *
- * @param request
- * The {@link HttpServletRequest} object.
- * @param response
- * The {@link HttpServletResponse} object.
- * @param filterChain
- * The {@link FilterChain} object.
- * @throws IOException
- * @throws ServletException
- * @see <a href="http://www.w3.org/TR/cors/#resource-requests">Simple
- * Cross-Origin Request, Actual Request, and Redirects</a>
- */
- protected void handleSimpleCORS(final HttpServletRequest request,
- final HttpServletResponse response, final FilterChain filterChain)
- throws IOException, ServletException {
-
- CorsFilter.CORSRequestType requestType = checkRequestType(request);
- if (!(requestType == CorsFilter.CORSRequestType.SIMPLE ||
- requestType == CorsFilter.CORSRequestType.ACTUAL)) {
- throw new IllegalArgumentException(
- sm.getString("corsFilter.wrongType2",
- CorsFilter.CORSRequestType.SIMPLE,
- CorsFilter.CORSRequestType.ACTUAL));
- }
-
- final String origin = request
- .getHeader(CorsFilter.REQUEST_HEADER_ORIGIN);
- final String method = request.getMethod();
-
- // Section 6.1.2
- if (!isOriginAllowed(origin)) {
- handleInvalidCORS(request, response, filterChain);
- return;
- }
-
- if (!allowedHttpMethods.contains(method)) {
- handleInvalidCORS(request, response, filterChain);
- return;
- }
-
- // Section 6.1.3
- // Add a single Access-Control-Allow-Origin header.
- if (anyOriginAllowed && !supportsCredentials) {
- // If resource doesn't support credentials and if any origin is
- // allowed
- // to make CORS request, return header with '*'.
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
- "*");
- } else {
- // If the resource supports credentials add a single
- // Access-Control-Allow-Origin header, with the value of the Origin
- // header as value.
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
- origin);
- }
-
- // Section 6.1.3
- // If the resource supports credentials, add a single
- // Access-Control-Allow-Credentials header with the case-sensitive
- // string "true" as value.
- if (supportsCredentials) {
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_CREDENTIALS,
- "true");
- }
-
- // Section 6.1.4
- // If the list of exposed headers is not empty add one or more
- // Access-Control-Expose-Headers headers, with as values the header
- // field names given in the list of exposed headers.
- if ((exposedHeaders != null) && (exposedHeaders.size() > 0)) {
- String exposedHeadersString = join(exposedHeaders, ",");
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_EXPOSE_HEADERS,
- exposedHeadersString);
- }
-
- // Forward the request down the filter chain.
- filterChain.doFilter(request, response);
- }
-
-
- /**
- * Handles CORS pre-flight request.
- *
- * @param request
- * The {@link HttpServletRequest} object.
- * @param response
- * The {@link HttpServletResponse} object.
- * @param filterChain
- * The {@link FilterChain} object.
- * @throws IOException
- * @throws ServletException
- */
- protected void handlePreflightCORS(final HttpServletRequest request,
- final HttpServletResponse response, final FilterChain filterChain)
- throws IOException, ServletException {
-
- CORSRequestType requestType = checkRequestType(request);
- if (requestType != CORSRequestType.PRE_FLIGHT) {
- throw new IllegalArgumentException(
- sm.getString("corsFilter.wrongType1",
- CORSRequestType.PRE_FLIGHT.name().toLowerCase()));
- }
-
- final String origin = request
- .getHeader(CorsFilter.REQUEST_HEADER_ORIGIN);
-
- // Section 6.2.2
- if (!isOriginAllowed(origin)) {
- handleInvalidCORS(request, response, filterChain);
- return;
- }
-
- // Section 6.2.3
- String accessControlRequestMethod = request.getHeader(
- CorsFilter.REQUEST_HEADER_ACCESS_CONTROL_REQUEST_METHOD);
- if (accessControlRequestMethod == null ||
- !HTTP_METHODS.contains(accessControlRequestMethod.trim())) {
- handleInvalidCORS(request, response, filterChain);
- return;
- } else {
- accessControlRequestMethod = accessControlRequestMethod.trim();
- }
-
- // Section 6.2.4
- String accessControlRequestHeadersHeader = request.getHeader(
- CorsFilter.REQUEST_HEADER_ACCESS_CONTROL_REQUEST_HEADERS);
- List<String> accessControlRequestHeaders = new LinkedList<String>();
- if (accessControlRequestHeadersHeader != null &&
- !accessControlRequestHeadersHeader.trim().isEmpty()) {
- String[] headers = accessControlRequestHeadersHeader.trim().split(
- ",");
- for (String header : headers) {
- accessControlRequestHeaders.add(header.trim().toLowerCase());
- }
- }
-
- // Section 6.2.5
- if (!allowedHttpMethods.contains(accessControlRequestMethod)) {
- handleInvalidCORS(request, response, filterChain);
- return;
- }
-
- // Section 6.2.6
- if (!accessControlRequestHeaders.isEmpty()) {
- for (String header : accessControlRequestHeaders) {
- if (!allowedHttpHeaders.contains(header)) {
- handleInvalidCORS(request, response, filterChain);
- return;
- }
- }
- }
-
- // Section 6.2.7
- if (supportsCredentials) {
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
- origin);
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_CREDENTIALS,
- "true");
- } else {
- if (anyOriginAllowed) {
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
- "*");
- } else {
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN,
- origin);
- }
- }
-
- // Section 6.2.8
- if (preflightMaxAge > 0) {
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_MAX_AGE,
- String.valueOf(preflightMaxAge));
- }
-
- // Section 6.2.9
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_METHODS,
- accessControlRequestMethod);
-
- // Section 6.2.10
- if ((allowedHttpHeaders != null) && (!allowedHttpHeaders.isEmpty())) {
- response.addHeader(
- CorsFilter.RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_HEADERS,
- join(allowedHttpHeaders, ","));
- }
-
- // Do not forward the request down the filter chain.
- }
-
-
- /**
- * Handles a request, that's not a CORS request, but is a valid request i.e.
- * it is not a cross-origin request. This implementation, just forwards the
- * request down the filter chain.
- *
- * @param request
- * The {@link HttpServletRequest} object.
- * @param response
- * The {@link HttpServletResponse} object.
- * @param filterChain
- * The {@link FilterChain} object.
- * @throws IOException
- * @throws ServletException
- */
- private void handleNonCORS(final HttpServletRequest request,
- final HttpServletResponse response, final FilterChain filterChain)
- throws IOException, ServletException {
- // Let request pass.
- filterChain.doFilter(request, response);
- }
-
-
- /**
- * Handles a CORS request that violates specification.
- *
- * @param request
- * The {@link HttpServletRequest} object.
- * @param response
- * The {@link HttpServletResponse} object.
- * @param filterChain
- * The {@link FilterChain} object.
- */
- private void handleInvalidCORS(final HttpServletRequest request,
- final HttpServletResponse response, final FilterChain filterChain) {
- String origin = request.getHeader(CorsFilter.REQUEST_HEADER_ORIGIN);
- String method = request.getMethod();
- String accessControlRequestHeaders = request.getHeader(
- REQUEST_HEADER_ACCESS_CONTROL_REQUEST_HEADERS);
-
- response.setContentType("text/plain");
- response.setStatus(HttpServletResponse.SC_FORBIDDEN);
- response.resetBuffer();
-
- if (log.isDebugEnabled()) {
- // Debug so no need for i18n
- StringBuilder message =
- new StringBuilder("Invalid CORS request; Origin=");
- message.append(origin);
- message.append(";Method=");
- message.append(method);
- if (accessControlRequestHeaders != null) {
- message.append(";Access-Control-Request-Headers=");
- message.append(accessControlRequestHeaders);
- }
- log.debug(message.toString());
- }
- }
-
-
- @Override
- public void destroy() {
- // NOOP
- }
-
-
- /**
- * Decorates the {@link HttpServletRequest}, with CORS attributes.
- * <ul>
- * <li><b>cors.isCorsRequest:</b> Flag to determine if request is a CORS
- * request. Set to <code>true</code> if CORS request; <code>false</code>
- * otherwise.</li>
- * <li><b>cors.request.origin:</b> The Origin URL.</li>
- * <li><b>cors.request.type:</b> Type of request. Values:
- * <code>simple</code> or <code>preflight</code> or <code>not_cors</code> or
- * <code>invalid_cors</code></li>
- * <li><b>cors.request.headers:</b> Request headers sent as
- * 'Access-Control-Request-Headers' header, for pre-flight request.</li>
- * </ul>
- *
- * @param request
- * The {@link HttpServletRequest} object.
- * @param corsRequestType
- * The {@link CORSRequestType} object.
- */
- protected static void decorateCORSProperties(
- final HttpServletRequest request,
- final CORSRequestType corsRequestType) {
- if (request == null) {
- throw new IllegalArgumentException(
- sm.getString("corsFilter.nullRequest"));
- }
-
- if (corsRequestType == null) {
- throw new IllegalArgumentException(
- sm.getString("corsFilter.nullRequestType"));
- }
-
- switch (corsRequestType) {
- case SIMPLE:
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_IS_CORS_REQUEST,
- Boolean.TRUE);
- request.setAttribute(CorsFilter.HTTP_REQUEST_ATTRIBUTE_ORIGIN,
- request.getHeader(CorsFilter.REQUEST_HEADER_ORIGIN));
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_REQUEST_TYPE,
- corsRequestType.name().toLowerCase());
- break;
- case ACTUAL:
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_IS_CORS_REQUEST,
- Boolean.TRUE);
- request.setAttribute(CorsFilter.HTTP_REQUEST_ATTRIBUTE_ORIGIN,
- request.getHeader(CorsFilter.REQUEST_HEADER_ORIGIN));
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_REQUEST_TYPE,
- corsRequestType.name().toLowerCase());
- break;
- case PRE_FLIGHT:
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_IS_CORS_REQUEST,
- Boolean.TRUE);
- request.setAttribute(CorsFilter.HTTP_REQUEST_ATTRIBUTE_ORIGIN,
- request.getHeader(CorsFilter.REQUEST_HEADER_ORIGIN));
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_REQUEST_TYPE,
- corsRequestType.name().toLowerCase());
- String headers = request.getHeader(
- REQUEST_HEADER_ACCESS_CONTROL_REQUEST_HEADERS);
- if (headers == null) {
- headers = "";
- }
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_REQUEST_HEADERS, headers);
- break;
- case NOT_CORS:
- request.setAttribute(
- CorsFilter.HTTP_REQUEST_ATTRIBUTE_IS_CORS_REQUEST,
- Boolean.FALSE);
- break;
- default:
- // Don't set any attributes
- break;
- }
- }
-
-
- /**
- * Joins elements of {@link Set} into a string, where each element is
- * separated by the provided separator.
- *
- * @param elements
- * The {@link Set} containing elements to join together.
- * @param joinSeparator
- * The character to be used for separating elements.
- * @return The joined {@link String}; <code>null</code> if elements
- * {@link Set} is null.
- */
- protected static String join(final Collection<String> elements,
- final String joinSeparator) {
- String separator = ",";
- if (elements == null) {
- return null;
- }
- if (joinSeparator != null) {
- separator = joinSeparator;
- }
- StringBuilder buffer = new StringBuilder();
- boolean isFirst = true;
- for (String element : elements) {
- if (!isFirst) {
- buffer.append(separator);
- } else {
- isFirst = false;
- }
-
- if (element != null) {
- buffer.append(element);
- }
- }
-
- return buffer.toString();
- }
-
-
- /**
- * Determines the request type.
- *
- * @param request
- */
- protected CORSRequestType checkRequestType(final HttpServletRequest request) {
- CORSRequestType requestType = CORSRequestType.INVALID_CORS;
- if (request == null) {
- throw new IllegalArgumentException(
- sm.getString("corsFilter.nullRequest"));
- }
- String originHeader = request.getHeader(REQUEST_HEADER_ORIGIN);
- // Section 6.1.1 and Section 6.2.1
- if (originHeader != null) {
- if (originHeader.isEmpty()) {
- requestType = CORSRequestType.INVALID_CORS;
- } else if (!isValidOrigin(originHeader)) {
- requestType = CORSRequestType.INVALID_CORS;
- } else {
- String method = request.getMethod();
- if (method != null && HTTP_METHODS.contains(method)) {
- if ("OPTIONS".equals(method)) {
- String accessControlRequestMethodHeader =
- request.getHeader(
- REQUEST_HEADER_ACCESS_CONTROL_REQUEST_METHOD);
- if (accessControlRequestMethodHeader != null &&
- !accessControlRequestMethodHeader.isEmpty()) {
- requestType = CORSRequestType.PRE_FLIGHT;
- } else if (accessControlRequestMethodHeader != null &&
- accessControlRequestMethodHeader.isEmpty()) {
- requestType = CORSRequestType.INVALID_CORS;
- } else {
- requestType = CORSRequestType.ACTUAL;
- }
- } else if ("GET".equals(method) || "HEAD".equals(method)) {
- requestType = CORSRequestType.SIMPLE;
- } else if ("POST".equals(method)) {
- String contentType = request.getContentType();
- if (contentType != null) {
- contentType = contentType.toLowerCase().trim();
- if (SIMPLE_HTTP_REQUEST_CONTENT_TYPE_VALUES
- .contains(contentType)) {
- requestType = CORSRequestType.SIMPLE;
- } else {
- requestType = CORSRequestType.ACTUAL;
- }
- }
- } else if (COMPLEX_HTTP_METHODS.contains(method)) {
- requestType = CORSRequestType.ACTUAL;
- }
- }
- }
- } else {
- requestType = CORSRequestType.NOT_CORS;
- }
-
- return requestType;
- }
-
-
- /**
- * Checks if the Origin is allowed to make a CORS request.
- *
- * @param origin
- * The Origin.
- * @return <code>true</code> if origin is allowed; <code>false</code>
- * otherwise.
- */
- private boolean isOriginAllowed(final String origin) {
- if (anyOriginAllowed) {
- return true;
- }
-
- // If 'Origin' header is a case-sensitive match of any of allowed
- // origins, then return true, else return false.
- return allowedOrigins.contains(origin);
- }
-
-
- /**
- * Parses each param-value and populates configuration variables. If a param
- * is provided, it overrides the default.
- *
- * @param allowedOrigins
- * A {@link String} of comma separated origins.
- * @param allowedHttpMethods
- * A {@link String} of comma separated HTTP methods.
- * @param allowedHttpHeaders
- * A {@link String} of comma separated HTTP headers.
- * @param exposedHeaders
- * A {@link String} of comma separated headers that needs to be
- * exposed.
- * @param supportsCredentials
- * "true" if support credentials needs to be enabled.
- * @param preflightMaxAge
- * The amount of seconds the user agent is allowed to cache the
- * result of the pre-flight request.
- * @throws ServletException
- */
- private void parseAndStore(final String allowedOrigins,
- final String allowedHttpMethods, final String allowedHttpHeaders,
- final String exposedHeaders, final String supportsCredentials,
- final String preflightMaxAge, final String decorateRequest)
- throws ServletException {
- if (allowedOrigins != null) {
- if (allowedOrigins.trim().equals("*")) {
- this.anyOriginAllowed = true;
- } else {
- this.anyOriginAllowed = false;
- Set<String> setAllowedOrigins =
- parseStringToSet(allowedOrigins);
- this.allowedOrigins.clear();
- this.allowedOrigins.addAll(setAllowedOrigins);
- }
- }
-
- if (allowedHttpMethods != null) {
- Set<String> setAllowedHttpMethods =
- parseStringToSet(allowedHttpMethods);
- this.allowedHttpMethods.clear();
- this.allowedHttpMethods.addAll(setAllowedHttpMethods);
- }
-
- if (allowedHttpHeaders != null) {
- Set<String> setAllowedHttpHeaders =
- parseStringToSet(allowedHttpHeaders);
- Set<String> lowerCaseHeaders = new HashSet<String>();
- for (String header : setAllowedHttpHeaders) {
- String lowerCase = header.toLowerCase();
- lowerCaseHeaders.add(lowerCase);
- }
- this.allowedHttpHeaders.clear();
- this.allowedHttpHeaders.addAll(lowerCaseHeaders);
- }
-
- if (exposedHeaders != null) {
- Set<String> setExposedHeaders = parseStringToSet(exposedHeaders);
- this.exposedHeaders.clear();
- this.exposedHeaders.addAll(setExposedHeaders);
- }
-
- if (supportsCredentials != null) {
- // For any value other then 'true' this will be false.
- this.supportsCredentials = Boolean
- .parseBoolean(supportsCredentials);
- }
-
- if (preflightMaxAge != null) {
- try {
- if (!preflightMaxAge.isEmpty()) {
- this.preflightMaxAge = Long.parseLong(preflightMaxAge);
- } else {
- this.preflightMaxAge = 0L;
- }
- } catch (NumberFormatException e) {
- throw new ServletException(
- sm.getString("corsFilter.invalidPreflightMaxAge"), e);
- }
- }
-
- if (decorateRequest != null) {
- // For any value other then 'true' this will be false.
- this.decorateRequest = Boolean.parseBoolean(decorateRequest);
- }
- }
-
- /**
- * Takes a comma separated list and returns a Set<String>.
- *
- * @param data
- * A comma separated list of strings.
- * @return Set<String>
- */
- private Set<String> parseStringToSet(final String data) {
- String[] splits;
-
- if (data != null && data.length() > 0) {
- splits = data.split(",");
- } else {
- splits = new String[] {};
- }
-
- Set<String> set = new HashSet<String>();
- if (splits.length > 0) {
- for (String split : splits) {
- set.add(split.trim());
- }
- }
-
- return set;
- }
-
-
- /**
- * Checks if a given origin is valid or not. Criteria:
- * <ul>
- * <li>If an encoded character is present in origin, it's not valid.</li>
- * <li>Origin should be a valid {@link URI}</li>
- * </ul>
- *
- * @param origin
- * @see <a href="http://tools.ietf.org/html/rfc952">RFC952</a>
- */
- protected static boolean isValidOrigin(String origin) {
- // Checks for encoded characters. Helps prevent CRLF injection.
- if (origin.contains("%")) {
- return false;
- }
-
- URI originURI;
-
- try {
- originURI = new URI(origin);
- } catch (URISyntaxException e) {
- return false;
- }
- // If scheme for URI is null, return false. Return true otherwise.
- return originURI.getScheme() != null;
-
- }
-
-
- /**
- * Determines if any origin is allowed to make CORS request.
- *
- * @return <code>true</code> if it's enabled; false otherwise.
- */
- public boolean isAnyOriginAllowed() {
- return anyOriginAllowed;
- }
-
-
- /**
- * Returns a {@link Set} of headers that should be exposed by browser.
- */
- public Collection<String> getExposedHeaders() {
- return exposedHeaders;
- }
-
-
- /**
- * Determines is supports credentials is enabled.
- */
- public boolean isSupportsCredentials() {
- return supportsCredentials;
- }
-
-
- /**
- * Returns the preflight response cache time in seconds.
- *
- * @return Time to cache in seconds.
- */
- public long getPreflightMaxAge() {
- return preflightMaxAge;
- }
-
-
- /**
- * Returns the {@link Set} of allowed origins that are allowed to make
- * requests.
- *
- * @return {@link Set}
- */
- public Collection<String> getAllowedOrigins() {
- return allowedOrigins;
- }
-
-
- /**
- * Returns a {@link Set} of HTTP methods that are allowed to make requests.
- *
- * @return {@link Set}
- */
- public Collection<String> getAllowedHttpMethods() {
- return allowedHttpMethods;
- }
-
-
- /**
- * Returns a {@link Set} of headers support by resource.
- *
- * @return {@link Set}
- */
- public Collection<String> getAllowedHttpHeaders() {
- return allowedHttpHeaders;
- }
-
-
- // -------------------------------------------------- CORS Response Headers
- /**
- * The Access-Control-Allow-Origin header indicates whether a resource can
- * be shared based by returning the value of the Origin request header in
- * the response.
- */
- public static final String RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN =
- "Access-Control-Allow-Origin";
-
- /**
- * The Access-Control-Allow-Credentials header indicates whether the
- * response to request can be exposed when the omit credentials flag is
- * unset. When part of the response to a preflight request it indicates that
- * the actual request can include user credentials.
- */
- public static final String RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_CREDENTIALS =
- "Access-Control-Allow-Credentials";
-
- /**
- * The Access-Control-Expose-Headers header indicates which headers are safe
- * to expose to the API of a CORS API specification
- */
- public static final String RESPONSE_HEADER_ACCESS_CONTROL_EXPOSE_HEADERS =
- "Access-Control-Expose-Headers";
-
- /**
- * The Access-Control-Max-Age header indicates how long the results of a
- * preflight request can be cached in a preflight result cache.
- */
- public static final String RESPONSE_HEADER_ACCESS_CONTROL_MAX_AGE =
- "Access-Control-Max-Age";
-
- /**
- * The Access-Control-Allow-Methods header indicates, as part of the
- * response to a preflight request, which methods can be used during the
- * actual request.
- */
- public static final String RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_METHODS =
- "Access-Control-Allow-Methods";
-
- /**
- * The Access-Control-Allow-Headers header indicates, as part of the
- * response to a preflight request, which header field names can be used
- * during the actual request.
- */
- public static final String RESPONSE_HEADER_ACCESS_CONTROL_ALLOW_HEADERS =
- "Access-Control-Allow-Headers";
-
- // -------------------------------------------------- CORS Request Headers
- /**
- * The Origin header indicates where the cross-origin request or preflight
- * request originates from.
- */
- public static final String REQUEST_HEADER_ORIGIN = "Origin";
-
- /**
- * The Access-Control-Request-Method header indicates which method will be
- * used in the actual request as part of the preflight request.
- */
- public static final String REQUEST_HEADER_ACCESS_CONTROL_REQUEST_METHOD =
- "Access-Control-Request-Method";
-
- /**
- * The Access-Control-Request-Headers header indicates which headers will be
- * used in the actual request as part of the preflight request.
- */
- public static final String REQUEST_HEADER_ACCESS_CONTROL_REQUEST_HEADERS =
- "Access-Control-Request-Headers";
-
- // ----------------------------------------------------- Request attributes
- /**
- * The prefix to a CORS request attribute.
- */
- public static final String HTTP_REQUEST_ATTRIBUTE_PREFIX = "cors.";
-
- /**
- * Attribute that contains the origin of the request.
- */
- public static final String HTTP_REQUEST_ATTRIBUTE_ORIGIN =
- HTTP_REQUEST_ATTRIBUTE_PREFIX + "request.origin";
-
- /**
- * Boolean value, suggesting if the request is a CORS request or not.
- */
- public static final String HTTP_REQUEST_ATTRIBUTE_IS_CORS_REQUEST =
- HTTP_REQUEST_ATTRIBUTE_PREFIX + "isCorsRequest";
-
- /**
- * Type of CORS request, of type {@link CORSRequestType}.
- */
- public static final String HTTP_REQUEST_ATTRIBUTE_REQUEST_TYPE =
- HTTP_REQUEST_ATTRIBUTE_PREFIX + "request.type";
-
- /**
- * Request headers sent as 'Access-Control-Request-Headers' header, for
- * pre-flight request.
- */
- public static final String HTTP_REQUEST_ATTRIBUTE_REQUEST_HEADERS =
- HTTP_REQUEST_ATTRIBUTE_PREFIX + "request.headers";
-
- // -------------------------------------------------------------- Constants
- /**
- * Enumerates varies types of CORS requests. Also, provides utility methods
- * to determine the request type.
- */
- protected static enum CORSRequestType {
- /**
- * A simple HTTP request, i.e. it shouldn't be pre-flighted.
- */
- SIMPLE,
- /**
- * A HTTP request that needs to be pre-flighted.
- */
- ACTUAL,
- /**
- * A pre-flight CORS request, to get meta information, before a
- * non-simple HTTP request is sent.
- */
- PRE_FLIGHT,
- /**
- * Not a CORS request, but a normal request.
- */
- NOT_CORS,
- /**
- * An invalid CORS request, i.e. it qualifies to be a CORS request, but
- * fails to be a valid one.
- */
- INVALID_CORS
- }
-
- /**
- * {@link Collection} of HTTP methods. Case sensitive.
- *
- * @see <a href="http://tools.ietf.org/html/rfc2616#section-5.1.1"
- * >http://tools.ietf.org/html/rfc2616#section-5.1.1</a>
- *
- */
- public static final Collection<String> HTTP_METHODS =
- new HashSet<String>(Arrays.asList("OPTIONS", "GET", "HEAD", "POST",
- "PUT", "DELETE", "TRACE", "CONNECT"));
- /**
- * {@link Collection} of non-simple HTTP methods. Case sensitive.
- */
- public static final Collection<String> COMPLEX_HTTP_METHODS =
- new HashSet<String>(Arrays.asList("PUT", "DELETE", "TRACE",
- "CONNECT"));
- /**
- * {@link Collection} of Simple HTTP methods. Case sensitive.
- *
- * @see <a href="http://www.w3.org/TR/cors/#terminology"
- * >http://www.w3.org/TR/cors/#terminology</a>
- */
- public static final Collection<String> SIMPLE_HTTP_METHODS =
- new HashSet<String>(Arrays.asList("GET", "POST", "HEAD"));
-
- /**
- * {@link Collection} of Simple HTTP request headers. Case in-sensitive.
- *
- * @see <a href="http://www.w3.org/TR/cors/#terminology"
- * >http://www.w3.org/TR/cors/#terminology</a>
- */
- public static final Collection<String> SIMPLE_HTTP_REQUEST_HEADERS =
- new HashSet<String>(Arrays.asList("Accept", "Accept-Language",
- "Content-Language"));
-
- /**
- * {@link Collection} of Simple HTTP request headers. Case in-sensitive.
- *
- * @see <a href="http://www.w3.org/TR/cors/#terminology"
- * >http://www.w3.org/TR/cors/#terminology</a>
- */
- public static final Collection<String> SIMPLE_HTTP_RESPONSE_HEADERS =
- new HashSet<String>(Arrays.asList("Cache-Control",
- "Content-Language", "Content-Type", "Expires",
- "Last-Modified", "Pragma"));
-
- /**
- * {@link Collection} of Simple HTTP request headers. Case in-sensitive.
- *
- * @see <a href="http://www.w3.org/TR/cors/#terminology"
- * >http://www.w3.org/TR/cors/#terminology</a>
- */
- public static final Collection<String> SIMPLE_HTTP_REQUEST_CONTENT_TYPE_VALUES =
- new HashSet<String>(Arrays.asList(
- "application/x-www-form-urlencoded",
- "multipart/form-data", "text/plain"));
-
- // ------------------------------------------------ Configuration Defaults
- /**
- * By default, all origins are allowed to make requests.
- */
- public static final String DEFAULT_ALLOWED_ORIGINS = "*";
-
- /**
- * By default, following methods are supported: GET, POST, HEAD and OPTIONS.
- */
- public static final String DEFAULT_ALLOWED_HTTP_METHODS =
- "GET,POST,HEAD,OPTIONS";
-
- /**
- * By default, time duration to cache pre-flight response is 30 mins.
- */
- public static final String DEFAULT_PREFLIGHT_MAXAGE = "1800";
-
- /**
- * By default, support credentials is turned on.
- */
- public static final String DEFAULT_SUPPORTS_CREDENTIALS = "true";
-
- /**
- * By default, following headers are supported:
- * Origin,Accept,X-Requested-With, Content-Type,
- * Access-Control-Request-Method, and Access-Control-Request-Headers.
- */
- public static final String DEFAULT_ALLOWED_HTTP_HEADERS =
- "Origin,Accept,X-Requested-With,Content-Type," +
- "Access-Control-Request-Method,Access-Control-Request-Headers";
-
- /**
- * By default, none of the headers are exposed in response.
- */
- public static final String DEFAULT_EXPOSED_HEADERS = "";
-
- /**
- * By default, request is decorated with CORS attributes.
- */
- public static final String DEFAULT_DECORATE_REQUEST = "true";
-
- // ----------------------------------------Filter Config Init param-name(s)
- /**
- * Key to retrieve allowed origins from {@link FilterConfig}.
- */
- public static final String PARAM_CORS_ALLOWED_ORIGINS =
- "cors.allowed.origins";
-
- /**
- * Key to retrieve support credentials from {@link FilterConfig}.
- */
- public static final String PARAM_CORS_SUPPORT_CREDENTIALS =
- "cors.support.credentials";
-
- /**
- * Key to retrieve exposed headers from {@link FilterConfig}.
- */
- public static final String PARAM_CORS_EXPOSED_HEADERS =
- "cors.exposed.headers";
-
- /**
- * Key to retrieve allowed headers from {@link FilterConfig}.
- */
- public static final String PARAM_CORS_ALLOWED_HEADERS =
- "cors.allowed.headers";
-
- /**
- * Key to retrieve allowed methods from {@link FilterConfig}.
- */
- public static final String PARAM_CORS_ALLOWED_METHODS =
- "cors.allowed.methods";
-
- /**
- * Key to retrieve preflight max age from {@link FilterConfig}.
- */
- public static final String PARAM_CORS_PREFLIGHT_MAXAGE =
- "cors.preflight.maxage";
-
- /**
- * Key to determine if request should be decorated.
- */
- public static final String PARAM_CORS_REQUEST_DECORATE =
- "cors.request.decorate";
-}