--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Necessary TODO: Put your copyright here.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+--><project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.2-SNAPSHOT</version>
+ <relativePath>../../opendaylight/commons/opendaylight</relativePath>
+ </parent>
+ <artifactId>features-akka</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <packaging>jar</packaging>
+ <properties>
+ <features.file>features.xml</features.file>
+ <!-- Optional TODO: Move these properties to your parent pom and possibly
+ DependencyManagement section of your parent pom -->
+ <branding.version>1.0.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.4.2-SNAPSHOT</karaf.resources.version>
+ <karaf.version>3.0.1</karaf.version>
+ <feature.test.version>0.6.2-SNAPSHOT</feature.test.version>
+ <karaf.empty.version>1.4.2-SNAPSHOT</karaf.empty.version>
+ <surefire.version>2.16</surefire.version>
+ </properties>
+ <dependencies>
+ <!--
+ Necessary TODO: Put dependencies on any feature repos
+ you use in your features.xml file.
+
+      Note: they will need to be <type>xml</type>
+ and <classifier>features</classifier>.
+ One other thing to watch for is to make sure they are
+      <scope>compile</scope>, which they should be by default,
+ but be cautious lest they be at a different scope in a parent pom.
+
+ Examples:
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-yangtools</artifactId>
+ <version>0.6.2-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-mdsal</artifactId>
+ <version>1.1-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>features-openflowplugin</artifactId>
+ <version>0.0.3-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ -->
+
+ <!--
+ Necessary TODO: Put dependencies for bundles directly referenced
+ in your features.xml file. For every <bundle> reference in your
+ features.xml file, you need a corresponding dependency here.
+
+ Examples:
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-provider</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-model</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ -->
+
+ <!--
+ Necessary TODO: Put dependencies for configfiles directly referenced
+ in your features.xml file. For every <configfile> reference in your
+ features.xml file, you need a corresponding dependency here.
+
+ Example (presuming here version is coming from the parent pom):
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-config</artifactId>
+ <version>${project.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+ -->
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ <version>${scala.version}.${scala.micro.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-reflect</artifactId>
+ <version>${scala.version}.${scala.micro.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe</groupId>
+ <artifactId>config</artifactId>
+ <version>${typesafe.config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-actor_${scala.version}</artifactId>
+ <version>${akka.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-slf4j_${scala.version}</artifactId>
+ <version>${akka.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-osgi_${scala.version}</artifactId>
+ <version>${akka.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.uncommons.maths</groupId>
+ <artifactId>uncommons-maths</artifactId>
+ <version>${uncommons.maths.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>jfree</groupId>
+ <artifactId>jcommon</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>jfree</groupId>
+ <artifactId>jfreechart</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>${protobuf.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty</artifactId>
+ <version>3.8.0.Final</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-remote_${scala.version}</artifactId>
+ <version>${akka.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-cluster_${scala.version}</artifactId>
+ <version>${akka.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.iq80.leveldb</groupId>
+ <artifactId>leveldb</artifactId>
+ <version>${leveldb.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.fusesource.leveldbjni</groupId>
+ <artifactId>leveldbjni-all</artifactId>
+ <version>${leveldbjni.version}</version>
+ </dependency>
+ <!--
+ Optional TODO: Remove TODO comments.
+ -->
+ <!-- test to validate features.xml -->
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-test</artifactId>
+ <version>${feature.test.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <!-- dependency for opendaylight-karaf-empty for use by testing -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>opendaylight-karaf-empty</artifactId>
+ <version>${karaf.empty.version}</version>
+ <type>zip</type>
+ </dependency>
+ <!-- Uncomment this if you get an error : java.lang.NoSuchMethodError: org.slf4j.helpers.MessageFormatter.format(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Object;)Lorg/slf4j/helpers/FormattingTuple;
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <version>1.7.2</version>
+ </dependency>
+ -->
+
+ </dependencies>
+ <build>
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>filter</id>
+ <phase>generate-resources</phase>
+ <goals>
+ <goal>resources</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <phase>package</phase>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/${features.file}</file>
+ <type>xml</type>
+ <classifier>features</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${surefire.version}</version>
+ <configuration>
+ <systemPropertyVariables>
+ <karaf.distro.groupId>org.opendaylight.controller</karaf.distro.groupId>
+ <karaf.distro.artifactId>opendaylight-karaf-empty</karaf.distro.artifactId>
+ <karaf.distro.version>${karaf.empty.version}</karaf.distro.version>
+ </systemPropertyVariables>
+ <dependenciesToScan>
+ <dependency>org.opendaylight.yangtools:features-test</dependency>
+ </dependenciesToScan>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=summary</url>
+ </scm>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Necessary TODO: Put your copyright statement here
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<features name="odl-controller-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+ <!--
+ Necessary TODO: Please read the features guidelines:
+ https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Feature_Best_Practices
+ -->
+ <!--
+ Necessary TODO: Add repo entries for the repositories of features you refer to
+ in this feature file but do not define here.
+ Examples:
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.6.2-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.1-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.0.3-SNAPSHOT/xml/features</repository>
+ -->
+ <feature name='odl-akka-all' version='${project.version}' description='OpenDaylight :: Akka :: All'>
+ <!--
+ Necessary TODO:
+ List all of the user consumable features you define in this feature file here.
+ Generally you would *not* list individual bundles here, but only features defined in *this* file.
+ It is useful to list them in the same order they occur in the file.
+
+ Examples:
+ <feature version='${project.version}'>odl-controller-provider</feature>
+ <feature version='${project.version}'>odl-controller-model</feature>
+ -->
+ <feature version="${scala.version}">odl-akka-scala</feature>
+ <feature version="${akka.version}">odl-akka-system</feature>
+ <feature version="${akka.version}">odl-akka-clustering</feature>
+ <feature version='0.7'>odl-akka-leveldb</feature>
+ <feature version="${akka.version}">odl-akka-persistence</feature>
+ </feature>
+ <!--
+    Necessary TODO: Define your features.  It is useful to list them in order of dependency.  So if A depends on B, list A first.
+ When naming your features please be mindful of the guidelines:
+ https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines
+ Particularly:
+ a) Prefixing names with 'odl-': https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Feature_Naming
+ b) Descriptions: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Description
+ c) Avoid start-levels: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Avoid_start-levels
+
+ It's also nice to list inside a feature, first the features it needs, then the bundles it needs, then the configfiles.
+ Examples:
+
+ * Basic MD-SAL Provider
+ <feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
+ <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-controller-model</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
+ ... whatever other bundles you need
+ </feature>
+
+ * Basic MD-SAL Model feature
+ <feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
+ <feature version='0.6.2-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.6.2-SNAPSHOT'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
+ ... whatever other bundles you need
+ </feature>
+
+ * Config Subsystem example - the config file is your config subsystem configuration
+ <feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
+ <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
+ <configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
+ ... whatever other bundles you need
+ </feature>
+
+ * Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
+ <feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
+ <feature version='0.0.3-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
+ ... whatever other bundles you need
+ </feature>
+
+ -->
+ <feature name="odl-akka-scala" description="Scala Runtime for OpenDaylight" version="${scala.version}">
+ <bundle>mvn:org.scala-lang/scala-library/${scala.version}.${scala.micro.version}</bundle>
+ <bundle>mvn:org.scala-lang/scala-reflect/${scala.version}.${scala.micro.version}</bundle>
+ </feature>
+ <feature name="odl-akka-system" description="Akka Actor Framework System Bundles" version="${akka.version}">
+ <feature version="${scala.version}">odl-akka-scala</feature>
+ <bundle>mvn:com.typesafe/config/${typesafe.config.version}</bundle>
+ <bundle>mvn:com.typesafe.akka/akka-actor_${scala.version}/${akka.version}</bundle>
+ <bundle>mvn:com.typesafe.akka/akka-slf4j_${scala.version}/${akka.version}</bundle>
+ <bundle>mvn:com.typesafe.akka/akka-osgi_${scala.version}/${akka.version}</bundle>
+ </feature>
+ <feature name="odl-akka-clustering" description="Akka Clustering" version="${akka.version}">
+ <feature version="${akka.version}">odl-akka-system</feature>
+ <bundle>wrap:mvn:org.uncommons.maths/uncommons-maths/${uncommons.maths.version}</bundle>
+ <bundle>mvn:com.google.protobuf/protobuf-java/${protobuf.version}</bundle>
+ <bundle>mvn:io.netty/netty/3.8.0.Final</bundle>
+ <bundle>mvn:com.typesafe.akka/akka-remote_${scala.version}/${akka.version}</bundle>
+ <bundle>mvn:com.typesafe.akka/akka-cluster_${scala.version}/${akka.version}</bundle>
+ </feature>
+ <feature name='odl-akka-leveldb' description='LevelDB' version='0.7'>
+ <bundle>wrap:mvn:org.iq80.leveldb/leveldb/${leveldb.version}</bundle>
+ <bundle>mvn:org.fusesource.leveldbjni/leveldbjni-all/${leveldbjni.version}</bundle>
+ </feature>
+ <feature name='odl-akka-persistence' description='Akka Persistence' version="${akka.version}">
+ <feature version='0.7'>odl-akka-leveldb</feature>
+ <feature version="${akka.version}">odl-akka-system</feature>
+ <bundle>mvn:com.typesafe.akka/akka-persistence-experimental_${scala.version}/${akka.version}</bundle>
+    <bundle>wrap:mvn:com.google.protobuf/protobuf-java/${protobuf.version}$overwrite=merge&amp;DynamicImport-Package=org.opendaylight.controller.protobuff.messages.*;org.opendaylight.controller.cluster.raft.protobuff.client.messages.*</bundle>
+ </feature>
+ <!-- Optional TODO: Remove TODO Comments -->
+
+</features>
<classifier>features</classifier>
<type>xml</type>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-akka</artifactId>
+ <version>${commons.opendaylight.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-core-api</artifactId>
<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-distributed-datastore</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-remoterpc-connector</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-commons</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-akka-raft</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-clustering-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-netconf-connector</artifactId>
<repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-config-persister/${config.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-config-netty/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
<feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
<feature version='${project.version}'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-mdsal-netconf-connector</feature>
<feature version='${project.version}'>odl-restconf</feature>
<feature version='${project.version}'>odl-mdsal-xsql</feature>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
<feature version='${project.version}'>odl-toaster</feature>
</feature>
<feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
<bundle>mvn:org.opendaylight.controller/sal-netconf-connector/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${config.version}</bundle>
+ <configfile finalname='${config.configfile.directory}/${config.netconf.client.configfile}'>mvn:org.opendaylight.controller/netconf-config/${netconf.version}/xml/config</configfile>
+ </feature>
+ <feature name='odl-mdsal-netconf-connector-ssh' version='${project.version}' description="OpenDaylight :: MDSAL :: Netconf Connector + Netconf SSH Server + loopback connection configuration">
+ <feature version='${netconf.version}'>odl-netconf-ssh</feature>
+ <feature version='${project.version}'>odl-mdsal-netconf-connector</feature>
<configfile finalname="${config.configfile.directory}/${config.netconf.connector.configfile}">mvn:org.opendaylight.controller/netconf-connector-config/${netconf.version}/xml/config</configfile>
</feature>
<feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
<bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
<bundle>wrap:mvn:org.json/json/${org.json.version}</bundle>
</feature>
+ <feature name ='odl-mdsal-clustering-commons' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${akka.version}'>odl-akka-system</feature>
+ <feature version='${akka.version}'>odl-akka-persistence</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-clustering-commons/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-akka-raft/${project.version}</bundle>
+ <bundle>mvn:com.codahale.metrics/metrics-core/3.0.1</bundle>
+ </feature>
+ <feature name ='odl-mdsal-distributed-datastore' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
+ <feature version='${akka.version}'>odl-akka-clustering</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-distributed-datastore/${project.version}</bundle>
+ </feature>
+ <feature name ='odl-mdsal-remoterpc-connector' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
+ <feature version='${akka.version}'>odl-akka-clustering</feature>
+ <feature version='0.7'>odl-akka-leveldb</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-remoterpc-connector/${project.version}</bundle>
+ </feature>
+ <feature name ='odl-mdsal-clustering' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-remoterpc-connector</feature>
+ <feature version='${project.version}'>odl-mdsal-distributed-datastore</feature>
+ <configfile finalname="${config.configfile.directory}/${config.clustering.configfile}">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/config</configfile>
+ <configfile finalname="configuration/initial/akka.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf</configfile>
+ <configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
+ <configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
+ </feature>
</features>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-auth</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-tcp</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-ssh</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk15on</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>ietf-netconf-monitoring</artifactId>
<type>xml</type>
<classifier>config</classifier>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-connector-config</artifactId>
+ <version>${config.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-monitoring</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.aaa</groupId>
+ <artifactId>features-aaa</artifactId>
+ <version>${aaa.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
</dependencies>
<build>
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
<repository>mvn:org.opendaylight.controller/features-protocol-framework/${protocol-framework.version}/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository>
+
<feature name='odl-netconf-all' version='${project.version}' description="OpenDaylight :: Netconf :: All">
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
<feature version='${project.version}'>odl-netconf-impl</feature>
+ <feature version='${project.version}'>odl-netconf-tcp</feature>
+ <feature version='${project.version}'>odl-netconf-ssh</feature>
<feature version='${project.version}'>odl-config-netconf-connector</feature>
<feature version='${project.version}'>odl-netconf-netty-util</feature>
<feature version='${project.version}'>odl-netconf-client</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
<feature version='${project.version}'>odl-netconf-netty-util</feature>
+ <!-- Netconf server without config connector is just an empty shell -->
+ <feature version='${project.version}'>odl-config-netconf-connector</feature>
+ <!-- Netconf will not provide schemas without monitoring -->
+ <feature version='${project.version}'>odl-netconf-monitoring</feature>
<bundle>mvn:org.opendaylight.controller/netconf-impl/${project.version}</bundle>
</feature>
+  <feature name='odl-netconf-ssh' version='${project.version}' description="OpenDaylight :: Netconf :: SSH">
+ <feature version='${project.version}'>odl-netconf-tcp</feature>
+ <feature version='${aaa.version}'>odl-aaa-authn-plugin</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-ssh/${project.version}</bundle>
+ <bundle>mvn:org.bouncycastle/bcpkix-jdk15on/${bouncycastle.version}</bundle>
+ <bundle>mvn:org.bouncycastle/bcprov-jdk15on/${bouncycastle.version}</bundle>
+ </feature>
+ <feature name='odl-netconf-tcp' version='${project.version}' description="OpenDaylight :: Netconf :: TCP">
+ <feature version='${project.version}'>odl-netconf-impl</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-tcp/${project.version}</bundle>
+ </feature>
<feature name='odl-config-netconf-connector' version='${project.version}' description="OpenDaylight :: Netconf :: Connector">
<feature version='${config.version}'>odl-config-manager</feature>
<feature version='${project.version}'>odl-netconf-api</feature>
<feature name='odl-netconf-client' version='${project.version}' description="OpenDaylight :: Netconf :: Client">
<feature version='${project.version}'>odl-netconf-netty-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-client/${project.version}</bundle>
- <configfile finalname='${config.configfile.directory}/${config.netconf.client.configfile}'>mvn:org.opendaylight.controller/netconf-config/${netconf.version}/xml/config</configfile>
</feature>
<feature name='odl-netconf-monitoring' version='${project.version}' description="OpenDaylight :: Netconf :: Monitoring">
<feature version='${project.version}'>odl-netconf-util</feature>
<module>netconf</module>
<module>protocol-framework</module>
<module>adsal-compatibility</module>
+ <module>akka</module>
</modules>
</project>
\ No newline at end of file
<concepts.version>0.5.2-SNAPSHOT</concepts.version>
<concurrentlinkedhashmap.version>1.4</concurrentlinkedhashmap.version>
<config.version>0.2.5-SNAPSHOT</config.version>
+ <aaa.version>0.1.0-SNAPSHOT</aaa.version>
<config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
+ <config.clustering.configfile>05-clustering.xml</config.clustering.configfile>
<config.netty.configfile>00-netty.xml</config.netty.configfile>
<config.mdsal.configfile>01-mdsal.xml</config.mdsal.configfile>
<config.xsql.configfile>04-xsql.xml</config.xsql.configfile>
<topologymanager.shell.version>1.0.0-SNAPSHOT</topologymanager.shell.version>
<troubleshoot.web.version>0.4.2-SNAPSHOT</troubleshoot.web.version>
<typesafe.config.version>1.2.0</typesafe.config.version>
- <uncommons.maths.version>1.2.2</uncommons.maths.version>
+ <uncommons.maths.version>1.2.2a</uncommons.maths.version>
<usermanager.implementation.version>0.4.2-SNAPSHOT</usermanager.implementation.version>
<usermanager.northbound.version>0.0.2-SNAPSHOT</usermanager.northbound.version>
<usermanager.version>0.4.2-SNAPSHOT</usermanager.version>
<artifactId>opendaylight-karaf-resources</artifactId>
<description>Resources for opendaylight-karaf</description>
<packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <version>2.6</version>
+ <executions>
+ <execution>
+ <id>copy</id>
+ <goals>
+ <goal>copy</goal>
+ </goals>
+ <!-- here the phase you need -->
+ <phase>generate-resources</phase>
+ <configuration>
+ <artifactItems>
+ <!-- Needs to be copied to lib/ext in order to start bouncy provider for mina sshd -->
+ <artifactItem>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <version>${bouncycastle.version}</version>
+ <outputDirectory>target/classes/lib/ext</outputDirectory>
+ <destFileName>bcprov-jdk15on-${bouncycastle.version}.jar</destFileName>
+ </artifactItem>
+ </artifactItems>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
</project>
netconf.ssh.address=0.0.0.0
netconf.ssh.port=1830
netconf.ssh.pk.path = ./configuration/RSA.pk
+# Set security provider to BouncyCastle
+org.apache.karaf.security.providers = org.bouncycastle.jce.provider.BouncyCastleProvider
netconf.config.persister.active=1
# default Openflow version = 1.0, we also support 1.3.
# ovsdb.of.version=1.3
+# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
+# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
+# specific arp/neighDiscovery lookup.
+# ovsdb.l3gateway.mac=00:00:5E:00:02:01
+
# TLS configuration
# To enable TLS, set secureChannelEnabled=true and specify the location of controller Java KeyStore and TrustStore files.
# The Java KeyStore contains controller's private key and certificate. The Java TrustStore contains the trusted certificate
<outputDirectory>target/assembly/lib</outputDirectory>
<destFileName>karaf.branding-${branding.version}.jar</destFileName>
</artifactItem>
+ <!-- Needs to be copied to lib/ext in order to start bouncy provider for mina sshd -->
+ <artifactItem>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <version>${bouncycastle.version}</version>
+ <outputDirectory>target/assembly/lib/ext</outputDirectory>
+ <destFileName>bcprov-jdk15on-${bouncycastle.version}.jar</destFileName>
+ </artifactItem>
</artifactItems>
</configuration>
</execution>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-restconf-broker</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-remoterpc-connector</artifactId>
- </dependency>
<dependency>
<artifactId>jeromq</artifactId>
<version>0.3.1</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-distributed-datastore</artifactId>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-config</artifactId>
# default Openflow version = 1.3, we also support 1.0.
ovsdb.of.version=1.3
+# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
+# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
+# specific arp/neighDiscovery lookup.
+# ovsdb.l3gateway.mac=00:00:5E:00:02:01
+
# TLS configuration
# To enable TLS, set secureChannelEnabled=true and specify the location of controller Java KeyStore and TrustStore files.
# The Java KeyStore contains controller's private key and certificate. The Java TrustStore contains the trusted certificate
<configuration>
<instructions>
<Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
- <Export-package></Export-package>
- <Private-Package></Private-Package>
- <Import-Package></Import-Package>
+            <Export-Package>org.opendaylight.cluster.raft</Export-Package>
+ <Import-Package>*</Import-Package>
</instructions>
</configuration>
</plugin>
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.base.Optional;
+import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
import org.opendaylight.controller.cluster.example.messages.PrintState;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
}
}
- @Override protected Object createSnapshot() {
- return state;
+ // Serializes the current in-memory key/value state and sends it back to
+ // this actor as a CaptureSnapshotReply; RaftActor then persists it.
+ @Override protected void createSnapshot() {
+ ByteString bs = null;
+ try {
+ bs = fromObject(state);
+ } catch (Exception e) {
+ // NOTE(review): on failure bs stays null and a null payload is still
+ // sent — confirm handleCaptureSnapshotReply tolerates a null snapshot.
+ LOG.error("Exception in creating snapshot", e);
+ }
+ getSelf().tell(new CaptureSnapshotReply(bs), null);
}
- @Override protected void applySnapshot(Object snapshot) {
+ // Replaces the in-memory state with the contents deserialized from the
+ // snapshot bytes received from the leader/recovery.
+ @Override protected void applySnapshot(ByteString snapshot) {
state.clear();
- state.putAll((HashMap) snapshot);
- LOG.debug("Snapshot applied to state :" + ((HashMap) snapshot).size());
+ try {
+ // raw cast: the snapshot bytes are expected to hold a serialized HashMap
+ state.putAll((HashMap) toObject(snapshot));
+ } catch (Exception e) {
+ // state was already cleared above, so a failure leaves it empty
+ LOG.error("Exception in applying snapshot", e);
+ }
+ // NOTE(review): the (HashMap) cast on state is redundant — state already has size()
+ LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+ }
+
+ // Java-serializes the given object and wraps the resulting bytes in a
+ // protobuf ByteString. Streams are closed manually in the finally block.
+ private ByteString fromObject(Object snapshot) throws Exception {
+ ByteArrayOutputStream b = null;
+ ObjectOutputStream o = null;
+ try {
+ b = new ByteArrayOutputStream();
+ o = new ObjectOutputStream(b);
+ o.writeObject(snapshot);
+ byte[] snapshotBytes = b.toByteArray();
+ return ByteString.copyFrom(snapshotBytes);
+ } finally {
+ if (o != null) {
+ // flush before close so no buffered serialization data is lost
+ o.flush();
+ o.close();
+ }
+ if (b != null) {
+ b.close();
+ }
+ }
+ }
+
+ // Deserializes a Java object from the given ByteString (inverse of fromObject).
+ // NOTE(review): ObjectInputStream on bytes received from peers is native Java
+ // deserialization — confirm the transport is trusted within the cluster.
+ private Object toObject(ByteString bs) throws ClassNotFoundException, IOException {
+ Object obj = null;
+ ByteArrayInputStream bis = null;
+ ObjectInputStream ois = null;
+ try {
+ bis = new ByteArrayInputStream(bs.toByteArray());
+ ois = new ObjectInputStream(bis);
+ obj = ois.readObject();
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (ois != null) {
+ ois.close();
+ }
+ }
+ return obj;
}
@Override protected void onStateChanged() {
public long getSnapshotBatchCount() {
return 50;
}
+
+ // Very small chunk size (50 bytes) — presumably chosen so the example
+ // exercises multi-chunk InstallSnapshot transfers; TODO confirm intent.
+ @Override
+ public int getSnapshotChunkSize() {
+ return 50;
+ }
}
td.printState();
} else if (command.startsWith("printNodes")) {
td.printNodes();
+ } else {
+ System.out.println("Invalid command:" + command);
}
}
*/
package org.opendaylight.controller.cluster.raft;
+import com.google.protobuf.ByteString;
+
import java.util.ArrayList;
import java.util.List;
*/
public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
- protected final List<ReplicatedLogEntry> journal;
- protected final Object snapshot;
+ protected List<ReplicatedLogEntry> journal;
+ protected ByteString snapshot;
protected long snapshotIndex = -1;
protected long snapshotTerm = -1;
- public AbstractReplicatedLogImpl(Object state, long snapshotIndex,
+ // to be used for rollback during save snapshot failure
+ protected List<ReplicatedLogEntry> snapshottedJournal;
+ protected ByteString previousSnapshot;
+ protected long previousSnapshotIndex = -1;
+ protected long previousSnapshotTerm = -1;
+
+ public AbstractReplicatedLogImpl(ByteString state, long snapshotIndex,
long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries) {
this.snapshot = state;
this.snapshotIndex = snapshotIndex;
@Override
public boolean isInSnapshot(long logEntryIndex) {
- return logEntryIndex <= snapshotIndex;
+ // snapshotIndex == -1 means no snapshot exists yet, so no entry is "in" it
+ return logEntryIndex <= snapshotIndex && snapshotIndex != -1;
}
@Override
- public Object getSnapshot() {
+ public ByteString getSnapshot() {
return snapshot;
}
@Override
public abstract void removeFromAndPersist(long index);
+
+ @Override
+ public void setSnapshotIndex(long snapshotIndex) {
+ this.snapshotIndex = snapshotIndex;
+ }
+
+ @Override
+ public void setSnapshotTerm(long snapshotTerm) {
+ this.snapshotTerm = snapshotTerm;
+ }
+
+ @Override
+ public void setSnapshot(ByteString snapshot) {
+ this.snapshot = snapshot;
+ }
+
+ // Removes journal entries in [startIndex, endIndex); the subList view
+ // clears the backing journal list directly.
+ @Override
+ public void clear(int startIndex, int endIndex) {
+ journal.subList(startIndex, endIndex).clear();
+ }
+
+ // Optimistically trims the in-memory journal up to the captured index and
+ // installs the new snapshot, while stashing the removed entries and the
+ // previous snapshot state so snapshotRollback() can undo everything if
+ // persistence later fails (SaveSnapshotFailure).
+ @Override
+ public void snapshotPreCommit(ByteString snapshot, long snapshotCapturedIndex, long snapshotCapturedTerm) {
+ snapshottedJournal = new ArrayList<>(journal.size());
+
+ // (snapshotCapturedIndex - snapshotIndex) converts the absolute log index
+ // into an offset within the in-memory journal
+ snapshottedJournal.addAll(journal.subList(0, (int)(snapshotCapturedIndex - snapshotIndex)));
+ clear(0, (int) (snapshotCapturedIndex - snapshotIndex));
+
+ previousSnapshotIndex = snapshotIndex;
+ setSnapshotIndex(snapshotCapturedIndex);
+
+ previousSnapshotTerm = snapshotTerm;
+ setSnapshotTerm(snapshotCapturedTerm);
+
+ previousSnapshot = getSnapshot();
+ setSnapshot(snapshot);
+ }
+
+ // Discards the rollback bookkeeping once the snapshot save has been
+ // confirmed (SaveSnapshotSuccess).
+ @Override
+ public void snapshotCommit() {
+ snapshottedJournal.clear();
+ snapshottedJournal = null;
+ previousSnapshotIndex = -1;
+ previousSnapshotTerm = -1;
+ previousSnapshot = null;
+ }
+
+ // Restores the journal and snapshot state saved by snapshotPreCommit()
+ // after a SaveSnapshotFailure: prepends the stashed entries back in front
+ // of any entries appended since the pre-commit.
+ // NOTE(review): assumes snapshotPreCommit() ran first — snapshottedJournal
+ // is null otherwise and this would NPE; confirm callers guarantee the order.
+ @Override
+ public void snapshotRollback() {
+ snapshottedJournal.addAll(journal);
+ journal.clear();
+ journal = snapshottedJournal;
+ snapshottedJournal = null;
+
+ snapshotIndex = previousSnapshotIndex;
+ previousSnapshotIndex = -1;
+
+ snapshotTerm = previousSnapshotTerm;
+ previousSnapshotTerm = -1;
+
+ snapshot = previousSnapshot;
+ previousSnapshot = null;
+
+ }
}
* @return int
*/
public int getElectionTimeVariance();
+
+ /**
+ * The size (in bytes) of the snapshot chunk sent from Leader
+ */
+ public int getSnapshotChunkSize();
}
*/
private static final int ELECTION_TIME_MAX_VARIANCE = 100;
+ private final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+
/**
* The interval at which a heart beat message will be sent to the remote
public int getElectionTimeVariance() {
return ELECTION_TIME_MAX_VARIANCE;
}
+
+ // Default chunk size used when streaming a snapshot to a follower (2MB).
+ @Override
+ public int getSnapshotChunkSize() {
+ return SNAPSHOT_CHUNK_SIZE;
+ }
}
import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.UntypedPersistentActor;
+import com.google.common.base.Optional;
+import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
-import com.google.common.base.Optional;
+import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.behaviors.Candidate;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import java.io.Serializable;
-import java.util.List;
import java.util.Map;
/**
*/
private ReplicatedLogImpl replicatedLog = new ReplicatedLogImpl();
+ private CaptureSnapshot captureSnapshot = null;
+
+ private volatile boolean hasSnapshotCaptureInitiated = false;
public RaftActor(String id, Map<String, String> peerAddresses) {
this(id, peerAddresses, Optional.<ConfigParams>absent());
replicatedLog = new ReplicatedLogImpl(snapshot);
context.setReplicatedLog(replicatedLog);
+ context.setLastApplied(snapshot.getLastAppliedIndex());
LOG.debug("Applied snapshot to replicatedLog. " +
"snapshotIndex={}, snapshotTerm={}, journal-size={}",
replicatedLog.size());
// Apply the snapshot to the actors state
- applySnapshot(snapshot.getState());
+ applySnapshot(ByteString.copyFrom(snapshot.getState()));
} else if (message instanceof ReplicatedLogEntry) {
replicatedLog.append((ReplicatedLogEntry) message);
applyState.getReplicatedLogEntry().getData());
} else if(message instanceof ApplySnapshot ) {
- applySnapshot(((ApplySnapshot) message).getSnapshot());
+ Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
+
+ LOG.debug("ApplySnapshot called on Follower Actor " +
+ "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
+ snapshot.getLastAppliedTerm());
+ applySnapshot(ByteString.copyFrom(snapshot.getState()));
+
+ //clears the followers log, sets the snapshot index to ensure adjusted-index works
+ replicatedLog = new ReplicatedLogImpl(snapshot);
+ context.setReplicatedLog(replicatedLog);
+ context.setLastApplied(snapshot.getLastAppliedIndex());
} else if (message instanceof FindLeader) {
getSender().tell(
} else if (message instanceof SaveSnapshotSuccess) {
SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
+ LOG.info("SaveSnapshotSuccess received for snapshot");
+
+ context.getReplicatedLog().snapshotCommit();
// TODO: Not sure if we want to be this aggressive with trimming stuff
trimPersistentData(success.metadata().sequenceNr());
} else if (message instanceof SaveSnapshotFailure) {
+ SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
+
+ LOG.info("saveSnapshotFailure.metadata():{}", saveSnapshotFailure.metadata().toString());
+ LOG.error(saveSnapshotFailure.cause(), "SaveSnapshotFailure received for snapshot Cause:");
- // TODO: Handle failure in saving the snapshot
+ context.getReplicatedLog().snapshotRollback();
+
+ LOG.info("Replicated Log rollbacked. Snapshot will be attempted in the next cycle." +
+ "snapshotIndex:{}, snapshotTerm:{}, log-size:{}",
+ context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm(),
+ context.getReplicatedLog().size());
} else if (message instanceof AddRaftPeer){
RemoveRaftPeer rrp = (RemoveRaftPeer)message;
context.removePeer(rrp.getName());
+ } else if (message instanceof CaptureSnapshot) {
+ LOG.debug("CaptureSnapshot received by actor");
+ CaptureSnapshot cs = (CaptureSnapshot)message;
+ captureSnapshot = cs;
+ createSnapshot();
+
+ } else if (message instanceof CaptureSnapshotReply){
+ LOG.debug("CaptureSnapshotReply received by actor");
+ CaptureSnapshotReply csr = (CaptureSnapshotReply) message;
+
+ ByteString stateInBytes = csr.getSnapshot();
+ LOG.debug("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
+ handleCaptureSnapshotReply(stateInBytes);
+
} else {
+ if (!(message instanceof AppendEntriesMessages.AppendEntries)
+ && !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
+ LOG.debug("onReceiveCommand: message:" + message.getClass());
+ }
RaftState state =
currentBehavior.handleMessage(getSender(), message);
protected ActorSelection getLeader(){
String leaderAddress = getLeaderAddress();
+ if(leaderAddress == null){
+ return null;
+ }
+
return context.actorSelection(leaderAddress);
}
*
* @return The current state of the actor
*/
- protected abstract Object createSnapshot();
+ protected abstract void createSnapshot();
/**
* This method will be called by the RaftActor during recovery to
*
* @param snapshot A snapshot of the state of the actor
*/
- protected abstract void applySnapshot(Object snapshot);
+ protected abstract void applySnapshot(ByteString snapshot);
/**
* This method will be called by the RaftActor when the state of the
return peerAddress;
}
+ // Builds a Snapshot from the serialized state delivered by createSnapshot(),
+ // kicks off async persistence, and optimistically trims the in-memory
+ // journal; SaveSnapshotSuccess/SaveSnapshotFailure later commit or roll back.
+ private void handleCaptureSnapshotReply(ByteString stateInBytes) {
+ // create a snapshot object from the state provided and save it
+ // when snapshot is saved async, SaveSnapshotSuccess is raised.
+
+ Snapshot sn = Snapshot.create(stateInBytes.toByteArray(),
+ context.getReplicatedLog().getFrom(captureSnapshot.getLastAppliedIndex() + 1),
+ captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
+ captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
+
+ saveSnapshot(sn);
+
+ // NOTE(review): saveSnapshot is asynchronous — persistence is NOT done yet
+ // at this point; this log message overstates what has happened.
+ LOG.info("Persisting of snapshot done:{}", sn.getLogMessage());
+
+ //be greedy and remove entries from in-mem journal which are in the snapshot
+ // and update snapshotIndex and snapshotTerm without waiting for the success,
+
+ context.getReplicatedLog().snapshotPreCommit(stateInBytes,
+ captureSnapshot.getLastAppliedIndex(),
+ captureSnapshot.getLastAppliedTerm());
+
+ LOG.info("Removed in-memory snapshotted entries, adjusted snaphsotIndex:{} " +
+ "and term:{}", captureSnapshot.getLastAppliedIndex(),
+ captureSnapshot.getLastAppliedTerm());
+
+ // reset capture state so the next snapshot cycle can start
+ captureSnapshot = null;
+ hasSnapshotCaptureInitiated = false;
+ }
+
private class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
public ReplicatedLogImpl(Snapshot snapshot) {
- super(snapshot.getState(),
+ super(ByteString.copyFrom(snapshot.getState()),
snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
snapshot.getUnAppliedEntries());
}
persist(replicatedLogEntry,
new Procedure<ReplicatedLogEntry>() {
public void apply(ReplicatedLogEntry evt) throws Exception {
- // FIXME : Tentatively create a snapshot every hundred thousand entries. To be tuned.
- if (journal.size() > context.getConfigParams().getSnapshotBatchCount()) {
+ // while a snapshot capture is in progress, hasSnapshotCaptureInitiated is true
+ if (hasSnapshotCaptureInitiated == false &&
+ journal.size() % context.getConfigParams().getSnapshotBatchCount() == 0) {
+
LOG.info("Initiating Snapshot Capture..");
long lastAppliedIndex = -1;
long lastAppliedTerm = -1;
LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
- // create a snapshot object from the state provided and save it
- // when snapshot is saved async, SaveSnapshotSuccess is raised.
- Snapshot sn = Snapshot.create(createSnapshot(),
- getFrom(context.getLastApplied() + 1),
- lastIndex(), lastTerm(), lastAppliedIndex,
- lastAppliedTerm);
- saveSnapshot(sn);
-
- LOG.info("Persisting of snapshot done:{}", sn.getLogMessage());
-
- //be greedy and remove entries from in-mem journal which are in the snapshot
- // and update snapshotIndex and snapshotTerm without waiting for the success,
- // TODO: damage-recovery to be done on failure
- journal.subList(0, (int) (lastAppliedIndex - snapshotIndex)).clear();
- snapshotIndex = lastAppliedIndex;
- snapshotTerm = lastAppliedTerm;
-
- LOG.info("Removed in-memory snapshotted entries, " +
- "adjusted snaphsotIndex:{}" +
- "and term:{}", snapshotIndex, lastAppliedTerm);
+ // send a CaptureSnapshot to self to make the expensive operation async.
+ getSelf().tell(new CaptureSnapshot(
+ lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm),
+ null);
+ hasSnapshotCaptureInitiated = true;
}
// Send message for replication
if (clientActor != null) {
}
- private static class Snapshot implements Serializable {
- private final Object state;
- private final List<ReplicatedLogEntry> unAppliedEntries;
- private final long lastIndex;
- private final long lastTerm;
- private final long lastAppliedIndex;
- private final long lastAppliedTerm;
-
- private Snapshot(Object state,
- List<ReplicatedLogEntry> unAppliedEntries, long lastIndex,
- long lastTerm, long lastAppliedIndex, long lastAppliedTerm) {
- this.state = state;
- this.unAppliedEntries = unAppliedEntries;
- this.lastIndex = lastIndex;
- this.lastTerm = lastTerm;
- this.lastAppliedIndex = lastAppliedIndex;
- this.lastAppliedTerm = lastAppliedTerm;
- }
-
-
- public static Snapshot create(Object state,
- List<ReplicatedLogEntry> entries, long lastIndex, long lastTerm,
- long lastAppliedIndex, long lastAppliedTerm) {
- return new Snapshot(state, entries, lastIndex, lastTerm,
- lastAppliedIndex, lastAppliedTerm);
- }
-
- public Object getState() {
- return state;
- }
-
- public List<ReplicatedLogEntry> getUnAppliedEntries() {
- return unAppliedEntries;
- }
-
- public long getLastTerm() {
- return lastTerm;
- }
-
- public long getLastAppliedIndex() {
- return lastAppliedIndex;
- }
-
- public long getLastAppliedTerm() {
- return lastAppliedTerm;
- }
-
- public String getLogMessage() {
- StringBuilder sb = new StringBuilder();
- return sb.append("Snapshot={")
- .append("lastTerm:" + this.getLastTerm() + ", ")
- .append("LastAppliedIndex:" + this.getLastAppliedIndex() + ", ")
- .append("LastAppliedTerm:" + this.getLastAppliedTerm() + ", ")
- .append("UnAppliedEntries size:" + this.getUnAppliedEntries().size() + "}")
- .toString();
-
- }
- }
-
private class ElectionTermImpl implements ElectionTerm {
/**
* Identifier of the actor whose election term information this is
package org.opendaylight.controller.cluster.raft;
+import com.google.protobuf.ByteString;
+
import java.util.List;
/**
*
* @return an object representing the snapshot if it exists. null otherwise
*/
- Object getSnapshot();
+ ByteString getSnapshot();
/**
* Get the index of the snapshot
* otherwise
*/
long getSnapshotTerm();
+
+ /**
+ * Sets the index of the last log entry covered by the current snapshot.
+ *
+ * @param snapshotIndex index of the last entry included in the snapshot
+ */
+ void setSnapshotIndex(long snapshotIndex);
+
+ /**
+ * Sets the term of the last log entry covered by the current snapshot.
+ *
+ * @param snapshotTerm term of the last entry included in the snapshot
+ */
+ public void setSnapshotTerm(long snapshotTerm);
+
+ /**
+ * Sets the serialized snapshot bytes held by this log.
+ *
+ * @param snapshot serialized state-machine snapshot
+ */
+ public void setSnapshot(ByteString snapshot);
+
+ /**
+ * Clears the journal entries with startIndex (inclusive) and endIndex (exclusive).
+ *
+ * @param startIndex first journal offset to remove, inclusive
+ * @param endIndex journal offset to stop at, exclusive
+ */
+ public void clear(int startIndex, int endIndex);
+
+ /**
+ * Handles all the bookkeeping in order to perform a rollback in the
+ * event of SaveSnapshotFailure: installs the new snapshot, trims the
+ * journal, and stashes the prior state for snapshotRollback().
+ *
+ * @param snapshot serialized state-machine snapshot being installed
+ * @param snapshotCapturedIndex index of the last entry the snapshot covers
+ * @param snapshotCapturedTerm term of the last entry the snapshot covers
+ */
+ public void snapshotPreCommit(ByteString snapshot,
+ long snapshotCapturedIndex, long snapshotCapturedTerm);
+
+ /**
+ * Discards the rollback state once the snapshot save has succeeded.
+ */
+ public void snapshotCommit();
+
+ /**
+ * Restores the replicated log to its pre-snapshot state in the event of a
+ * save snapshot failure.
+ */
+ public void snapshotRollback();
}
package org.opendaylight.controller.cluster.raft;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
public class SerializationUtils {
+ // Converts known protobuf wire forms back into internal message types;
+ // anything unrecognized is returned unchanged.
public static Object fromSerializable(Object serializable){
if(serializable.getClass().equals(AppendEntries.SERIALIZABLE_CLASS)){
return AppendEntries.fromSerializable(serializable);
+
+ } else if (serializable.getClass().equals(InstallSnapshot.SERIALIZABLE_CLASS)) {
+ return InstallSnapshot.fromSerializable(serializable);
}
return serializable;
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import java.io.Serializable;
+import java.util.List;
+
+
+/**
+ * Immutable carrier for a persisted RAFT snapshot: the serialized
+ * state-machine bytes plus the log coordinates (last index/term and last
+ * applied index/term) and any entries not yet covered by the snapshot.
+ */
+// NOTE(review): Serializable without an explicit serialVersionUID — consider
+// adding one for stable persistence across versions.
+public class Snapshot implements Serializable {
+ // NOTE(review): the byte[] is stored and returned without a defensive copy;
+ // callers must not mutate it.
+ private final byte[] state;
+ private final List<ReplicatedLogEntry> unAppliedEntries;
+ private final long lastIndex;
+ private final long lastTerm;
+ private final long lastAppliedIndex;
+ private final long lastAppliedTerm;
+
+ private Snapshot(byte[] state,
+ List<ReplicatedLogEntry> unAppliedEntries, long lastIndex,
+ long lastTerm, long lastAppliedIndex, long lastAppliedTerm) {
+ this.state = state;
+ this.unAppliedEntries = unAppliedEntries;
+ this.lastIndex = lastIndex;
+ this.lastTerm = lastTerm;
+ this.lastAppliedIndex = lastAppliedIndex;
+ this.lastAppliedTerm = lastAppliedTerm;
+ }
+
+
+ // Static factory; the only way to construct a Snapshot.
+ public static Snapshot create(byte[] state,
+ List<ReplicatedLogEntry> entries, long lastIndex, long lastTerm,
+ long lastAppliedIndex, long lastAppliedTerm) {
+ return new Snapshot(state, entries, lastIndex, lastTerm,
+ lastAppliedIndex, lastAppliedTerm);
+ }
+
+ public byte[] getState() {
+ return state;
+ }
+
+ public List<ReplicatedLogEntry> getUnAppliedEntries() {
+ return unAppliedEntries;
+ }
+
+ public long getLastTerm() {
+ return lastTerm;
+ }
+
+ public long getLastAppliedIndex() {
+ return lastAppliedIndex;
+ }
+
+ public long getLastAppliedTerm() {
+ return lastAppliedTerm;
+ }
+
+ public long getLastIndex() {
+ return this.lastIndex;
+ }
+
+ // Compact human-readable summary for log output.
+ public String getLogMessage() {
+ StringBuilder sb = new StringBuilder();
+ return sb.append("Snapshot={")
+ .append("lastTerm:" + this.getLastTerm() + ", ")
+ .append("lastIndex:" + this.getLastIndex() + ", ")
+ .append("LastAppliedIndex:" + this.getLastAppliedIndex() + ", ")
+ .append("LastAppliedTerm:" + this.getLastAppliedTerm() + ", ")
+ .append("UnAppliedEntries size:" + this.getUnAppliedEntries().size() + "}")
+ .toString();
+
+ }
+}
package org.opendaylight.controller.cluster.raft.base.messages;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+
import java.io.Serializable;
+/**
+ * Internal message, issued by follower to its actor
+ */
public class ApplySnapshot implements Serializable {
- private final Object snapshot;
+ // the fully-assembled snapshot the follower should install locally
+ private final Snapshot snapshot;
- public ApplySnapshot(Object snapshot) {
+ public ApplySnapshot(Snapshot snapshot) {
this.snapshot = snapshot;
}
- public Object getSnapshot() {
+ public Snapshot getSnapshot() {
return snapshot;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+/**
+ * Internal message a RaftActor sends to itself to trigger an asynchronous
+ * snapshot capture, carrying the log coordinates at the moment of the request.
+ */
+public class CaptureSnapshot {
+ // NOTE(review): these are never reassigned after construction — they could
+ // be declared final.
+ private long lastAppliedIndex;
+ private long lastAppliedTerm;
+ private long lastIndex;
+ private long lastTerm;
+
+ public CaptureSnapshot(long lastIndex, long lastTerm,
+ long lastAppliedIndex, long lastAppliedTerm) {
+ this.lastIndex = lastIndex;
+ this.lastTerm = lastTerm;
+ this.lastAppliedIndex = lastAppliedIndex;
+ this.lastAppliedTerm = lastAppliedTerm;
+ }
+
+ public long getLastAppliedIndex() {
+ return lastAppliedIndex;
+ }
+
+ public long getLastAppliedTerm() {
+ return lastAppliedTerm;
+ }
+
+ public long getLastIndex() {
+ return lastIndex;
+ }
+
+ public long getLastTerm() {
+ return lastTerm;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Internal message carrying the serialized state produced by
+ * createSnapshot() back to the RaftActor for persistence.
+ */
+public class CaptureSnapshotReply {
+ // NOTE(review): mutable via setSnapshot — if no caller needs the setter,
+ // the field could be final.
+ private ByteString snapshot;
+
+ public CaptureSnapshotReply(ByteString snapshot) {
+ this.snapshot = snapshot;
+ }
+
+ public ByteString getSnapshot() {
+ return snapshot;
+ }
+
+ public void setSnapshot(ByteString snapshot) {
+ this.snapshot = snapshot;
+ }
+}
* @param index a log index that is known to be committed
*/
protected void applyLogToStateMachine(final long index) {
+ long newLastApplied = context.getLastApplied();
// Now maybe we apply to the state machine
for (long i = context.getLastApplied() + 1;
i < index + 1; i++) {
if (replicatedLogEntry != null) {
actor().tell(new ApplyState(clientActor, identifier,
replicatedLogEntry), actor());
+ newLastApplied = i;
} else {
+ // If an index is missing from the log there is no point in looping
+ // further, as the remaining indices won't be present either.
context.getLogger().error(
- "Missing index " + i + " from log. Cannot apply state.");
+ "Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index );
+ break;
}
}
// Send a local message to the local RaftActor (it's derived class to be
// specific to apply the log to it's index)
- context.getLogger().debug("Setting last applied to {}", index);
- context.setLastApplied(index);
+ context.getLogger().debug("Setting last applied to {}", newLastApplied);
+ context.setLastApplied(newLastApplied);
}
protected Object fromSerializableMessage(Object serializable){
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
+import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import java.util.ArrayList;
+
/**
* The behavior of a RaftActor in the Follower state
* <p/>
* </ul>
*/
public class Follower extends AbstractRaftActorBehavior {
+ private ByteString snapshotChunksCollected = ByteString.EMPTY;
+
public Follower(RaftActorContext context) {
super(context);
if (outOfSync) {
// We found that the log was out of sync so just send a negative
// reply and return
+ context.getLogger().debug("Follower is out-of-sync, " +
+ "so sending negative reply, lastIndex():{}, lastTerm():{}",
+ lastIndex(), lastTerm());
sender.tell(
new AppendEntriesReply(context.getId(), currentTerm(), false,
lastIndex(), lastTerm()), actor()
// If commitIndex > lastApplied: increment lastApplied, apply
// log[lastApplied] to state machine (§5.3)
- if (appendEntries.getLeaderCommit() > context.getLastApplied()) {
+ // check if there are any entries to be applied. last-applied can be equal to last-index
+ if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
+ context.getLastApplied() < lastIndex()) {
+ context.getLogger().debug("applyLogToStateMachine, " +
+ "appendEntries.getLeaderCommit():{}," +
+ "context.getLastApplied():{}, lastIndex():{}",
+ appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex());
applyLogToStateMachine(appendEntries.getLeaderCommit());
}
} else if (message instanceof InstallSnapshot) {
InstallSnapshot installSnapshot = (InstallSnapshot) message;
- actor().tell(new ApplySnapshot(installSnapshot.getData()), actor());
+ handleInstallSnapshot(sender, installSnapshot);
}
scheduleElection(electionDuration());
return super.handleMessage(sender, message);
}
+ // Accumulates InstallSnapshot chunks from the leader; on the final chunk,
+ // assembles a Snapshot and asks the actor to apply it. Each chunk is
+ // acknowledged with an InstallSnapshotReply (success=false triggers resend).
+ // NOTE(review): snapshotChunksCollected is never reset to EMPTY after the
+ // last chunk or on error — confirm a subsequent snapshot transfer cannot
+ // append to stale bytes.
+ private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
+ context.getLogger().debug("InstallSnapshot received by follower " +
+ "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
+ installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
+
+ try {
+ if (installSnapshot.getChunkIndex() == installSnapshot.getTotalChunks()) {
+ // this is the last chunk, create a snapshot object and apply
+
+ snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
+ context.getLogger().debug("Last chunk received: snapshotChunksCollected.size:{}",
+ snapshotChunksCollected.size());
+
+ Snapshot snapshot = Snapshot.create(snapshotChunksCollected.toByteArray(),
+ new ArrayList<ReplicatedLogEntry>(),
+ installSnapshot.getLastIncludedIndex(),
+ installSnapshot.getLastIncludedTerm(),
+ installSnapshot.getLastIncludedIndex(),
+ installSnapshot.getLastIncludedTerm());
+
+ actor().tell(new ApplySnapshot(snapshot), actor());
+
+ } else {
+ // we have more to go
+ snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
+ context.getLogger().debug("Chunk={},snapshotChunksCollected.size:{}",
+ installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+ }
+
+ sender.tell(new InstallSnapshotReply(
+ currentTerm(), context.getId(), installSnapshot.getChunkIndex(),
+ true), actor());
+
+ } catch (Exception e) {
+ context.getLogger().error("Exception in InstallSnapshot of follower", e);
+ //send reply with success as false. The chunk will be sent again on failure
+ sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
+ installSnapshot.getChunkIndex(), false), actor());
+ }
+ }
+
@Override public void close() throws Exception {
stopElection();
}
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import scala.concurrent.duration.FiniteDuration;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
public class Leader extends AbstractRaftActorBehavior {
- private final Map<String, FollowerLogInformation> followerToLog =
+ protected final Map<String, FollowerLogInformation> followerToLog =
new HashMap();
+ protected final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();
private final Set<String> followers;
return super.handleMessage(sender, message);
}
- private void handleInstallSnapshotReply(InstallSnapshotReply message) {
- InstallSnapshotReply reply = message;
- private void handleInstallSnapshotReply(InstallSnapshotReply message) {
- InstallSnapshotReply reply = message;
+ // Tracks per-follower snapshot-chunk progress: on success of the last chunk
+ // the follower's match/next indices jump past the snapshot; on failure the
+ // same chunk is retried on the next send cycle.
+ private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
String followerId = reply.getFollowerId();
- FollowerLogInformation followerLogInformation =
- followerToLog.get(followerId);
+ FollowerToSnapshot followerToSnapshot =
+ mapFollowerToSnapshot.get(followerId);
+
+ if (followerToSnapshot != null &&
+ followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
+
+ if (reply.isSuccess()) {
+ if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
+ //this was the last chunk reply
+ context.getLogger().debug("InstallSnapshotReply received, " +
+ "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
+ reply.getChunkIndex(), followerId,
+ context.getReplicatedLog().getSnapshotIndex() + 1);
+
+ FollowerLogInformation followerLogInformation =
+ followerToLog.get(followerId);
+ followerLogInformation.setMatchIndex(
+ context.getReplicatedLog().getSnapshotIndex());
+ followerLogInformation.setNextIndex(
+ context.getReplicatedLog().getSnapshotIndex() + 1);
+ mapFollowerToSnapshot.remove(followerId);
+ context.getLogger().debug("followerToLog.get(followerId).getNextIndex().get()=" +
+ followerToLog.get(followerId).getNextIndex().get());
+
+ } else {
+ followerToSnapshot.markSendStatus(true);
+ }
+ } else {
+ context.getLogger().info("InstallSnapshotReply received, " +
+ "sending snapshot chunk failed, Will retry, Chunk:{}",
+ reply.getChunkIndex());
+ followerToSnapshot.markSendStatus(false);
+ }
- followerLogInformation
- .setMatchIndex(context.getReplicatedLog().getSnapshotIndex());
- followerLogInformation
- .setNextIndex(context.getReplicatedLog().getSnapshotIndex() + 1);
+ } else {
+ // followerToSnapshot may be null on this path (unknown follower), so
+ // guard the dereference used only for the log message to avoid an NPE.
+ context.getLogger().error("ERROR!!" +
+ "FollowerId in InstallSnapshotReply not known to Leader" +
+ " or Chunk Index in InstallSnapshotReply not matching {} != {}",
+ (followerToSnapshot == null ? -1 : followerToSnapshot.getChunkIndex()),
+ reply.getChunkIndex() );
+ }
}
private void replicate(Replicate replicate) {
private void sendAppendEntries() {
// Send an AppendEntries to all followers
for (String followerId : followers) {
- ActorSelection followerActor =
- context.getPeerActorSelection(followerId);
+ ActorSelection followerActor = context.getPeerActorSelection(followerId);
if (followerActor != null) {
- FollowerLogInformation followerLogInformation =
- followerToLog.get(followerId);
-
- long nextIndex = followerLogInformation.getNextIndex().get();
-
+ FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
+ long followerNextIndex = followerLogInformation.getNextIndex().get();
List<ReplicatedLogEntry> entries = Collections.emptyList();
- if (context.getReplicatedLog().isPresent(nextIndex)) {
- // FIXME : Sending one entry at a time
- entries =
- context.getReplicatedLog().getFrom(nextIndex, 1);
+ if (mapFollowerToSnapshot.get(followerId) != null) {
+ if (mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
+ sendSnapshotChunk(followerActor, followerId);
+ }
+
+ } else {
+
+ if (context.getReplicatedLog().isPresent(followerNextIndex)) {
+ // FIXME : Sending one entry at a time
+ entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
+
+ followerActor.tell(
+ new AppendEntries(currentTerm(), context.getId(),
+ prevLogIndex(followerNextIndex),
+ prevLogTerm(followerNextIndex), entries,
+ context.getCommitIndex()).toSerializable(),
+ actor()
+ );
+
+ } else {
+ // if the followers next index is not present in the leaders log, then snapshot should be sent
+ long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
+ long leaderLastIndex = context.getReplicatedLog().lastIndex();
+ if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) {
+ // if the follower is not just starting out and the leader's last index
+ // is at or beyond the follower's next index
+ context.getLogger().debug("SendInstallSnapshot to follower:{}," +
+ "follower-nextIndex:{}, leader-snapshot-index:{}, " +
+ "leader-last-index:{}", followerId,
+ followerNextIndex, leaderSnapShotIndex, leaderLastIndex);
+
+ actor().tell(new SendInstallSnapshot(), actor());
+ } else {
+ followerActor.tell(
+ new AppendEntries(currentTerm(), context.getId(),
+ prevLogIndex(followerNextIndex),
+ prevLogTerm(followerNextIndex), entries,
+ context.getCommitIndex()).toSerializable(),
+ actor()
+ );
+ }
+ }
}
-
- followerActor.tell(
- new AppendEntries(currentTerm(), context.getId(),
- prevLogIndex(nextIndex),
- prevLogTerm(nextIndex), entries,
- context.getCommitIndex()).toSerializable(),
- actor()
- );
}
}
}
long nextIndex = followerLogInformation.getNextIndex().get();
- if (!context.getReplicatedLog().isPresent(nextIndex) && context
- .getReplicatedLog().isInSnapshot(nextIndex)) {
- followerActor.tell(
- new InstallSnapshot(currentTerm(), context.getId(),
- context.getReplicatedLog().getSnapshotIndex(),
- context.getReplicatedLog().getSnapshotTerm(),
- context.getReplicatedLog().getSnapshot()
- ),
- actor()
- );
+ if (!context.getReplicatedLog().isPresent(nextIndex) &&
+ context.getReplicatedLog().isInSnapshot(nextIndex)) {
+ sendSnapshotChunk(followerActor, followerId);
}
}
}
}
+ /**
+ * Sends a snapshot chunk to a given follower
+ * InstallSnapshot should qualify as a heartbeat too.
+ */
+ private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
+ try {
+ followerActor.tell(
+ new InstallSnapshot(currentTerm(), context.getId(),
+ context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm(),
+ getNextSnapshotChunk(followerId,
+ context.getReplicatedLog().getSnapshot()),
+ mapFollowerToSnapshot.get(followerId).incrementChunkIndex(),
+ mapFollowerToSnapshot.get(followerId).getTotalChunks()
+ ).toSerializable(),
+ actor()
+ );
+ context.getLogger().info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
+ followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
+ mapFollowerToSnapshot.get(followerId).getTotalChunks());
+ } catch (IOException e) {
+ context.getLogger().error("InstallSnapshot failed for Leader.", e);
+ }
+ }
+
+ /**
+ * Accepts the snapshot as a ByteString, enters it into the map for future chunks,
+ * then creates and returns a ByteString chunk
+ */
+ private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
+ FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
+ if (followerToSnapshot == null) {
+ followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
+ mapFollowerToSnapshot.put(followerId, followerToSnapshot);
+ }
+ ByteString nextChunk = followerToSnapshot.getNextChunk();
+ context.getLogger().debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+
+ return nextChunk;
+ }
+
private RaftState sendHeartBeat() {
if (followers.size() > 0) {
sendAppendEntries();
return context.getId();
}
+ /**
+ * Encapsulates the snapshot bytestring and handles the logic of sending
+ * snapshot chunks
+ */
+ protected class FollowerToSnapshot {
+ private ByteString snapshotBytes;
+ private int offset = 0;
+ // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
+ private int replyReceivedForOffset;
+ // if replyStatus is false, the previous chunk is re-sent
+ private boolean replyStatus = false;
+ private int chunkIndex;
+ private int totalChunks;
+
+ public FollowerToSnapshot(ByteString snapshotBytes) {
+ this.snapshotBytes = snapshotBytes;
+ replyReceivedForOffset = -1;
+ chunkIndex = 1;
+ int size = snapshotBytes.size();
+ totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
+ ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
+ context.getLogger().debug("Snapshot {} bytes, total chunks to send:{}",
+ size, totalChunks);
+ }
+
+ public ByteString getSnapshotBytes() {
+ return snapshotBytes;
+ }
+
+ public int incrementOffset() {
+ if(replyStatus) {
+ // if the previous chunk failed, we want to send the same chunk again
+ offset = offset + context.getConfigParams().getSnapshotChunkSize();
+ }
+ return offset;
+ }
+
+ public int incrementChunkIndex() {
+ if (replyStatus) {
+ // if the previous chunk failed, we want to send the same chunk again
+ chunkIndex = chunkIndex + 1;
+ }
+ return chunkIndex;
+ }
+
+ public int getChunkIndex() {
+ return chunkIndex;
+ }
+
+ public int getTotalChunks() {
+ return totalChunks;
+ }
+
+ public boolean canSendNextChunk() {
+ // return false only when a chunk has been sent but no reply has been received yet
+ return replyReceivedForOffset == offset;
+ }
+
+ public boolean isLastChunk(int chunkIndex) {
+ return totalChunks == chunkIndex;
+ }
+
+ public void markSendStatus(boolean success) {
+ if (success) {
+ // if the chunk sent was successful
+ replyReceivedForOffset = offset;
+ replyStatus = true;
+ } else {
+ // if the chunk sent was a failure
+ replyReceivedForOffset = offset;
+ replyStatus = false;
+ }
+ }
+
+ public ByteString getNextChunk() {
+ int snapshotLength = getSnapshotBytes().size();
+ int start = incrementOffset();
+ int size = context.getConfigParams().getSnapshotChunkSize();
+ if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
+ size = snapshotLength;
+ } else {
+ if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
+ size = snapshotLength - start;
+ }
+ }
+
+ context.getLogger().debug("length={}, offset={},size={}",
+ snapshotLength, start, size);
+ return getSnapshotBytes().substring(start, start + size);
+
+ }
+ }
+
}
package org.opendaylight.controller.cluster.raft.messages;
+import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
+
public class InstallSnapshot extends AbstractRaftRPC {
+ public static final Class SERIALIZABLE_CLASS = InstallSnapshotMessages.InstallSnapshot.class;
+
private final String leaderId;
private final long lastIncludedIndex;
private final long lastIncludedTerm;
- private final Object data;
+ private final ByteString data;
+ private final int chunkIndex;
+ private final int totalChunks;
- public InstallSnapshot(long term, String leaderId, long lastIncludedIndex, long lastIncludedTerm, Object data) {
+ public InstallSnapshot(long term, String leaderId, long lastIncludedIndex,
+ long lastIncludedTerm, ByteString data, int chunkIndex, int totalChunks) {
super(term);
this.leaderId = leaderId;
this.lastIncludedIndex = lastIncludedIndex;
this.lastIncludedTerm = lastIncludedTerm;
this.data = data;
+ this.chunkIndex = chunkIndex;
+ this.totalChunks = totalChunks;
}
public String getLeaderId() {
return lastIncludedTerm;
}
- public Object getData() {
+ public ByteString getData() {
return data;
}
+
+ public int getChunkIndex() {
+ return chunkIndex;
+ }
+
+ public int getTotalChunks() {
+ return totalChunks;
+ }
+
+ public <T extends Object> Object toSerializable(){
+ return InstallSnapshotMessages.InstallSnapshot.newBuilder()
+ .setLeaderId(this.getLeaderId())
+ .setChunkIndex(this.getChunkIndex())
+ .setData(this.getData())
+ .setLastIncludedIndex(this.getLastIncludedIndex())
+ .setLastIncludedTerm(this.getLastIncludedTerm())
+ .setTotalChunks(this.getTotalChunks()).build();
+
+ }
+
+ public static InstallSnapshot fromSerializable (Object o) {
+ InstallSnapshotMessages.InstallSnapshot from =
+ (InstallSnapshotMessages.InstallSnapshot) o;
+
+ InstallSnapshot installSnapshot = new InstallSnapshot(from.getTerm(),
+ from.getLeaderId(), from.getLastIncludedIndex(),
+ from.getLastIncludedTerm(), from.getData(),
+ from.getChunkIndex(), from.getTotalChunks());
+
+ return installSnapshot;
+ }
}
// The followerId - this will be used to figure out which follower is
// responding
private final String followerId;
+ private final int chunkIndex;
+ private boolean success;
- protected InstallSnapshotReply(long term, String followerId) {
+ public InstallSnapshotReply(long term, String followerId, int chunkIndex,
+ boolean success) {
super(term);
this.followerId = followerId;
+ this.chunkIndex = chunkIndex;
+ this.success = success;
}
public String getFollowerId() {
return followerId;
}
+
+ public int getChunkIndex() {
+ return chunkIndex;
+ }
+
+ public boolean isSuccess() {
+ return success;
+ }
}
--- /dev/null
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: InstallSnapshot.proto
+
+package org.opendaylight.controller.cluster.raft.protobuff.messages;
+
+public final class InstallSnapshotMessages {
+ private InstallSnapshotMessages() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface InstallSnapshotOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional int64 term = 1;
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ boolean hasTerm();
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ long getTerm();
+
+ // optional string leaderId = 2;
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ boolean hasLeaderId();
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ java.lang.String getLeaderId();
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getLeaderIdBytes();
+
+ // optional int64 lastIncludedIndex = 3;
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ boolean hasLastIncludedIndex();
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ long getLastIncludedIndex();
+
+ // optional int64 lastIncludedTerm = 4;
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ boolean hasLastIncludedTerm();
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ long getLastIncludedTerm();
+
+ // optional bytes data = 5;
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ boolean hasData();
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ com.google.protobuf.ByteString getData();
+
+ // optional int32 chunkIndex = 6;
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ boolean hasChunkIndex();
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ int getChunkIndex();
+
+ // optional int32 totalChunks = 7;
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ boolean hasTotalChunks();
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ int getTotalChunks();
+ }
+ /**
+ * Protobuf type {@code org.opendaylight.controller.cluster.raft.InstallSnapshot}
+ */
+ public static final class InstallSnapshot extends
+ com.google.protobuf.GeneratedMessage
+ implements InstallSnapshotOrBuilder {
+ // Use InstallSnapshot.newBuilder() to construct.
+ private InstallSnapshot(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private InstallSnapshot(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final InstallSnapshot defaultInstance;
+ public static InstallSnapshot getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public InstallSnapshot getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private InstallSnapshot(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ term_ = input.readInt64();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ leaderId_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ lastIncludedIndex_ = input.readInt64();
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ lastIncludedTerm_ = input.readInt64();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ data_ = input.readBytes();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ chunkIndex_ = input.readInt32();
+ break;
+ }
+ case 56: {
+ bitField0_ |= 0x00000040;
+ totalChunks_ = input.readInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<InstallSnapshot> PARSER =
+ new com.google.protobuf.AbstractParser<InstallSnapshot>() {
+ public InstallSnapshot parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new InstallSnapshot(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<InstallSnapshot> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional int64 term = 1;
+ public static final int TERM_FIELD_NUMBER = 1;
+ private long term_;
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ public boolean hasTerm() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ public long getTerm() {
+ return term_;
+ }
+
+ // optional string leaderId = 2;
+ public static final int LEADERID_FIELD_NUMBER = 2;
+ private java.lang.Object leaderId_;
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public boolean hasLeaderId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public java.lang.String getLeaderId() {
+ java.lang.Object ref = leaderId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ leaderId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLeaderIdBytes() {
+ java.lang.Object ref = leaderId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ leaderId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int64 lastIncludedIndex = 3;
+ public static final int LASTINCLUDEDINDEX_FIELD_NUMBER = 3;
+ private long lastIncludedIndex_;
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ public boolean hasLastIncludedIndex() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ public long getLastIncludedIndex() {
+ return lastIncludedIndex_;
+ }
+
+ // optional int64 lastIncludedTerm = 4;
+ public static final int LASTINCLUDEDTERM_FIELD_NUMBER = 4;
+ private long lastIncludedTerm_;
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ public boolean hasLastIncludedTerm() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ public long getLastIncludedTerm() {
+ return lastIncludedTerm_;
+ }
+
+ // optional bytes data = 5;
+ public static final int DATA_FIELD_NUMBER = 5;
+ private com.google.protobuf.ByteString data_;
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ public boolean hasData() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ public com.google.protobuf.ByteString getData() {
+ return data_;
+ }
+
+ // optional int32 chunkIndex = 6;
+ public static final int CHUNKINDEX_FIELD_NUMBER = 6;
+ private int chunkIndex_;
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ public boolean hasChunkIndex() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ public int getChunkIndex() {
+ return chunkIndex_;
+ }
+
+ // optional int32 totalChunks = 7;
+ public static final int TOTALCHUNKS_FIELD_NUMBER = 7;
+ private int totalChunks_;
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ public boolean hasTotalChunks() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ public int getTotalChunks() {
+ return totalChunks_;
+ }
+
+ private void initFields() {
+ term_ = 0L;
+ leaderId_ = "";
+ lastIncludedIndex_ = 0L;
+ lastIncludedTerm_ = 0L;
+ data_ = com.google.protobuf.ByteString.EMPTY;
+ chunkIndex_ = 0;
+ totalChunks_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt64(1, term_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getLeaderIdBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt64(3, lastIncludedIndex_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt64(4, lastIncludedTerm_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, data_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt32(6, chunkIndex_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt32(7, totalChunks_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(1, term_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getLeaderIdBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(3, lastIncludedIndex_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(4, lastIncludedTerm_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, data_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(6, chunkIndex_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(7, totalChunks_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.opendaylight.controller.cluster.raft.InstallSnapshot}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshotOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+ }
+
+ // Construct using org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ term_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ leaderId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ lastIncludedIndex_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ lastIncludedTerm_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ data_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ chunkIndex_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ totalChunks_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ }
+
+ public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
+ return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
+ }
+
+ public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot build() {
+ org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot buildPartial() {
+ org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.term_ = term_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.leaderId_ = leaderId_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.lastIncludedIndex_ = lastIncludedIndex_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.lastIncludedTerm_ = lastIncludedTerm_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.data_ = data_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.chunkIndex_ = chunkIndex_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.totalChunks_ = totalChunks_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) {
+ return mergeFrom((org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot other) {
+ if (other == org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
+ if (other.hasTerm()) {
+ setTerm(other.getTerm());
+ }
+ if (other.hasLeaderId()) {
+ bitField0_ |= 0x00000002;
+ leaderId_ = other.leaderId_;
+ onChanged();
+ }
+ if (other.hasLastIncludedIndex()) {
+ setLastIncludedIndex(other.getLastIncludedIndex());
+ }
+ if (other.hasLastIncludedTerm()) {
+ setLastIncludedTerm(other.getLastIncludedTerm());
+ }
+ if (other.hasData()) {
+ setData(other.getData());
+ }
+ if (other.hasChunkIndex()) {
+ setChunkIndex(other.getChunkIndex());
+ }
+ if (other.hasTotalChunks()) {
+ setTotalChunks(other.getTotalChunks());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+      /**
+       * Parses an InstallSnapshot from the stream and merges it into this
+       * builder. If parsing fails part-way, the fields parsed so far are still
+       * merged (see the finally block) before the exception is rethrown.
+       */
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          // The partially parsed message is attached to the exception.
+          parsedMessage = (org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+ private int bitField0_;
+
+ // optional int64 term = 1;
+ private long term_ ;
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ public boolean hasTerm() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ public long getTerm() {
+ return term_;
+ }
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ public Builder setTerm(long value) {
+ bitField0_ |= 0x00000001;
+ term_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 term = 1;</code>
+ */
+ public Builder clearTerm() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ term_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional string leaderId = 2;
+ private java.lang.Object leaderId_ = "";
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public boolean hasLeaderId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public java.lang.String getLeaderId() {
+ java.lang.Object ref = leaderId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ leaderId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getLeaderIdBytes() {
+ java.lang.Object ref = leaderId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ leaderId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public Builder setLeaderId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ leaderId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public Builder clearLeaderId() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ leaderId_ = getDefaultInstance().getLeaderId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional string leaderId = 2;</code>
+ */
+ public Builder setLeaderIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ leaderId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 lastIncludedIndex = 3;
+ private long lastIncludedIndex_ ;
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ public boolean hasLastIncludedIndex() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ public long getLastIncludedIndex() {
+ return lastIncludedIndex_;
+ }
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ public Builder setLastIncludedIndex(long value) {
+ bitField0_ |= 0x00000004;
+ lastIncludedIndex_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 lastIncludedIndex = 3;</code>
+ */
+ public Builder clearLastIncludedIndex() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ lastIncludedIndex_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 lastIncludedTerm = 4;
+ private long lastIncludedTerm_ ;
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ public boolean hasLastIncludedTerm() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ public long getLastIncludedTerm() {
+ return lastIncludedTerm_;
+ }
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ public Builder setLastIncludedTerm(long value) {
+ bitField0_ |= 0x00000008;
+ lastIncludedTerm_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 lastIncludedTerm = 4;</code>
+ */
+ public Builder clearLastIncludedTerm() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ lastIncludedTerm_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional bytes data = 5;
+ private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ public boolean hasData() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ public com.google.protobuf.ByteString getData() {
+ return data_;
+ }
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ public Builder setData(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ data_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional bytes data = 5;</code>
+ */
+ public Builder clearData() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ data_ = getDefaultInstance().getData();
+ onChanged();
+ return this;
+ }
+
+ // optional int32 chunkIndex = 6;
+ private int chunkIndex_ ;
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ public boolean hasChunkIndex() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ public int getChunkIndex() {
+ return chunkIndex_;
+ }
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ public Builder setChunkIndex(int value) {
+ bitField0_ |= 0x00000020;
+ chunkIndex_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 chunkIndex = 6;</code>
+ */
+ public Builder clearChunkIndex() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ chunkIndex_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional int32 totalChunks = 7;
+ private int totalChunks_ ;
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ public boolean hasTotalChunks() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ public int getTotalChunks() {
+ return totalChunks_;
+ }
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ public Builder setTotalChunks(int value) {
+ bitField0_ |= 0x00000040;
+ totalChunks_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 totalChunks = 7;</code>
+ */
+ public Builder clearTotalChunks() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ totalChunks_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.cluster.raft.InstallSnapshot)
+ }
+
+    static {
+      // Eagerly builds the singleton returned by getDefaultInstance().
+      defaultInstance = new InstallSnapshot(true);
+      defaultInstance.initFields();
+    }
+
+ // @@protoc_insertion_point(class_scope:org.opendaylight.controller.cluster.raft.InstallSnapshot)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\025InstallSnapshot.proto\022(org.opendayligh" +
+ "t.controller.cluster.raft\"\235\001\n\017InstallSna" +
+ "pshot\022\014\n\004term\030\001 \001(\003\022\020\n\010leaderId\030\002 \001(\t\022\031\n" +
+ "\021lastIncludedIndex\030\003 \001(\003\022\030\n\020lastIncluded" +
+ "Term\030\004 \001(\003\022\014\n\004data\030\005 \001(\014\022\022\n\nchunkIndex\030\006" +
+ " \001(\005\022\023\n\013totalChunks\030\007 \001(\005BX\n;org.openday" +
+ "light.controller.cluster.raft.protobuff." +
+ "messagesB\027InstallSnapshotMessagesH\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor,
+ new java.lang.String[] { "Term", "LeaderId", "LastIncludedIndex", "LastIncludedTerm", "Data", "ChunkIndex", "TotalChunks", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
--- /dev/null
+package org.opendaylight.controller.cluster.raft;
+
+option java_package = "org.opendaylight.controller.cluster.raft.protobuff.messages";
+option java_outer_classname = "InstallSnapshotMessages";
+option optimize_for = SPEED;
+
+// Raft InstallSnapshot RPC: the leader streams its snapshot to a lagging
+// follower in chunks (chunkIndex/totalChunks are chunking extensions).
+message InstallSnapshot {
+    optional int64 term = 1;              // leader's current term
+    optional string leaderId = 2;         // id of the sending leader
+    optional int64 lastIncludedIndex = 3; // snapshot covers entries up to this index
+    optional int64 lastIncludedTerm = 4;  // term of the entry at lastIncludedIndex
+    optional bytes data = 5;              // one raw chunk of the serialized snapshot
+    optional int32 chunkIndex = 6;        // 1-based index of this chunk
+    optional int32 totalChunks = 7;       // total number of chunks in the transfer
+}
import akka.actor.Props;
import akka.event.Logging;
import akka.event.LoggingAdapter;
+import com.google.common.base.Preconditions;
import com.google.protobuf.GeneratedMessage;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.test.MockPayloadMessages;
-import com.google.common.base.Preconditions;
import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
public class MockRaftActorContext implements RaftActorContext {
private final ElectionTerm electionTerm;
private ReplicatedLog replicatedLog;
private Map<String, String> peerAddresses = new HashMap();
+ private ConfigParams configParams;
public MockRaftActorContext(){
electionTerm = null;
}
};
+ configParams = new DefaultConfigParamsImpl();
+
initReplicatedLog();
}
@Override
public ConfigParams getConfigParams() {
- return new DefaultConfigParamsImpl();
+ return configParams;
}
- public static class SimpleReplicatedLog implements ReplicatedLog {
- private final List<ReplicatedLogEntry> log = new ArrayList<>();
-
- @Override public ReplicatedLogEntry get(long index) {
- if(index >= log.size() || index < 0){
- return null;
- }
- return log.get((int) index);
- }
-
- @Override public ReplicatedLogEntry last() {
- if(log.size() == 0){
- return null;
- }
- return log.get(log.size()-1);
- }
-
- @Override public long lastIndex() {
- if(log.size() == 0){
- return -1;
- }
-
- return last().getIndex();
- }
-
- @Override public long lastTerm() {
- if(log.size() == 0){
- return -1;
- }
-
- return last().getTerm();
- }
-
- @Override public void removeFrom(long index) {
- if(index >= log.size() || index < 0){
- return;
- }
-
- log.subList((int) index, log.size()).clear();
- //log.remove((int) index);
- }
-
- @Override public void removeFromAndPersist(long index) {
- removeFrom(index);
- }
-
- @Override public void append(ReplicatedLogEntry replicatedLogEntry) {
- log.add(replicatedLogEntry);
- }
+    // Lets tests substitute their own configuration (e.g. a smaller snapshot
+    // chunk size) for the default created in the constructor.
+    public void setConfigParams(ConfigParams configParams) {
+        this.configParams = configParams;
+    }
+ public static class SimpleReplicatedLog extends AbstractReplicatedLogImpl {
@Override public void appendAndPersist(
ReplicatedLogEntry replicatedLogEntry) {
append(replicatedLogEntry);
}
- @Override public List<ReplicatedLogEntry> getFrom(long index) {
- if(index >= log.size() || index < 0){
- return Collections.EMPTY_LIST;
- }
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- for(int i=(int) index ; i < log.size() ; i++) {
- entries.add(get(i));
- }
- return entries;
- }
-
- @Override public List<ReplicatedLogEntry> getFrom(long index, int max) {
- if(index >= log.size() || index < 0){
- return Collections.EMPTY_LIST;
- }
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- int maxIndex = (int) index + max;
- if(maxIndex > log.size()){
- maxIndex = log.size();
- }
-
- for(int i=(int) index ; i < maxIndex ; i++) {
- entries.add(get(i));
- }
- return entries;
-
- }
-
- @Override public long size() {
- return log.size();
- }
-
- @Override public boolean isPresent(long index) {
- if(index >= log.size() || index < 0){
- return false;
- }
-
- return true;
- }
-
- @Override public boolean isInSnapshot(long index) {
- return false;
- }
-
- @Override public Object getSnapshot() {
- return null;
- }
-
- @Override public long getSnapshotIndex() {
- return -1;
- }
-
- @Override public long getSnapshotTerm() {
- return -1;
+ @Override public void removeFromAndPersist(long index) {
+ removeFrom(index);
}
}
import akka.event.Logging;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
+import com.google.protobuf.ByteString;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
Object data) {
}
- @Override protected Object createSnapshot() {
+ @Override protected void createSnapshot() {
throw new UnsupportedOperationException("createSnapshot");
}
- @Override protected void applySnapshot(Object snapshot) {
+ @Override protected void applySnapshot(ByteString snapshot) {
throw new UnsupportedOperationException("applySnapshot");
}
createActorContext();
context.setLastApplied(100);
- setLastLogEntry((MockRaftActorContext) context, 0, 0, new MockRaftActorContext.MockPayload(""));
+ setLastLogEntry((MockRaftActorContext) context, 1, 100, new MockRaftActorContext.MockPayload(""));
+ ((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
List<ReplicatedLogEntry> entries =
Arrays.asList(
- (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(100, 101,
+ (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
new MockRaftActorContext.MockPayload("foo"))
);
// The new commitIndex is 101
AppendEntries appendEntries =
- new AppendEntries(100, "leader-1", 0, 0, entries, 101);
+ new AppendEntries(2, "leader-1", 100, 1, entries, 101);
RaftState raftState =
createBehavior(context).handleMessage(getRef(), appendEntries);
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
-import junit.framework.Assert;
+import com.google.protobuf.ByteString;
+import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
+import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
+import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
+import org.opendaylight.controller.cluster.raft.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
+import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
public class LeaderTest extends AbstractRaftActorBehaviorTest {
assertEquals("match", out);
}
-
-
};
}};
}
assertEquals("match", out);
}
+ };
+ }};
+ }
+
+    /**
+     * Verifies that when a follower's next index (2) lags behind the leader's
+     * snapshot index (3), replicating a new entry makes the leader emit a
+     * SendInstallSnapshot rather than a plain AppendEntries.
+     */
+    @Test
+    public void testSendInstallSnapshot() {
+        new LeaderTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+                    ActorRef followerActor = getTestActor();
+
+                    // Diamond operator instead of the raw HashMap of the
+                    // original, which produced an unchecked-assignment warning.
+                    Map<String, String> peerAddresses = new HashMap<>();
+                    peerAddresses.put(followerActor.path().toString(),
+                        followerActor.path().toString());
+
+                    MockRaftActorContext actorContext =
+                        (MockRaftActorContext) createActorContext(getRef());
+                    actorContext.setPeerAddresses(peerAddresses);
+
+                    Map<String, String> leadersSnapshot = new HashMap<>();
+                    leadersSnapshot.put("1", "A");
+                    leadersSnapshot.put("2", "B");
+                    leadersSnapshot.put("3", "C");
+
+                    // clears the leader's log
+                    actorContext.getReplicatedLog().removeFrom(0);
+
+                    final int followersLastIndex = 2;
+                    final int snapshotIndex = 3;
+                    final int newEntryIndex = 4;
+                    final int snapshotTerm = 1;
+                    final int currentTerm = 2;
+
+                    // set the snapshot variables in the replicated log
+                    actorContext.getReplicatedLog().setSnapshot(
+                        toByteString(leadersSnapshot));
+                    actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+                    actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+
+                    MockLeader leader = new MockLeader(actorContext);
+                    // register the follower; its indices trail the snapshot index
+                    leader.addToFollowerToLog(followerActor.path().toString(), followersLastIndex, -1);
+
+                    // new entry to replicate
+                    ReplicatedLogImplEntry entry =
+                        new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
+                            new MockRaftActorContext.MockPayload("D"));
+
+                    // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
+                    RaftState raftState = leader.handleMessage(
+                        senderActor, new Replicate(null, "state-id", entry));
+
+                    assertEquals(RaftState.Leader, raftState);
+
+                    // heartbeats may interleave, so collect messages until a
+                    // SendInstallSnapshot shows up (or the window expires)
+                    Boolean[] matches = new ReceiveWhile<Boolean>(Boolean.class, duration("2 seconds")) {
+                        @Override
+                        protected Boolean match(Object o) throws Exception {
+                            return o instanceof SendInstallSnapshot;
+                        }
+                    }.get();
+
+                    boolean sendInstallSnapshotReceived = false;
+                    for (Boolean b : matches) {
+                        // short-circuit OR; the original used the bitwise '|',
+                        // which needlessly evaluates both operands
+                        sendInstallSnapshotReceived = sendInstallSnapshotReceived || b;
+                    }
+
+                    assertTrue(sendInstallSnapshotReceived);
+
+                }
+            };
+        }};
+    }
+
+    /**
+     * Verifies that handling SendInstallSnapshot makes the leader send an
+     * InstallSnapshot message carrying the correct snapshot metadata
+     * (term, lastIncludedIndex, lastIncludedTerm, non-null data).
+     */
+    @Test
+    public void testInstallSnapshot() {
+        new LeaderTestKit(getSystem()) {{
+
+            new Within(duration("1 seconds")) {
+                protected void run() {
+                    ActorRef followerActor = getTestActor();
+
+                    Map<String, String> peerAddresses = new HashMap<>();
+                    peerAddresses.put(followerActor.path().toString(),
+                        followerActor.path().toString());
+
+                    MockRaftActorContext actorContext =
+                        (MockRaftActorContext) createActorContext();
+                    actorContext.setPeerAddresses(peerAddresses);
+
+                    Map<String, String> leadersSnapshot = new HashMap<>();
+                    leadersSnapshot.put("1", "A");
+                    leadersSnapshot.put("2", "B");
+                    leadersSnapshot.put("3", "C");
+
+                    // clears the leader's log
+                    actorContext.getReplicatedLog().removeFrom(0);
+
+                    final int followersLastIndex = 2;
+                    final int snapshotIndex = 3;
+                    final int snapshotTerm = 1;
+                    final int currentTerm = 2;
+
+                    // set the snapshot variables in the replicated log
+                    actorContext.getReplicatedLog().setSnapshot(toByteString(leadersSnapshot));
+                    actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+                    actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+
+                    actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+
+                    MockLeader leader = new MockLeader(actorContext);
+                    // set the follower info in the leader
+                    leader.addToFollowerToLog(followerActor.path().toString(), followersLastIndex, -1);
+
+                    // (the unused 'entry'/'newEntryIndex' locals of the original
+                    // were removed: this path is driven purely by the
+                    // SendInstallSnapshot message below)
+                    RaftState raftState = leader.handleMessage(senderActor, new SendInstallSnapshot());
+
+                    assertEquals(RaftState.Leader, raftState);
+
+                    // check if InstallSnapshot gets sent with the correct values
+                    final String out =
+                        new ExpectMsg<String>(duration("1 seconds"), "match hint") {
+                            // do not put code outside this method, will run afterwards
+                            protected String match(Object in) {
+                                if (in instanceof InstallSnapshotMessages.InstallSnapshot) {
+                                    InstallSnapshot is = (InstallSnapshot)
+                                        SerializationUtils.fromSerializable(in);
+                                    if (is.getData() == null) {
+                                        return "InstallSnapshot data is null";
+                                    }
+                                    if (is.getLastIncludedIndex() != snapshotIndex) {
+                                        return is.getLastIncludedIndex() + "!=" + snapshotIndex;
+                                    }
+                                    if (is.getLastIncludedTerm() != snapshotTerm) {
+                                        return is.getLastIncludedTerm() + "!=" + snapshotTerm;
+                                    }
+                                    // BUG FIX: the original tested '==' here, so the
+                                    // check reported a mismatch exactly when the terms
+                                    // were equal and silently passed when they differed
+                                    if (is.getTerm() != currentTerm) {
+                                        return is.getTerm() + "!=" + currentTerm;
+                                    }
+
+                                    return "match";
+
+                                } else {
+                                    return "message mismatch:" + in.getClass();
+                                }
+                            }
+                        }.get(); // this extracts the received message
+
+                    assertEquals("match", out);
+                }
+            };
+        }};
+    }
+
+    /**
+     * Verifies that on the InstallSnapshotReply for the LAST chunk the leader
+     * drops the follower's chunking state and advances that follower's
+     * matchIndex to the snapshot index and nextIndex to snapshotIndex + 1.
+     */
+    @Test
+    public void testHandleInstallSnapshotReplyLastChunk() {
+        new LeaderTestKit(getSystem()) {{
+            new Within(duration("1 seconds")) {
+                protected void run() {
+                    ActorRef followerActor = getTestActor();
+
+                    Map<String, String> peerAddresses = new HashMap<>();
+                    peerAddresses.put(followerActor.path().toString(),
+                        followerActor.path().toString());
+
+                    MockRaftActorContext actorContext =
+                        (MockRaftActorContext) createActorContext();
+                    actorContext.setPeerAddresses(peerAddresses);
+
+                    final int followersLastIndex = 2;
+                    final int snapshotIndex = 3;
+                    final int snapshotTerm = 1;
+                    final int currentTerm = 2;
+
+                    MockLeader leader = new MockLeader(actorContext);
+                    // set the follower info in the leader
+                    leader.addToFollowerToLog(followerActor.path().toString(), followersLastIndex, -1);
+
+                    Map<String, String> leadersSnapshot = new HashMap<>();
+                    leadersSnapshot.put("1", "A");
+                    leadersSnapshot.put("2", "B");
+                    leadersSnapshot.put("3", "C");
+
+                    // set the snapshot variables in the replicated log
+                    actorContext.getReplicatedLog().setSnapshot(
+                        toByteString(leadersSnapshot));
+                    actorContext.getReplicatedLog().setSnapshotIndex(snapshotIndex);
+                    actorContext.getReplicatedLog().setSnapshotTerm(snapshotTerm);
+                    actorContext.getTermInformation().update(currentTerm, leaderActor.path().toString());
+
+                    // drive the chunker forward until it sits on the last chunk
+                    ByteString bs = toByteString(leadersSnapshot);
+                    leader.createFollowerToSnapshot(followerActor.path().toString(), bs);
+                    while (!leader.getFollowerToSnapshot().isLastChunk(leader.getFollowerToSnapshot().getChunkIndex())) {
+                        leader.getFollowerToSnapshot().getNextChunk();
+                        leader.getFollowerToSnapshot().incrementChunkIndex();
+                    }
+
+                    // clears the leader's log
+                    actorContext.getReplicatedLog().removeFrom(0);
+                    RaftState raftState = leader.handleMessage(senderActor,
+                        new InstallSnapshotReply(currentTerm, followerActor.path().toString(),
+                            leader.getFollowerToSnapshot().getChunkIndex(), true));
+                    assertEquals(RaftState.Leader, raftState);
+
+                    // assertEquals takes (expected, actual); the original had the
+                    // arguments reversed, which garbles failure messages
+                    assertEquals(0, leader.mapFollowerToSnapshot.size());
+                    assertEquals(1, leader.followerToLog.size());
+                    assertNotNull(leader.followerToLog.get(followerActor.path().toString()));
+                    FollowerLogInformation fli = leader.followerToLog.get(followerActor.path().toString());
+                    // (the original asserted matchIndex twice; the duplicate is removed)
+                    assertEquals(snapshotIndex, fli.getMatchIndex().get());
+                    assertEquals(snapshotIndex + 1, fli.getNextIndex().get());
+                }
+            };
+        }};
+    }
+    /**
+     * Exercises the FollowerToSnapshot chunking state machine: every chunk
+     * except possibly the last is exactly the configured chunk size, chunk
+     * indices advance 1, 2, ... in order, and getTotalChunks() matches the
+     * number of chunks actually produced.
+     */
+    @Test
+    public void testFollowerToSnapshotLogic() {
+
+        MockRaftActorContext actorContext = (MockRaftActorContext) createActorContext();
+
+        // Single source of truth for the chunk size; the original repeated the
+        // literal 50 in both the config override and the loop bounds.
+        final int chunkSize = 50;
+        actorContext.setConfigParams(new DefaultConfigParamsImpl() {
+            @Override
+            public int getSnapshotChunkSize() {
+                return chunkSize;
+            }
+        });
+
+        MockLeader leader = new MockLeader(actorContext);
+
+        Map<String, String> leadersSnapshot = new HashMap<>();
+        leadersSnapshot.put("1", "A");
+        leadersSnapshot.put("2", "B");
+        leadersSnapshot.put("3", "C");
+
+        ByteString bs = toByteString(leadersSnapshot);
+        byte[] barray = bs.toByteArray();
+
+        leader.createFollowerToSnapshot("followerId", bs);
+        assertEquals(bs.size(), barray.length);
+
+        int chunkIndex = 0;
+        for (int i = 0; i < barray.length; i += chunkSize) {
+            // end of the expected chunk, clamped to the snapshot length
+            int j = Math.min(i + chunkSize, barray.length);
+            chunkIndex++;
+
+            ByteString chunk = leader.getFollowerToSnapshot().getNextChunk();
+            assertEquals("bytestring size not matching for chunk:" + chunkIndex, j - i, chunk.size());
+            assertEquals("chunkindex not matching", chunkIndex, leader.getFollowerToSnapshot().getChunkIndex());
+
+            leader.getFollowerToSnapshot().markSendStatus(true);
+            if (!leader.getFollowerToSnapshot().isLastChunk(chunkIndex)) {
+                leader.getFollowerToSnapshot().incrementChunkIndex();
+            }
+        }
+
+        assertEquals("totalChunks not matching", chunkIndex, leader.getFollowerToSnapshot().getTotalChunks());
+    }
+
+
@Override protected RaftActorBehavior createBehavior(
RaftActorContext actorContext) {
return new Leader(actorContext);
}
@Override protected RaftActorContext createActorContext() {
- return new MockRaftActorContext("test", getSystem(), leaderActor);
+ return createActorContext(leaderActor);
+ }
+
+ protected RaftActorContext createActorContext(ActorRef actorRef) {
+ return new MockRaftActorContext("test", getSystem(), actorRef);
+ }
+
+    /**
+     * Java-serializes the given map and wraps the bytes in a ByteString.
+     * Fails the test (and returns null) on any I/O error.
+     *
+     * BUG FIX: the original captured {@code b.toByteArray()} BEFORE the
+     * finally block flushed the ObjectOutputStream, so bytes still sitting in
+     * the stream's internal block-data buffer could be missing from the
+     * returned ByteString. Flush first, then snapshot the buffer.
+     */
+    private ByteString toByteString(Map<String, String> state) {
+        try (ByteArrayOutputStream b = new ByteArrayOutputStream();
+             ObjectOutputStream o = new ObjectOutputStream(b)) {
+            o.writeObject(state);
+            o.flush();
+            return ByteString.copyFrom(b.toByteArray());
+        } catch (IOException e) {
+            Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
+        }
+        return null;
+    }
+
+    /**
+     * JavaTestKit extension with a helper for asserting that a specific log
+     * message is emitted exactly once by a given actor.
+     */
+    private static class LeaderTestKit extends JavaTestKit {
+
+        private LeaderTestKit(ActorSystem actorSystem) {
+            super(actorSystem);
+        }
+
+        // Blocks until one log event from 'subject' with text 'logMessage' has
+        // been observed, then asserts the filter fired.
+        // NOTE(review): 'logLevel' is a raw Class; presumably one of the
+        // akka.event.Logging level classes accepted by EventFilter — confirm.
+        protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
+            // Wait for a specific log message to show up
+            final boolean result =
+                new JavaTestKit.EventFilter<Boolean>(logLevel
+                ) {
+                    @Override
+                    protected Boolean run() {
+                        return true;
+                    }
+                }.from(subject.path().toString())
+                    .message(logMessage)
+                    .occurrences(1).exec();
+
+            Assert.assertEquals(true, result);
+
+        }
+    }
+
+    /**
+     * Leader subclass exposing hooks for tests: seeding followerToLog directly
+     * and creating/inspecting the per-follower snapshot-chunking state.
+     */
+    class MockLeader extends Leader {
+
+        // Last FollowerToSnapshot created via createFollowerToSnapshot(); kept
+        // so tests can drive the chunking state machine directly.
+        FollowerToSnapshot fts;
+
+        public MockLeader(RaftActorContext context){
+            super(context);
+        }
+
+        // Registers a follower with the given next/match indices, bypassing the
+        // leader's normal follower bookkeeping.
+        public void addToFollowerToLog(String followerId, long nextIndex, long matchIndex) {
+            FollowerLogInformation followerLogInformation =
+                new FollowerLogInformationImpl(followerId,
+                    new AtomicLong(nextIndex),
+                    new AtomicLong(matchIndex));
+            followerToLog.put(followerId, followerLogInformation);
+        }
+
+        public FollowerToSnapshot getFollowerToSnapshot() {
+            return fts;
+        }
+
+        // Seeds the leader's per-follower chunking state with the given snapshot.
+        public void createFollowerToSnapshot(String followerId, ByteString bs ) {
+            fts = new FollowerToSnapshot(bs);
+            mapFollowerToSnapshot.put(followerId, fts);
+
+        }
+    }
}
</dependency>
</dependencies>
<build>
+
<plugins>
<plugin>
<groupId>org.jacoco</groupId>
</execution>
</executions>
</plugin>
- </plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ <Export-Package>org.opendaylight.controller.cluster.*,org.opendaylight.common.actor,org.opendaylight.common.reporting,org.opendaylight.controller.protobuff.*,org.opendaylight.controller.xml.*</Export-Package>
+ <Import-Package>*</Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
</build>
-
</project>
*/
org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages.ModificationOrBuilder getModificationOrBuilder(
int index);
+
+ // optional int64 timeStamp = 2;
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ boolean hasTimeStamp();
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ long getTimeStamp();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CompositeModification}
modification_.add(input.readMessage(org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages.Modification.PARSER, extensionRegistry));
break;
}
+ case 16: {
+ bitField0_ |= 0x00000001;
+ timeStamp_ = input.readInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return PARSER;
}
+ private int bitField0_;
// repeated .org.opendaylight.controller.mdsal.Modification modification = 1;
public static final int MODIFICATION_FIELD_NUMBER = 1;
private java.util.List<org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages.Modification> modification_;
return modification_.get(index);
}
+ // optional int64 timeStamp = 2;
+ public static final int TIMESTAMP_FIELD_NUMBER = 2;
+ private long timeStamp_;
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ public boolean hasTimeStamp() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ public long getTimeStamp() {
+ return timeStamp_;
+ }
+
private void initFields() {
modification_ = java.util.Collections.emptyList();
+ timeStamp_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
for (int i = 0; i < modification_.size(); i++) {
output.writeMessage(1, modification_.get(i));
}
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt64(2, timeStamp_);
+ }
getUnknownFields().writeTo(output);
}
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, modification_.get(i));
}
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(2, timeStamp_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
} else {
modificationBuilder_.clear();
}
+ timeStamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages.CompositeModification buildPartial() {
org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages.CompositeModification result = new org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages.CompositeModification(this);
int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
if (modificationBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
modification_ = java.util.Collections.unmodifiableList(modification_);
} else {
result.modification_ = modificationBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.timeStamp_ = timeStamp_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
}
}
}
+ if (other.hasTimeStamp()) {
+ setTimeStamp(other.getTimeStamp());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
return modificationBuilder_;
}
+ // optional int64 timeStamp = 2;
+ private long timeStamp_ ;
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ public boolean hasTimeStamp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ public long getTimeStamp() {
+ return timeStamp_;
+ }
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ public Builder setTimeStamp(long value) {
+ bitField0_ |= 0x00000002;
+ timeStamp_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int64 timeStamp = 2;</code>
+ */
+ public Builder clearTimeStamp() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ timeStamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CompositeModification)
}
"e\030\001 \002(\t\022C\n\004path\030\002 \002(\01325.org.opendaylight" +
".controller.mdsal.InstanceIdentifier\0225\n\004" +
"data\030\003 \001(\0132\'.org.opendaylight.controller" +
- ".mdsal.Node\"^\n\025CompositeModification\022E\n\014" +
+ ".mdsal.Node\"q\n\025CompositeModification\022E\n\014" +
"modification\030\001 \003(\0132/.org.opendaylight.co" +
- "ntroller.mdsal.ModificationBO\n9org.opend" +
- "aylight.controller.protobuff.messages.pe",
- "rsistentB\022PersistentMessages"
+ "ntroller.mdsal.Modification\022\021\n\ttimeStamp" +
+ "\030\002 \001(\003BO\n9org.opendaylight.controller.pr",
+ "otobuff.messages.persistentB\022PersistentM" +
+ "essages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_CompositeModification_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CompositeModification_descriptor,
- new java.lang.String[] { "Modification", });
+ new java.lang.String[] { "Modification", "TimeStamp", });
return null;
}
};
*/
public static String inputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
LOG.debug("Converting input composite node to xml {}", cNode);
- if (cNode == null) return BLANK;
+ if (cNode == null) {
+ return BLANK;
+ }
- if(schemaContext == null) return BLANK;
+ if(schemaContext == null) {
+ return BLANK;
+ }
Document domTree = null;
try {
*/
public static String outputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
LOG.debug("Converting output composite node to xml {}", cNode);
- if (cNode == null) return BLANK;
+ if (cNode == null) {
+ return BLANK;
+ }
- if(schemaContext == null) return BLANK;
+ if(schemaContext == null) {
+ return BLANK;
+ }
Document domTree = null;
try {
}
public static CompositeNode xmlToCompositeNode(String xml){
- if (xml==null || xml.length()==0) return null;
+ if (xml==null || xml.length()==0) {
+ return null;
+ }
Node<?> dataTree;
try {
*/
public static CompositeNode inputXmlToCompositeNode(QName rpc, String xml, SchemaContext schemaContext){
LOG.debug("Converting input xml to composite node {}", xml);
- if (xml==null || xml.length()==0) return null;
+ if (xml==null || xml.length()==0) {
+ return null;
+ }
- if(rpc == null) return null;
+ if(rpc == null) {
+ return null;
+ }
- if(schemaContext == null) return null;
+ if(schemaContext == null) {
+ return null;
+ }
CompositeNode compositeNode = null;
try {
LOG.debug("Converted xml input to list of nodes {}", dataNodes);
final CompositeNodeBuilder<ImmutableCompositeNode> it = ImmutableCompositeNode.builder();
- it.setQName(input);
+ it.setQName(rpc);
it.add(ImmutableCompositeNode.create(input, dataNodes));
compositeNode = it.toInstance();
break;
required string type=1;
required InstanceIdentifier path=2;
optional Node data=3;
+
}
message CompositeModification {
repeated Modification modification=1;
+ optional int64 timeStamp = 2;
}
<type>xml</type>
<classifier>config</classifier>
</artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/akka.conf</file>
+ <type>xml</type>
+ <classifier>akkaconf</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+ <type>xml</type>
+ <classifier>moduleshardconf</classifier>
+ </artifact>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/modules.conf</file>
+ <type>xml</type>
+ <classifier>moduleconf</classifier>
+ </artifact>
</artifacts>
</configuration>
</execution>
remote {
log-remote-lifecycle-events = off
netty.tcp {
- hostname = "<CHANGE_ME>"
+ hostname = "127.0.0.1"
port = 2550
maximum-frame-size = 419430400
send-buffer-size = 52428800
}
cluster {
- seed-nodes = ["akka.tcp://opendaylight-cluster-data@<CHANGE_SEED_IP>:2550"]
+ seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
auto-down-unreachable-after = 10s
+
+ roles = [
+ "member-1"
+ ]
+
}
}
}
remote {
log-remote-lifecycle-events = off
netty.tcp {
- hostname = "<CHANGE_ME>"
+ hostname = "127.0.0.1"
port = 2551
}
}
cluster {
- seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@<CHANGE_SEED_IP>:2551"]
+ seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"]
auto-down-unreachable-after = 10s
}
<artifactId>akka-slf4j_${scala.version}</artifactId>
</dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-osgi_${scala.version}</artifactId>
+ </dependency>
+
<!-- SAL Dependencies -->
<dependency>
<Export-package></Export-package>
<Private-Package></Private-Package>
<Import-Package>!*snappy;!org.jboss.*;!com.jcraft.*;!*jetty*;!sun.security.*;*</Import-Package>
+ <!--
<Embed-Dependency>
sal-clustering-commons;
sal-akka-raft;
*scala*;
</Embed-Dependency>
<Embed-Transitive>true</Embed-Transitive>
+ -->
</instructions>
</configuration>
</plugin>
import akka.actor.ActorSystem;
import akka.actor.Props;
-import com.google.common.base.Function;
+import akka.osgi.BundleDelegatingClassLoader;
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
+import org.osgi.framework.BundleContext;
-import javax.annotation.Nullable;
+import java.io.File;
public class ActorSystemFactory {
- private static final ActorSystem actorSystem = (new Function<Void, ActorSystem>(){
-
- @Nullable @Override public ActorSystem apply(@Nullable Void aVoid) {
- ActorSystem system =
- ActorSystem.create("opendaylight-cluster-data", ConfigFactory
- .load().getConfig("odl-cluster-data"));
- system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
- return system;
- }
- }).apply(null);
+
+ public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+ public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
+ public static final String CONFIGURATION_NAME = "odl-cluster-data";
+
+ private static volatile ActorSystem actorSystem = null;
public static final ActorSystem getInstance(){
return actorSystem;
}
+
+ /**
+ * This method should be called only once during initialization
+ *
+ * @param bundleContext
+ */
+ public static final ActorSystem createInstance(final BundleContext bundleContext) {
+ if(actorSystem == null) {
+ // Create an OSGi bundle classloader for actor system
+ BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
+ Thread.currentThread().getContextClassLoader());
+ synchronized (ActorSystemFactory.class) {
+ // Double check
+
+ if (actorSystem == null) {
+ ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
+ ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
+ system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+ actorSystem = system;
+ }
+ }
+ }
+
+ return actorSystem;
+ }
+
+
+ private static final Config readAkkaConfiguration(){
+ File defaultConfigFile = new File(AKKA_CONF_PATH);
+ Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
+ return ConfigFactory.parseFile(defaultConfigFile);
+ }
}
package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Preconditions;
+
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+
import scala.concurrent.duration.Duration;
import java.util.concurrent.TimeUnit;
/**
- * Contains contextual data for shards.
+ * Contains contextual data for a data store.
*
* @author Thomas Pantelis
*/
private final InMemoryDOMDataStoreConfigProperties dataStoreProperties;
private final Duration shardTransactionIdleTimeout;
+ private final int operationTimeoutInSeconds;
+ private final String dataStoreMXBeanType;
public DatastoreContext() {
this.dataStoreProperties = null;
+ this.dataStoreMXBeanType = "DistributedDatastore";
this.shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
+ this.operationTimeoutInSeconds = 5;
}
- public DatastoreContext(InMemoryDOMDataStoreConfigProperties dataStoreProperties,
- Duration shardTransactionIdleTimeout) {
+ public DatastoreContext(String dataStoreMXBeanType,
+ InMemoryDOMDataStoreConfigProperties dataStoreProperties,
+ Duration shardTransactionIdleTimeout,
+ int operationTimeoutInSeconds) {
+ this.dataStoreMXBeanType = dataStoreMXBeanType;
this.dataStoreProperties = Preconditions.checkNotNull(dataStoreProperties);
- this.shardTransactionIdleTimeout = Preconditions.checkNotNull(shardTransactionIdleTimeout);
+ this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ this.operationTimeoutInSeconds = operationTimeoutInSeconds;
}
public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
return shardTransactionIdleTimeout;
}
+ public String getDataStoreMXBeanType() {
+ return dataStoreMXBeanType;
+ }
+ public int getOperationTimeoutInSeconds() {
+ return operationTimeoutInSeconds;
+ }
}
package org.opendaylight.controller.cluster.datastore;
-import java.util.concurrent.TimeUnit;
-
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.duration.Duration;
-
/**
*
*/
private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
private final ActorContext actorContext;
- private final DatastoreContext datastoreContext;
public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster,
- Configuration configuration, DistributedDataStoreProperties dataStoreProperties) {
+ Configuration configuration, DatastoreContext datastoreContext) {
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
Preconditions.checkNotNull(type, "type should not be null");
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
-
+ Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
LOG.info("Creating ShardManager : {}", shardManagerId);
- datastoreContext = new DatastoreContext(InMemoryDOMDataStoreConfigProperties.create(
- dataStoreProperties.getMaxShardDataChangeExecutorPoolSize(),
- dataStoreProperties.getMaxShardDataChangeExecutorQueueSize(),
- dataStoreProperties.getMaxShardDataChangeListenerQueueSize()),
- Duration.create(dataStoreProperties.getShardTransactionIdleTimeoutInMinutes(),
- TimeUnit.MINUTES));
+ actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
+ ShardManager.props(type, cluster, configuration, datastoreContext)
+ .withMailbox(ActorContext.MAILBOX), shardManagerId ), cluster, configuration);
- actorContext
- = new ActorContext(
- actorSystem, actorSystem.actorOf(
- ShardManager.props(type, cluster, configuration, datastoreContext).
- withMailbox(ActorContext.MAILBOX), shardManagerId ), cluster, configuration);
-
- actorContext.setOperationTimeout(dataStoreProperties.getOperationTimeoutInSeconds());
+ actorContext.setOperationTimeout(datastoreContext.getOperationTimeoutInSeconds());
}
public DistributedDataStore(ActorContext actorContext) {
this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
- this.datastoreContext = new DatastoreContext();
}
-
@SuppressWarnings("unchecked")
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.osgi.framework.BundleContext;
public class DistributedDataStoreFactory {
public static DistributedDataStore createInstance(String name, SchemaService schemaService,
- DistributedDataStoreProperties dataStoreProperties) {
+ DatastoreContext datastoreContext, BundleContext bundleContext) {
- ActorSystem actorSystem = ActorSystemFactory.getInstance();
+ ActorSystem actorSystem = ActorSystemFactory.createInstance(bundleContext);
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
final DistributedDataStore dataStore =
new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
- config, dataStoreProperties );
+ config, datastoreContext );
ShardStrategyFactory.setConfiguration(config);
schemaService.registerSchemaContextListener(dataStore);
return dataStore;
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-/**
- * Wrapper class for DistributedDataStore configuration properties.
- *
- * @author Thomas Pantelis
- */
-public class DistributedDataStoreProperties {
- private final int maxShardDataChangeListenerQueueSize;
- private final int maxShardDataChangeExecutorQueueSize;
- private final int maxShardDataChangeExecutorPoolSize;
- private final int shardTransactionIdleTimeoutInMinutes;
- private final int operationTimeoutInSeconds;
-
- public DistributedDataStoreProperties() {
- maxShardDataChangeListenerQueueSize = 1000;
- maxShardDataChangeExecutorQueueSize = 1000;
- maxShardDataChangeExecutorPoolSize = 20;
- shardTransactionIdleTimeoutInMinutes = 10;
- operationTimeoutInSeconds = 5;
- }
-
- public DistributedDataStoreProperties(int maxShardDataChangeListenerQueueSize,
- int maxShardDataChangeExecutorQueueSize, int maxShardDataChangeExecutorPoolSize,
- int shardTransactionIdleTimeoutInMinutes, int operationTimeoutInSeconds) {
- this.maxShardDataChangeListenerQueueSize = maxShardDataChangeListenerQueueSize;
- this.maxShardDataChangeExecutorQueueSize = maxShardDataChangeExecutorQueueSize;
- this.maxShardDataChangeExecutorPoolSize = maxShardDataChangeExecutorPoolSize;
- this.shardTransactionIdleTimeoutInMinutes = shardTransactionIdleTimeoutInMinutes;
- this.operationTimeoutInSeconds = operationTimeoutInSeconds;
- }
-
- public int getMaxShardDataChangeListenerQueueSize() {
- return maxShardDataChangeListenerQueueSize;
- }
-
- public int getMaxShardDataChangeExecutorQueueSize() {
- return maxShardDataChangeExecutorQueueSize;
- }
-
- public int getMaxShardDataChangeExecutorPoolSize() {
- return maxShardDataChangeExecutorPoolSize;
- }
-
- public int getShardTransactionIdleTimeoutInMinutes() {
- return shardTransactionIdleTimeoutInMinutes;
- }
-
- public int getOperationTimeoutInSeconds() {
- return operationTimeoutInSeconds;
- }
-}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Creator;
+import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
-
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
+import org.opendaylight.controller.cluster.datastore.messages.ReadData;
+import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
import scala.concurrent.duration.FiniteDuration;
import java.util.ArrayList;
-import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
private SchemaContext schemaContext;
+ private ActorRef createSnapshotTransaction;
+
private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
- DatastoreContext datastoreContext) {
+ DatastoreContext datastoreContext, SchemaContext schemaContext) {
super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
this.name = name;
this.datastoreContext = datastoreContext;
+ this.schemaContext = schemaContext;
String setting = System.getProperty("shard.persistent");
store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
datastoreContext.getDataStoreProperties());
- shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString());
+ if(schemaContext != null) {
+ store.onGlobalContextUpdated(schemaContext);
+ }
+ shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
+ datastoreContext.getDataStoreMXBeanType());
+ shardMBean.setDataStoreExecutor(store.getDomStoreExecutor());
+ shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
}
public static Props props(final ShardIdentifier name,
final Map<ShardIdentifier, String> peerAddresses,
- DatastoreContext datastoreContext) {
+ DatastoreContext datastoreContext, SchemaContext schemaContext) {
Preconditions.checkNotNull(name, "name should not be null");
Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
- Preconditions.checkNotNull(datastoreContext, "shardContext should not be null");
+ Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
+ Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
- return Props.create(new ShardCreator(name, peerAddresses, datastoreContext));
+ return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
+ }
+
+ @Override public void onReceiveRecover(Object message) {
+ LOG.debug("onReceiveRecover: Received message {} from {}", message.getClass().toString(),
+ getSender());
+
+ if (message instanceof RecoveryFailure){
+ LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
+ } else {
+ super.onReceiveRecover(message);
+ }
}
@Override public void onReceiveCommand(Object message) {
- LOG.debug("Received message {} from {}", message.getClass().toString(),
+ LOG.debug("onReceiveCommand: Received message {} from {}", message.getClass().toString(),
getSender());
if (message.getClass()
} else if (getLeader() != null) {
getLeader().forward(message, getContext());
}
+ } else if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+ // This must be for install snapshot. Don't want to open this up and trigger
+ // deSerialization
+ self().tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)), self());
+
+ // Send a PoisonPill instead of sending close transaction because we do not really need
+ // a response
+ getSender().tell(PoisonPill.getInstance(), self());
+
} else if (message instanceof RegisterChangeListener) {
registerChangeListener((RegisterChangeListener) message);
} else if (message instanceof UpdateSchemaContext) {
}
private ActorRef createTypedTransactionActor(
- CreateTransaction createTransaction,
+ int transactionType,
ShardTransactionIdentifier transactionId) {
- if (createTransaction.getTransactionType()
+
+ if(this.schemaContext == null){
+ throw new NullPointerException("schemaContext should not be null");
+ }
+
+ if (transactionType
== TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
shardMBean.incrementReadOnlyTransactionCount();
return getContext().actorOf(
ShardTransaction.props(store.newReadOnlyTransaction(), getSelf(),
- schemaContext,datastoreContext, name.toString()), transactionId.toString());
+ schemaContext,datastoreContext, shardMBean), transactionId.toString());
- } else if (createTransaction.getTransactionType()
+ } else if (transactionType
== TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
shardMBean.incrementReadWriteTransactionCount();
return getContext().actorOf(
ShardTransaction.props(store.newReadWriteTransaction(), getSelf(),
- schemaContext, datastoreContext,name.toString()), transactionId.toString());
+ schemaContext, datastoreContext, shardMBean), transactionId.toString());
- } else if (createTransaction.getTransactionType()
+ } else if (transactionType
== TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
shardMBean.incrementWriteOnlyTransactionCount();
return getContext().actorOf(
ShardTransaction.props(store.newWriteOnlyTransaction(), getSelf(),
- schemaContext, datastoreContext, name.toString()), transactionId.toString());
+ schemaContext, datastoreContext, shardMBean), transactionId.toString());
} else {
throw new IllegalArgumentException(
"Shard="+name + ":CreateTransaction message has unidentified transaction type="
- + createTransaction.getTransactionType());
+ + transactionType);
}
}
private void createTransaction(CreateTransaction createTransaction) {
+ createTransaction(createTransaction.getTransactionType(),
+ createTransaction.getTransactionId());
+ }
+
+ private ActorRef createTransaction(int transactionType, String remoteTransactionId) {
ShardTransactionIdentifier transactionId =
ShardTransactionIdentifier.builder()
- .remoteTransactionId(createTransaction.getTransactionId())
+ .remoteTransactionId(remoteTransactionId)
.build();
LOG.debug("Creating transaction : {} ", transactionId);
ActorRef transactionActor =
- createTypedTransactionActor(createTransaction, transactionId);
+ createTypedTransactionActor(transactionType, transactionId);
getSender()
.tell(new CreateTransactionReply(
Serialization.serializedActorPath(transactionActor),
- createTransaction.getTransactionId()).toSerializable(),
+ remoteTransactionId).toSerializable(),
getSelf());
+
+ return transactionActor;
+ }
+
+ private void syncCommitTransaction(DOMStoreWriteTransaction transaction)
+ throws ExecutionException, InterruptedException {
+ DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
+ commitCohort.preCommit().get();
+ commitCohort.commit().get();
}
+
private void commit(final ActorRef sender, Object serialized) {
Modification modification = MutableCompositeModification
.fromSerializable(serialized, schemaContext);
LOG.debug(
"Could not find cohort for modification : {}. Writing modification using a new transaction",
modification);
- DOMStoreReadWriteTransaction transaction =
- store.newReadWriteTransaction();
+ DOMStoreWriteTransaction transaction =
+ store.newWriteOnlyTransaction();
modification.apply(transaction);
- DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
- ListenableFuture<Void> future =
- commitCohort.preCommit();
try {
- future.get();
- future = commitCohort.commit();
- future.get();
+ syncCommitTransaction(transaction);
} catch (InterruptedException | ExecutionException e) {
shardMBean.incrementFailedTransactionsCount();
LOG.error("Failed to commit", e);
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onSuccess(Void v) {
- sender.tell(new CommitTransactionReply().toSerializable(),self);
- shardMBean.incrementCommittedTransactionCount();
- shardMBean.setLastCommittedTransactionTime(new Date());
+ sender.tell(new CommitTransactionReply().toSerializable(), self);
+ shardMBean.incrementCommittedTransactionCount();
+ shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
}
@Override
private void updateSchemaContext(UpdateSchemaContext message) {
this.schemaContext = message.getSchemaContext();
+ updateSchemaContext(message.getSchemaContext());
store.onGlobalContextUpdated(message.getSchemaContext());
}
+ @VisibleForTesting void updateSchemaContext(SchemaContext schemaContext) {
+ store.onGlobalContextUpdated(schemaContext);
+ }
+
private void registerChangeListener(
RegisterChangeListener registerChangeListener) {
private void createTransactionChain() {
DOMStoreTransactionChain chain = store.createTransactionChain();
ActorRef transactionChain = getContext().actorOf(
- ShardTransactionChain.props(chain, schemaContext, datastoreContext,name.toString() ));
+ ShardTransactionChain.props(chain, schemaContext, datastoreContext, shardMBean));
getSender().tell(new CreateTransactionChainReply(transactionChain.path()).toSerializable(),
- getSelf());
+ getSelf());
}
@Override protected void applyState(ActorRef clientActor, String identifier,
identifier, clientActor.path().toString());
}
-
} else {
LOG.error("Unknown state received {}", data);
}
}
- @Override protected Object createSnapshot() {
- throw new UnsupportedOperationException("createSnapshot");
+ @Override protected void createSnapshot() {
+ if (createSnapshotTransaction == null) {
+
+ // Create a transaction. We are really going to treat the transaction as a worker
+ // so that this actor does not get blocked building the snapshot
+ createSnapshotTransaction = createTransaction(
+ TransactionProxy.TransactionType.READ_ONLY.ordinal(),
+ "createSnapshot");
+
+ createSnapshotTransaction.tell(
+ new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
+
+ }
}
- @Override protected void applySnapshot(Object snapshot) {
- throw new UnsupportedOperationException("applySnapshot");
+ @VisibleForTesting @Override protected void applySnapshot(ByteString snapshot) {
+ // Since this will be done only on Recovery or when this actor is a Follower
+ // we can safely commit everything in here. We do not need to worry about event notifications
+ // as they would have already been disabled on the follower
+ try {
+ DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+ NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
+ NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext)
+ .decode(YangInstanceIdentifier.builder().build(), serializedNode);
+
+ // delete everything first
+ transaction.delete(YangInstanceIdentifier.builder().build());
+
+ // Add everything from the remote node back
+ transaction.write(YangInstanceIdentifier.builder().build(), node);
+ syncCommitTransaction(transaction);
+ } catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
+ LOG.error(e, "An exception occurred when applying snapshot");
+ }
}
@Override protected void onStateChanged() {
final ShardIdentifier name;
final Map<ShardIdentifier, String> peerAddresses;
final DatastoreContext datastoreContext;
+ final SchemaContext schemaContext;
ShardCreator(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
- DatastoreContext datastoreContext) {
+ DatastoreContext datastoreContext, SchemaContext schemaContext) {
this.name = name;
this.peerAddresses = peerAddresses;
this.datastoreContext = datastoreContext;
+ this.schemaContext = schemaContext;
}
@Override
public Shard create() throws Exception {
- return new Shard(name, peerAddresses, datastoreContext);
+ return new Shard(name, peerAddresses, datastoreContext, schemaContext);
}
}
+
+ @VisibleForTesting NormalizedNode readStore() throws ExecutionException, InterruptedException {
+ DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
+ transaction.read(YangInstanceIdentifier.builder().build());
+
+ NormalizedNode<?, ?> node = future.get().get();
+
+ transaction.close();
+
+ return node;
+ }
+
+ @VisibleForTesting void writeToStore(YangInstanceIdentifier id, NormalizedNode node)
+ throws ExecutionException, InterruptedException {
+ DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+
+ transaction.write(id, node);
+
+ syncCommitTransaction(transaction);
+ }
+
}
import akka.cluster.ClusterEvent;
import akka.japi.Creator;
import akka.japi.Function;
-
import com.google.common.base.Preconditions;
-
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import java.util.ArrayList;
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
- // Create all the local Shards and make them a child of the ShardManager
- // TODO: This may need to be initiated when we first get the schema context
- createLocalShards();
+ //createLocalShards(null);
}
public static Props props(final String type,
* @param message
*/
private void updateSchemaContext(Object message) {
- for(ShardInformation info : localShards.values()){
- info.getActor().tell(message,getSelf());
+ SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+
+ if(localShards.isEmpty()){
+ createLocalShards(schemaContext);
+ } else {
+ for (ShardInformation info : localShards.values()) {
+ info.getActor().tell(message, getSelf());
+ }
}
}
* runs
*
*/
- private void createLocalShards() {
+ private void createLocalShards(SchemaContext schemaContext) {
String memberName = this.cluster.getCurrentMemberName();
List<String> memberShardNames =
this.configuration.getMemberShardNames(memberName);
ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
ActorRef actor = getContext()
- .actorOf(Shard.props(shardId, peerAddresses, datastoreContext).
+ .actorOf(Shard.props(shardId, peerAddresses, datastoreContext, schemaContext).
withMailbox(ActorContext.MAILBOX), shardId.toString());
-
localShardActorNames.add(shardId.toString());
localShards.put(shardName, new ShardInformation(shardName, actor, peerAddresses));
}
- mBean = ShardManagerInfo
- .createShardManagerMBean("shard-manager-" + this.type, localShardActorNames);
-
+ mBean = ShardManagerInfo.createShardManagerMBean("shard-manager-" + this.type,
+ datastoreContext.getDataStoreMXBeanType(), localShardActorNames);
}
/**
import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
private final DOMStoreReadTransaction transaction;
public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,String shardName) {
- super(shardActor, schemaContext, shardName);
+ SchemaContext schemaContext, ShardStats shardStats) {
+ super(shardActor, schemaContext, shardStats);
this.transaction = transaction;
}
import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
private final DOMStoreReadWriteTransaction transaction;
public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,String shardName) {
- super(shardActor, schemaContext, shardName);
+ SchemaContext schemaContext, ShardStats shardStats) {
+ super(shardActor, schemaContext, shardStats);
this.transaction = transaction;
}
import akka.actor.Props;
import akka.actor.ReceiveTimeout;
import akka.japi.Creator;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
+
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
private final ActorRef shardActor;
protected final SchemaContext schemaContext;
- private final String shardName;
-
+ private final ShardStats shardStats;
private final MutableCompositeModification modification = new MutableCompositeModification();
protected ShardTransaction(ActorRef shardActor, SchemaContext schemaContext,
- String shardName) {
+ ShardStats shardStats) {
this.shardActor = shardActor;
this.schemaContext = schemaContext;
- this.shardName = shardName;
+ this.shardStats = shardStats;
}
public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,DatastoreContext datastoreContext, String shardName) {
+ SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats) {
return Props.create(new ShardTransactionCreator(transaction, shardActor, schemaContext,
- datastoreContext, shardName));
+ datastoreContext, shardStats));
}
protected abstract DOMStoreTransaction getDOMStoreTransaction();
sender.tell(new ReadDataReply(schemaContext,null).toSerializable(), self);
}
} catch (Exception e) {
- ShardMBeanFactory.getShardStatsMBean(shardName).incrementFailedReadTransactionsCount();
+ shardStats.incrementFailedReadTransactionsCount();
sender.tell(new akka.actor.Status.Failure(e), self);
}
protected void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message) {
DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
ActorRef cohortActor = getContext().actorOf(
- ThreePhaseCommitCohort.props(cohort, shardActor, modification, shardName), "cohort");
+ ThreePhaseCommitCohort.props(cohort, shardActor, modification, shardStats), "cohort");
getSender()
.tell(new ReadyTransactionReply(cohortActor.path()).toSerializable(), getSelf());
final ActorRef shardActor;
final SchemaContext schemaContext;
final DatastoreContext datastoreContext;
- final String shardName;
+ final ShardStats shardStats;
ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, DatastoreContext datastoreContext, String shardName) {
+ SchemaContext schemaContext, DatastoreContext datastoreContext,
+ ShardStats shardStats) {
this.transaction = transaction;
this.shardActor = shardActor;
- this.shardName = shardName;
+ this.shardStats = shardStats;
this.schemaContext = schemaContext;
this.datastoreContext = datastoreContext;
}
ShardTransaction tx;
if(transaction instanceof DOMStoreReadWriteTransaction) {
tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, schemaContext, shardName);
+ shardActor, schemaContext, shardStats);
} else if(transaction instanceof DOMStoreReadTransaction) {
tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- schemaContext, shardName);
+ schemaContext, shardStats);
} else {
tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, schemaContext, shardName);
+ shardActor, schemaContext, shardStats);
}
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
import akka.actor.Props;
import akka.japi.Creator;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
private final DOMStoreTransactionChain chain;
private final DatastoreContext datastoreContext;
private final SchemaContext schemaContext;
- private final String shardName;
+ private final ShardStats shardStats;
public ShardTransactionChain(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext,String shardName) {
+ DatastoreContext datastoreContext, ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
- this.shardName = shardName;
+ this.shardStats = shardStats;
}
@Override
TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext,shardName), transactionId);
+ schemaContext, datastoreContext, shardStats), transactionId);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
- schemaContext, datastoreContext,shardName), transactionId);
+ schemaContext, datastoreContext, shardStats), transactionId);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext,shardName), transactionId);
+ schemaContext, datastoreContext, shardStats), transactionId);
} else {
throw new IllegalArgumentException (
"CreateTransaction message has unidentified transaction type=" +
}
public static Props props(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, String shardName) {
- return Props.create(new ShardTransactionChainCreator(chain, schemaContext, datastoreContext, shardName));
+ DatastoreContext datastoreContext, ShardStats shardStats) {
+ return Props.create(new ShardTransactionChainCreator(chain, schemaContext,
+ datastoreContext, shardStats));
}
private static class ShardTransactionChainCreator implements Creator<ShardTransactionChain> {
final DOMStoreTransactionChain chain;
final DatastoreContext datastoreContext;
final SchemaContext schemaContext;
- final String shardName;
+ final ShardStats shardStats;
ShardTransactionChainCreator(DOMStoreTransactionChain chain, SchemaContext schemaContext,
- DatastoreContext datastoreContext, String shardName) {
+ DatastoreContext datastoreContext, ShardStats shardStats) {
this.chain = chain;
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
- this.shardName = shardName;
+ this.shardStats = shardStats;
}
@Override
public ShardTransactionChain create() throws Exception {
- return new ShardTransactionChain(chain, schemaContext, datastoreContext,shardName);
+ return new ShardTransactionChain(chain, schemaContext, datastoreContext, shardStats);
}
}
}
import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
private final DOMStoreWriteTransaction transaction;
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,String shardName) {
- super(shardActor, schemaContext, shardName);
+ SchemaContext schemaContext, ShardStats shardStats) {
+ super(shardActor, schemaContext, shardStats);
this.transaction = transaction;
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
private final DOMStoreThreePhaseCommitCohort cohort;
private final ActorRef shardActor;
private final CompositeModification modification;
- private final String shardName;
+ private final ShardStats shardStats;
public ThreePhaseCommitCohort(DOMStoreThreePhaseCommitCohort cohort,
- ActorRef shardActor, CompositeModification modification,String shardName) {
+ ActorRef shardActor, CompositeModification modification, ShardStats shardStats) {
this.cohort = cohort;
this.shardActor = shardActor;
this.modification = modification;
- this.shardName = shardName;
+ this.shardStats = shardStats;
}
private final LoggingAdapter log =
Logging.getLogger(getContext().system(), this);
public static Props props(final DOMStoreThreePhaseCommitCohort cohort,
- final ActorRef shardActor, final CompositeModification modification,
- String shardName) {
+ final ActorRef shardActor, final CompositeModification modification,
+ ShardStats shardStats) {
return Props.create(new ThreePhaseCommitCohortCreator(cohort, shardActor, modification,
- shardName));
+ shardStats));
}
@Override
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onSuccess(Void v) {
- ShardMBeanFactory.getShardStatsMBean(shardName).incrementAbortTransactionsCount();
+ shardStats.incrementAbortTransactionsCount();
sender
.tell(new AbortTransactionReply().toSerializable(),
self);
final DOMStoreThreePhaseCommitCohort cohort;
final ActorRef shardActor;
final CompositeModification modification;
- final String shardName;
+ final ShardStats shardStats;
ThreePhaseCommitCohortCreator(DOMStoreThreePhaseCommitCohort cohort,
- ActorRef shardActor, CompositeModification modification, String shardName) {
+ ActorRef shardActor, CompositeModification modification, ShardStats shardStats) {
this.cohort = cohort;
this.shardActor = shardActor;
this.modification = modification;
- this.shardName = shardName;
+ this.shardStats = shardStats;
}
@Override
public ThreePhaseCommitCohort create() throws Exception {
- return new ThreePhaseCommitCohort(cohort, shardActor, modification, shardName);
+ return new ThreePhaseCommitCohort(cohort, shardActor, modification, shardStats);
}
}
}
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
-
-
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.InstanceNotFoundException;
-import javax.management.MBeanRegistrationException;
-import javax.management.MBeanServer;
-import javax.management.MalformedObjectNameException;
-import javax.management.ObjectName;
-import java.lang.management.ManagementFactory;
-
-/**
- * All MBeans should extend this class that help in registering and
- * unregistering the MBeans.
- * @author Basheeruddin <syedbahm@cisco.com>
- */
-
-
-public abstract class AbstractBaseMBean {
-
-
- public static String BASE_JMX_PREFIX = "org.opendaylight.controller:";
- public static String JMX_TYPE_DISTRIBUTED_DATASTORE = "DistributedDatastore";
- public static String JMX_CATEGORY_SHARD = "Shard";
- public static String JMX_CATEGORY_SHARD_MANAGER = "ShardManager";
-
- private static final Logger LOG = LoggerFactory
- .getLogger(AbstractBaseMBean.class);
-
- MBeanServer server = ManagementFactory.getPlatformMBeanServer();
- /**
- * gets the MBean ObjectName
- *
- * @return Object name of the MBean
- * @throws MalformedObjectNameException - The bean name does not have the right format.
- * @throws NullPointerException - The bean name is null
- */
- protected ObjectName getMBeanObjectName()
- throws MalformedObjectNameException, NullPointerException {
- String name = BASE_JMX_PREFIX + "type="+getMBeanType()+",Category="+
- getMBeanCategory() + ",name="+
- getMBeanName();
-
-
- return new ObjectName(name);
- }
-
- public boolean registerMBean() {
- boolean registered = false;
- try {
- // Object to identify MBean
- final ObjectName mbeanName = this.getMBeanObjectName();
-
- Preconditions.checkArgument(mbeanName != null,
- "Object name of the MBean cannot be null");
-
- LOG.debug("Register MBean {}", mbeanName);
-
- // unregistered if already registered
- if (server.isRegistered(mbeanName)) {
-
- LOG.debug("MBean {} found to be already registered", mbeanName);
-
- try {
- unregisterMBean(mbeanName);
- } catch (Exception e) {
-
- LOG.warn("unregister mbean {} resulted in exception {} ", mbeanName,
- e);
- }
- }
- server.registerMBean(this, mbeanName);
-
- LOG.debug("MBean {} registered successfully",
- mbeanName.getCanonicalName());
- registered = true;
- } catch (Exception e) {
-
- LOG.error("registration failed {}", e);
-
- }
- return registered;
- }
-
-
- public boolean unregisterMBean() {
- boolean unregister = false;
- try {
- ObjectName mbeanName = this.getMBeanObjectName();
- unregister = true;
- unregisterMBean(mbeanName);
- } catch (Exception e) {
-
- LOG.error("Failed when unregistering MBean {}", e);
- }
- return unregister;
- }
-
- private void unregisterMBean(ObjectName mbeanName)
- throws MBeanRegistrationException, InstanceNotFoundException {
-
- server.unregisterMBean(mbeanName);
-
- }
-
-
- /**
- * @return name of bean
- */
- protected abstract String getMBeanName();
-
- /**
- * @return type of the MBean
- */
- protected abstract String getMBeanType();
-
-
- /**
- * @return Category name of teh bean
- */
- protected abstract String getMBeanCategory();
-
- //require for test cases
- public MBeanServer getMBeanServer() {
- return server;
- }
-}
*/
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
/**
* @author Basheeruddin syedbahm@cisco.com
*
*/
public class ShardMBeanFactory {
- private static Map<String, ShardStats> shardMBeans =
- new HashMap<String, ShardStats>();
- public static ShardStats getShardStatsMBean(String shardName) {
- if (shardMBeans.containsKey(shardName)) {
- return shardMBeans.get(shardName);
- } else {
- ShardStats shardStatsMBeanImpl = new ShardStats(shardName);
+ private static final Logger LOG = LoggerFactory.getLogger(ShardMBeanFactory.class);
- if (shardStatsMBeanImpl.registerMBean()) {
- shardMBeans.put(shardName, shardStatsMBeanImpl);
- }
- return shardStatsMBeanImpl;
+ private static Cache<String,ShardStats> shardMBeansCache =
+ CacheBuilder.newBuilder().weakValues().build();
+
+ public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType) {
+ final String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
+ try {
+ return shardMBeansCache.get(shardName, new Callable<ShardStats>() {
+ @Override
+ public ShardStats call() throws Exception {
+ ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType);
+ shardStatsMBeanImpl.registerMBean();
+ return shardStatsMBeanImpl;
+ }
+ });
+ } catch(ExecutionException e) {
+ LOG.error(String.format("Could not create MXBean for shard: %s", shardName), e);
+ // Just return an instance that isn't registered.
+ return new ShardStats(shardName, finalMXBeanType);
}
}
-
}
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStats;
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
+import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
+import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
+ * Maintains statistics for a shard.
+ *
* @author Basheeruddin syedbahm@cisco.com
*/
-public class ShardStats extends AbstractBaseMBean implements ShardStatsMBean {
+public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
+ public static final String JMX_CATEGORY_SHARD = "Shards";
- private final String shardName;
+ private final AtomicLong committedTransactionsCount = new AtomicLong();
- private long committedTransactionsCount = 0L;
+ private final AtomicLong readOnlyTransactionCount = new AtomicLong();
- private long readOnlyTransactionCount = 0L;
+ private final AtomicLong writeOnlyTransactionCount = new AtomicLong();
- private long writeOnlyTransactionCount = 0L;
-
- private long readWriteTransactionCount = 0L;
+ private final AtomicLong readWriteTransactionCount = new AtomicLong();
private String leader;
private String raftState;
- private long lastLogTerm = -1L;
+ private volatile long lastLogTerm = -1L;
+
+ private volatile long lastLogIndex = -1L;
- private long lastLogIndex = -1L;
+ private volatile long currentTerm = -1L;
- private long currentTerm = -1L;
+ private volatile long commitIndex = -1L;
- private long commitIndex = -1L;
+ private volatile long lastApplied = -1L;
- private long lastApplied = -1L;
+ private volatile long lastCommittedTransactionTime;
- private Date lastCommittedTransactionTime = new Date(0L);
+ private final AtomicLong failedTransactionsCount = new AtomicLong();
- private long failedTransactionsCount = 0L;
+ private final AtomicLong failedReadTransactionsCount = new AtomicLong();
- private long failedReadTransactionsCount = 0L;
+ private final AtomicLong abortTransactionsCount = new AtomicLong();
- private long abortTransactionsCount = 0L;
+ private ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean;
- private SimpleDateFormat sdf =
+ private ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean;
+
+ private QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
+
+ private final SimpleDateFormat sdf =
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
- ShardStats(String shardName) {
- this.shardName = shardName;
+ public ShardStats(String shardName, String mxBeanType) {
+ super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
+ }
+
+ public void setDataStoreExecutor(ExecutorService dsExecutor) {
+ this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dsExecutor,
+ "data-store-executor", getMBeanType(), getMBeanCategory());
 }
+ public void setNotificationManager(QueuedNotificationManager<?, ?> manager) {
+ this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
+ "notification-manager", getMBeanType(), getMBeanCategory());
+
+ this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+ "notification-executor", getMBeanType(), getMBeanCategory());
+ }
@Override
public String getShardName() {
- return shardName;
+ return getMBeanName();
}
@Override
public long getCommittedTransactionsCount() {
- return committedTransactionsCount;
+ return committedTransactionsCount.get();
}
- @Override public String getLeader() {
+ @Override
+ public String getLeader() {
return leader;
}
- @Override public String getRaftState() {
+ @Override
+ public String getRaftState() {
return raftState;
}
- @Override public long getReadOnlyTransactionCount() {
- return readOnlyTransactionCount;
+ @Override
+ public long getReadOnlyTransactionCount() {
+ return readOnlyTransactionCount.get();
}
- @Override public long getWriteOnlyTransactionCount() {
- return writeOnlyTransactionCount;
+ @Override
+ public long getWriteOnlyTransactionCount() {
+ return writeOnlyTransactionCount.get();
}
- @Override public long getReadWriteTransactionCount() {
- return readWriteTransactionCount;
+ @Override
+ public long getReadWriteTransactionCount() {
+ return readWriteTransactionCount.get();
}
- @Override public long getLastLogIndex() {
+ @Override
+ public long getLastLogIndex() {
return lastLogIndex;
}
- @Override public long getLastLogTerm() {
+ @Override
+ public long getLastLogTerm() {
return lastLogTerm;
}
- @Override public long getCurrentTerm() {
+ @Override
+ public long getCurrentTerm() {
return currentTerm;
}
- @Override public long getCommitIndex() {
+ @Override
+ public long getCommitIndex() {
return commitIndex;
}
- @Override public long getLastApplied() {
+ @Override
+ public long getLastApplied() {
return lastApplied;
}
@Override
public String getLastCommittedTransactionTime() {
- return sdf.format(lastCommittedTransactionTime);
+ return sdf.format(new Date(lastCommittedTransactionTime));
}
- @Override public long getFailedTransactionsCount() {
- return failedTransactionsCount;
+ @Override
+ public long getFailedTransactionsCount() {
+ return failedTransactionsCount.get();
}
- @Override public long getFailedReadTransactionsCount() {
- return failedReadTransactionsCount;
+ @Override
+ public long getFailedReadTransactionsCount() {
+ return failedReadTransactionsCount.get();
}
- @Override public long getAbortTransactionsCount() {
- return abortTransactionsCount;
+ @Override
+ public long getAbortTransactionsCount() {
+ return abortTransactionsCount.get();
}
public long incrementCommittedTransactionCount() {
- return committedTransactionsCount++;
+ return committedTransactionsCount.incrementAndGet();
}
public long incrementReadOnlyTransactionCount() {
- return readOnlyTransactionCount++;
+ return readOnlyTransactionCount.incrementAndGet();
}
public long incrementWriteOnlyTransactionCount() {
- return writeOnlyTransactionCount++;
+ return writeOnlyTransactionCount.incrementAndGet();
}
public long incrementReadWriteTransactionCount() {
- return readWriteTransactionCount++;
+ return readWriteTransactionCount.incrementAndGet();
}
public long incrementFailedTransactionsCount() {
- return failedTransactionsCount++;
+ return failedTransactionsCount.incrementAndGet();
}
public long incrementFailedReadTransactionsCount() {
- return failedReadTransactionsCount++;
+ return failedReadTransactionsCount.incrementAndGet();
}
- public long incrementAbortTransactionsCount () { return abortTransactionsCount++;}
+ public long incrementAbortTransactionsCount() {
+ return abortTransactionsCount.incrementAndGet();
+ }
public void setLeader(String leader) {
this.leader = leader;
this.lastApplied = lastApplied;
}
-
- public void setLastCommittedTransactionTime(
- Date lastCommittedTransactionTime) {
+ public void setLastCommittedTransactionTime(long lastCommittedTransactionTime) {
this.lastCommittedTransactionTime = lastCommittedTransactionTime;
}
@Override
- protected String getMBeanName() {
- return shardName;
+ public ThreadExecutorStats getDataStoreExecutorStats() {
+ return dataStoreExecutorStatsBean.toThreadExecutorStats();
+ }
+
+ @Override
+ public ThreadExecutorStats getNotificationMgrExecutorStats() {
+ return notificationExecutorStatsBean.toThreadExecutorStats();
}
@Override
- protected String getMBeanType() {
- return JMX_TYPE_DISTRIBUTED_DATASTORE;
+ public List<ListenerNotificationQueueStats> getCurrentNotificationMgrListenerQueueStats() {
+ return notificationManagerStatsBean.getCurrentListenerQueueStats();
}
@Override
- protected String getMBeanCategory() {
- return JMX_CATEGORY_SHARD;
+ public int getMaxNotificationMgrListenerQueueSize() {
+ return notificationManagerStatsBean.getMaxListenerQueueSize();
}
/**
* resets the counters related to transactions
*/
-
+ @Override
public void resetTransactionCounters(){
- committedTransactionsCount = 0L;
+ committedTransactionsCount.set(0);
- readOnlyTransactionCount = 0L;
+ readOnlyTransactionCount.set(0);
- writeOnlyTransactionCount = 0L;
+ writeOnlyTransactionCount.set(0);
- readWriteTransactionCount = 0L;
+ readWriteTransactionCount.set(0);
- lastCommittedTransactionTime = new Date(0L);
+ lastCommittedTransactionTime = 0;
- failedTransactionsCount = 0L;
+ failedTransactionsCount.set(0);
- failedReadTransactionsCount = 0L;
+ failedReadTransactionsCount.set(0);
- abortTransactionsCount = 0L;
+ abortTransactionsCount.set(0);
}
-
-
}
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
-
-/**
- * @author: syedbahm
- */
-public interface ShardStatsMBean {
- String getShardName();
-
- long getCommittedTransactionsCount();
-
- String getLeader();
-
- String getRaftState();
-
- long getReadOnlyTransactionCount();
-
- long getWriteOnlyTransactionCount();
-
- long getReadWriteTransactionCount();
-
- long getLastLogIndex();
-
- long getLastLogTerm();
-
- long getCurrentTerm();
-
- long getCommitIndex();
-
- long getLastApplied();
-
- String getLastCommittedTransactionTime();
-
- long getFailedTransactionsCount();
-
- long getFailedReadTransactionsCount();
-
- long getAbortTransactionsCount();
-
- void resetTransactionCounters();
-
-}
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStats;
+import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
+
+/**
+ * @author syedbahm
+ */
+public interface ShardStatsMXBean {
+
+ String getShardName();
+
+ long getCommittedTransactionsCount();
+
+ long getReadOnlyTransactionCount();
+
+ long getWriteOnlyTransactionCount();
+
+ long getReadWriteTransactionCount();
+
+ long getLastLogIndex();
+
+ long getLastLogTerm();
+
+ long getCurrentTerm();
+
+ long getCommitIndex();
+
+ long getLastApplied();
+
+ String getLastCommittedTransactionTime();
+
+ long getFailedTransactionsCount();
+
+ long getAbortTransactionsCount();
+
+ long getFailedReadTransactionsCount();
+
+ String getLeader();
+
+ String getRaftState();
+
+ ThreadExecutorStats getDataStoreExecutorStats();
+
+ ThreadExecutorStats getNotificationMgrExecutorStats();
+
+ List<ListenerNotificationQueueStats> getCurrentNotificationMgrListenerQueueStats();
+
+ int getMaxNotificationMgrListenerQueueSize();
+
+ void resetTransactionCounters();
+}
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
-
import java.util.List;
-public class ShardManagerInfo extends AbstractBaseMBean implements
- ShardManagerInfoMBean {
-
- private final String name;
- private final List<String> localShards;
-
- public ShardManagerInfo(String name, List<String> localShards) {
- this.name = name;
- this.localShards = localShards;
- }
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+public class ShardManagerInfo extends AbstractMXBean implements ShardManagerInfoMBean {
- @Override protected String getMBeanName() {
- return name;
- }
+ public static final String JMX_CATEGORY_SHARD_MANAGER = "ShardManager";
- @Override protected String getMBeanType() {
- return JMX_TYPE_DISTRIBUTED_DATASTORE;
- }
+ private final List<String> localShards;
- @Override protected String getMBeanCategory() {
- return JMX_CATEGORY_SHARD_MANAGER;
+ public ShardManagerInfo(String name, String mxBeanType, List<String> localShards) {
+ super(name, mxBeanType, JMX_CATEGORY_SHARD_MANAGER);
+ this.localShards = localShards;
}
- public static ShardManagerInfo createShardManagerMBean(String name, List<String> localShards){
- ShardManagerInfo shardManagerInfo = new ShardManagerInfo(name,
- localShards);
+ public static ShardManagerInfo createShardManagerMBean(String name, String mxBeanType,
+ List<String> localShards){
+ ShardManagerInfo shardManagerInfo = new ShardManagerInfo(name, mxBeanType, localShards);
shardManagerInfo.registerMBean();
return shardManagerInfo;
}
- @Override public List<String> getLocalShards() {
+ @Override
+ public List<String> getLocalShards() {
return localShards;
}
}
package org.opendaylight.controller.cluster.datastore.messages;
+import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
ShardTransactionMessages.ReadDataReply o = (ShardTransactionMessages.ReadDataReply) serializable;
return new ReadDataReply(schemaContext,new NormalizedNodeToNodeCodec(schemaContext).decode(id, o.getNormalizedNode()));
}
+
+ public static ByteString getNormalizedNodeByteString(Object serializable){
+ ShardTransactionMessages.ReadDataReply o = (ShardTransactionMessages.ReadDataReply) serializable;
+ return ((ShardTransactionMessages.ReadDataReply) serializable).getNormalizedNode().toByteString();
+ }
}
PersistentMessages.CompositeModification.Builder builder =
PersistentMessages.CompositeModification.newBuilder();
+ builder.setTimeStamp(System.nanoTime());
+
for (Modification m : modifications) {
builder.addModification(
(PersistentMessages.Modification) m.toSerializable());
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreProperties;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+import org.osgi.framework.BundleContext;
+
+import scala.concurrent.duration.Duration;
public class DistributedConfigDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedConfigDataStoreProviderModule {
+ private BundleContext bundleContext;
+
public DistributedConfigDataStoreProviderModule(
org.opendaylight.controller.config.api.ModuleIdentifier identifier,
org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
props = new ConfigProperties();
}
- return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
- new DistributedDataStoreProperties(
+ DatastoreContext datastoreContext = new DatastoreContext("DistributedConfigDatastore",
+ InMemoryDOMDataStoreConfigProperties.create(
props.getMaxShardDataChangeExecutorPoolSize().getValue(),
props.getMaxShardDataChangeExecutorQueueSize().getValue(),
props.getMaxShardDataChangeListenerQueueSize().getValue(),
- props.getShardTransactionIdleTimeoutInMinutes().getValue(),
- props.getOperationTimeoutInSeconds().getValue()));
+ props.getMaxShardDataStoreExecutorQueueSize().getValue()),
+ Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
+ TimeUnit.MINUTES),
+ props.getOperationTimeoutInSeconds().getValue());
+
+ return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
+ datastoreContext, bundleContext);
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
}
}
* Do not modify this file unless it is present under src/main directory
*/
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
public class DistributedConfigDataStoreProviderModuleFactory extends org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedConfigDataStoreProviderModuleFactory {
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ DistributedConfigDataStoreProviderModule module = (DistributedConfigDataStoreProviderModule)super.createModule(instanceName,dependencyResolver,bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ DistributedConfigDataStoreProviderModule module = (DistributedConfigDataStoreProviderModule)super.createModule(instanceName, dependencyResolver,
+ old, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+
}
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreProperties;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+import org.osgi.framework.BundleContext;
+
+import scala.concurrent.duration.Duration;
public class DistributedOperationalDataStoreProviderModule extends
org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedOperationalDataStoreProviderModule {
+ private BundleContext bundleContext;
+
public DistributedOperationalDataStoreProviderModule(
org.opendaylight.controller.config.api.ModuleIdentifier identifier,
org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
props = new OperationalProperties();
}
- return DistributedDataStoreFactory.createInstance("operational",
- getOperationalSchemaServiceDependency(),
- new DistributedDataStoreProperties(
+ DatastoreContext datastoreContext = new DatastoreContext("DistributedOperationalDatastore",
+ InMemoryDOMDataStoreConfigProperties.create(
props.getMaxShardDataChangeExecutorPoolSize().getValue(),
props.getMaxShardDataChangeExecutorQueueSize().getValue(),
props.getMaxShardDataChangeListenerQueueSize().getValue(),
- props.getShardTransactionIdleTimeoutInMinutes().getValue(),
- props.getOperationTimeoutInSeconds().getValue()));
+ props.getMaxShardDataStoreExecutorQueueSize().getValue()),
+ Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
+ TimeUnit.MINUTES),
+ props.getOperationTimeoutInSeconds().getValue());
+
+ return DistributedDataStoreFactory.createInstance("operational",
+ getOperationalSchemaServiceDependency(), datastoreContext, bundleContext);
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
}
}
* Do not modify this file unless it is present under src/main directory
*/
package org.opendaylight.controller.config.yang.config.distributed_datastore_provider;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
public class DistributedOperationalDataStoreProviderModuleFactory extends org.opendaylight.controller.config.yang.config.distributed_datastore_provider.AbstractDistributedOperationalDataStoreProviderModuleFactory {
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ DistributedOperationalDataStoreProviderModule module = (DistributedOperationalDataStoreProviderModule)super.createModule(instanceName,dependencyResolver,bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ DistributedOperationalDataStoreProviderModule module = (DistributedOperationalDataStoreProviderModule)super.createModule(instanceName, dependencyResolver,
+ old, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
}
type non-zero-uint16-type;
description "The maximum queue size for each shard's data store data change listeners.";
}
-
+
+ leaf max-shard-data-store-executor-queue-size {
+ default 5000;
+ type non-zero-uint16-type;
+ description "The maximum queue size for each shard's data store executor.";
+ }
+
leaf shard-transaction-idle-timeout-in-minutes {
default 10;
type non-zero-uint16-type;
@BeforeClass
public static void setUpClass() throws IOException {
- File journal = new File("journal");
-
- if(journal.exists()) {
- FileUtils.deleteDirectory(journal);
- }
System.setProperty("shard.persistent", "false");
system = ActorSystem.create("test");
public static void tearDownClass() throws IOException {
JavaTestKit.shutdownActorSystem(system);
system = null;
+ }
+ protected static void deletePersistenceFiles() throws IOException {
File journal = new File("journal");
if(journal.exists()) {
FileUtils.deleteDirectory(journal);
}
+
+ File snapshots = new File("snapshots");
+
+ if(snapshots.exists()){
+ FileUtils.deleteDirectory(snapshots);
+ }
+
}
protected ActorSystem getSystem() {
final SchemaContext schemaContext = TestModel.createTestContext();
DatastoreContext datastoreContext = new DatastoreContext();
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, datastoreContext);
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext());
final ActorRef shard = getSystem().actorOf(props);
new Within(duration("10 seconds")) {
final DistributedDataStore distributedDataStore =
new DistributedDataStore(getSystem(), "config",
new MockClusterWrapper(), configuration,
- new DistributedDataStoreProperties());
+ new DatastoreContext());
distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
new DistributedDataStore(actorSystem, "config",
mock(ClusterWrapper.class), mock(Configuration.class),
- new DistributedDataStoreProperties());
+ new DatastoreContext());
verify(actorSystem).actorOf(any(Props.class), eq("shardmanager-config"));
}
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import scala.concurrent.duration.Duration;
import static junit.framework.Assert.assertEquals;
final TestActorRef<ShardManager> subject =
TestActorRef.create(system, props);
+ subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
new Within(duration("10 seconds")) {
@Override
protected void run() {
final TestActorRef<ShardManager> subject =
TestActorRef.create(system, props);
+ subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
new Within(duration("10 seconds")) {
@Override
protected void run() {
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.event.Logging;
import akka.testkit.JavaTestKit;
-
+import akka.testkit.TestActorRef;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.ExecutionException;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class ShardTest extends AbstractActorTest {
ShardIdentifier.builder().memberName("member-1")
.shardName("inventory").type("config").build();
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT);
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testCreateTransactionChain");
ShardIdentifier.builder().memberName("member-1")
.shardName("inventory").type("config").build();
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT);
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testRegisterChangeListener");
ShardIdentifier.builder().memberName("member-1")
.shardName("inventory").type("config").build();
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT);
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testCreateTransaction");
.shardName("inventory").type("config").build();
peerAddresses.put(identifier, null);
- final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT);
+ final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testPeerAddressResolved");
}};
}
+ @Test
+ public void testApplySnapshot() throws ExecutionException, InterruptedException {
+ Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ peerAddresses.put(identifier, null);
+ final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
+
+ TestActorRef<Shard> ref = TestActorRef.create(getSystem(), props);
+
+ ref.underlyingActor().updateSchemaContext(TestModel.createTestContext());
+
+ NormalizedNodeToNodeCodec codec =
+ new NormalizedNodeToNodeCodec(TestModel.createTestContext());
+
+ ref.underlyingActor().writeToStore(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ NormalizedNode expected = ref.underlyingActor().readStore();
+
+ NormalizedNodeMessages.Container encode = codec
+ .encode(YangInstanceIdentifier.builder().build(), expected);
+
+
+ ref.underlyingActor().applySnapshot(encode.getNormalizedNode().toByteString());
+
+ NormalizedNode actual = ref.underlyingActor().readStore();
+
+ assertEquals(expected, actual);
+ }
+
+ private static class ShardTestKit extends JavaTestKit {
+
+ private ShardTestKit(ActorSystem actorSystem) {
+ super(actorSystem);
+ }
+
+ protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
+ // Wait for a specific log message to show up
+ final boolean result =
+ new JavaTestKit.EventFilter<Boolean>(logLevel
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from(subject.path().toString())
+ .message(logMessage)
+ .occurrences(1).exec();
+
+ Assert.assertEquals(true, result);
+
+ }
+
+ }
+
+ @Test
+ public void testCreateSnapshot() throws IOException, InterruptedException {
+ new ShardTestKit(getSystem()) {{
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
+ final ActorRef subject =
+ getSystem().actorOf(props, "testCreateSnapshot");
+
+ // Wait for a specific log message to show up
+ this.waitForLogMessage(Logging.Info.class, subject, "Switching from state Candidate to Leader");
+
+
+ new Within(duration("3 seconds")) {
+ @Override
+ protected void run() {
+
+ subject.tell(
+ new UpdateSchemaContext(TestModel.createTestContext()),
+ getRef());
+
+ subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
+ getRef());
+
+ waitForLogMessage(Logging.Debug.class, subject, "CaptureSnapshotReply received by actor");
+ }
+ };
+
+ Thread.sleep(2000);
+ deletePersistenceFiles();
+ }};
+ }
+
+ /**
+ * This test simply verifies that the applySnapShot logic will work
+ * @throws ReadFailedException
+ */
+ @Test
+ public void testInMemoryDataStoreRestore() throws ReadFailedException {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", MoreExecutors.listeningDecorator(
+ MoreExecutors.sameThreadExecutor()), MoreExecutors.sameThreadExecutor());
+
+ store.onGlobalContextUpdated(TestModel.createTestContext());
+
+ DOMStoreWriteTransaction putTransaction = store.newWriteOnlyTransaction();
+ putTransaction.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ commitTransaction(putTransaction);
+
+
+ NormalizedNode expected = readStore(store);
+
+ DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
+
+ writeTransaction.delete(YangInstanceIdentifier.builder().build());
+ writeTransaction.write(YangInstanceIdentifier.builder().build(), expected);
+
+ commitTransaction(writeTransaction);
+
+ NormalizedNode actual = readStore(store);
+
+ assertEquals(expected, actual);
+
+ }
+
+ private NormalizedNode readStore(InMemoryDOMDataStore store) throws ReadFailedException {
+ DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
+ transaction.read(YangInstanceIdentifier.builder().build());
+
+ Optional<NormalizedNode<?, ?>> optional = read.checkedGet();
+
+ NormalizedNode<?, ?> normalizedNode = optional.get();
+
+ transaction.close();
+
+ return normalizedNode;
+ }
+
+ private void commitTransaction(DOMStoreWriteTransaction transaction) {
+ DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
+ ListenableFuture<Void> future =
+ commitCohort.preCommit();
+ try {
+ future.get();
+ future = commitCohort.commit();
+ future.get();
+ } catch (InterruptedException | ExecutionException e) {
+ }
+ }
+
private AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> noOpDataChangeListener() {
return new AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>() {
@Override
import org.junit.BeforeClass;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChainReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
private static final String mockShardName = "mockShardName";
+ private final ShardStats shardStats = new ShardStats(mockShardName, "DataStore");
+
@BeforeClass
public static void staticSetup() {
store.onGlobalContextUpdated(testSchemaContext);
public void testOnReceiveCreateTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final Props props = ShardTransactionChain.props(store.createTransactionChain(),
- testSchemaContext, DATA_STORE_CONTEXT, mockShardName);
+ testSchemaContext, DATA_STORE_CONTEXT, shardStats);
final ActorRef subject = getSystem().actorOf(props, "testCreateTransaction");
new Within(duration("1 seconds")) {
public void testOnReceiveCloseTransactionChain() throws Exception {
new JavaTestKit(getSystem()) {{
final Props props = ShardTransactionChain.props(store.createTransactionChain(),
- testSchemaContext, DATA_STORE_CONTEXT,mockShardName );
+ testSchemaContext, DATA_STORE_CONTEXT, shardStats );
final ActorRef subject = getSystem().actorOf(props, "testCloseTransactionChain");
new Within(duration("1 seconds")) {
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
private final DatastoreContext datastoreContext = new DatastoreContext();
+ private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
+
@BeforeClass
public static void staticSetup() {
store.onGlobalContextUpdated(testSchemaContext);
}
+ private ActorRef createShard(){
+ return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext(), TestModel.createTestContext()));
+ }
+
@Test(expected = ReadFailedException.class)
public void testNegativeReadWithReadOnlyTransactionClosed()
throws Throwable {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
public void testNegativeReadWithReadWriteTransactionClosed()
throws Throwable {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
public void testNegativeExistsWithReadWriteTransactionClosed()
throws Throwable {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
public void testNegativeWriteWithTransactionReady() throws Exception {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
public void testNegativeReadWriteWithTransactionReady() throws Exception {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
public void testNegativeMergeTransactionReady() throws Exception {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props, "testNegativeMergeTransactionReady");
public void testNegativeDeleteDataWhenTransactionReady() throws Exception {
- final ActorRef shard =
- getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
private DatastoreContext datastoreContext = new DatastoreContext();
+ private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
+
@BeforeClass
public static void staticSetup() {
store.onGlobalContextUpdated(testSchemaContext);
}
+ private ActorRef createShard(){
+ return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
+ Collections.EMPTY_MAP, new DatastoreContext(), TestModel.createTestContext()));
+ }
+
@Test
public void testOnReceiveReadData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject = getSystem().actorOf(props, "testReadData");
new Within(duration("1 seconds")) {
@Test
public void testOnReceiveReadDataWhenDataNotFound() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject = getSystem().actorOf(props, "testReadDataWhenDataNotFound");
new Within(duration("1 seconds")) {
@Test
public void testOnReceiveDataExistsPositive() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject = getSystem().actorOf(props, "testDataExistsPositive");
new Within(duration("1 seconds")) {
@Test
public void testOnReceiveDataExistsNegative() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject = getSystem().actorOf(props, "testDataExistsNegative");
new Within(duration("1 seconds")) {
@Test
public void testOnReceiveWriteData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject =
getSystem().actorOf(props, "testWriteData");
@Test
public void testOnReceiveMergeData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject =
getSystem().actorOf(props, "testMergeData");
@Test
public void testOnReceiveDeleteData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject =
getSystem().actorOf(props, "testDeleteData");
@Test
public void testOnReceiveReadyTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject =
getSystem().actorOf(props, "testReadyTransaction");
@Test
public void testOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject =
getSystem().actorOf(props, "testCloseTransaction");
@Test(expected=UnknownMessageException.class)
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final TestActorRef subject = TestActorRef.apply(props,getSystem());
subject.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(), ActorRef.noSender());
@Test
public void testShardTransactionInactivity() {
- datastoreContext = new DatastoreContext(InMemoryDOMDataStoreConfigProperties.getDefault(),
- Duration.create(500, TimeUnit.MILLISECONDS));
+ datastoreContext = new DatastoreContext("Test",
+ InMemoryDOMDataStoreConfigProperties.getDefault(),
+ Duration.create(500, TimeUnit.MILLISECONDS), 5);
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext()));
+ final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString());
+ testSchemaContext, datastoreContext, shardStats);
final ActorRef subject =
getSystem().actorOf(props, "testShardTransactionInactivity");
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
private final DatastoreContext datastoreContext = new DatastoreContext();
+ private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
@BeforeClass
public static void staticSetup() {
private final FiniteDuration ASK_RESULT_DURATION = Duration.create(5000, TimeUnit.MILLISECONDS);
+ private ActorRef createShard(){
+ return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext()));
+ }
@Test(expected = TestException.class)
public void testNegativeAbortResultsInException() throws Exception {
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, datastoreContext));
+ final ActorRef shard = createShard();
final DOMStoreThreePhaseCommitCohort mockCohort = Mockito
.mock(DOMStoreThreePhaseCommitCohort.class);
final CompositeModification mockComposite =
Mockito.mock(CompositeModification.class);
final Props props =
- ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite,SHARD_IDENTIFIER.toString());
+ ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite, shardStats);
final TestActorRef<ThreePhaseCommitCohort> subject = TestActorRef
.create(getSystem(), props,
@Test(expected = OptimisticLockFailedException.class)
public void testNegativeCanCommitResultsInException() throws Exception {
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, datastoreContext));
+ final ActorRef shard = createShard();
final DOMStoreThreePhaseCommitCohort mockCohort = Mockito
.mock(DOMStoreThreePhaseCommitCohort.class);
final CompositeModification mockComposite =
Mockito.mock(CompositeModification.class);
final Props props =
- ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite,SHARD_IDENTIFIER.toString());
+ ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite, shardStats);
final TestActorRef<ThreePhaseCommitCohort> subject = TestActorRef
.create(getSystem(), props,
@Test(expected = TestException.class)
public void testNegativePreCommitResultsInException() throws Exception {
- final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, datastoreContext));
+ final ActorRef shard = createShard();
final DOMStoreThreePhaseCommitCohort mockCohort = Mockito
.mock(DOMStoreThreePhaseCommitCohort.class);
final CompositeModification mockComposite =
Mockito.mock(CompositeModification.class);
final Props props =
- ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite,SHARD_IDENTIFIER.toString());
+ ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite, shardStats);
final TestActorRef<ThreePhaseCommitCohort> subject = TestActorRef
.create(getSystem(), props,
public void testNegativeCommitResultsInException() throws Exception {
final TestActorRef<Shard> subject = TestActorRef.create(getSystem(),
- Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext),
+ Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext()),
"testNegativeCommitResultsInException");
final ActorRef shardTransaction =
getSystem().actorOf(ShardTransaction.props(store.newReadWriteTransaction(), subject,
- testSchemaContext, datastoreContext,SHARD_IDENTIFIER.toString()));
+ testSchemaContext, datastoreContext, shardStats));
ShardTransactionMessages.WriteData writeData =
ShardTransactionMessages.WriteData.newBuilder()
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import javax.management.MBeanServer;
import javax.management.ObjectName;
+
+import java.lang.management.ManagementFactory;
import java.text.SimpleDateFormat;
import java.util.Date;
@Before
public void setUp() throws Exception {
- shardStats = new ShardStats("shard-1");
+ shardStats = new ShardStats("shard-1", "DataStore");
shardStats.registerMBean();
- mbeanServer = shardStats.getMBeanServer();
+ mbeanServer = ManagementFactory.getPlatformMBeanServer();
String objectName =
- AbstractBaseMBean.BASE_JMX_PREFIX + "type=" + shardStats
+ AbstractMXBean.BASE_JMX_PREFIX + "type=" + shardStats
.getMBeanType() + ",Category=" +
shardStats.getMBeanCategory() + ",name=" +
shardStats.getMBeanName();
public void testGetShardName() throws Exception {
Object attribute = mbeanServer.getAttribute(testMBeanName, "ShardName");
- Assert.assertEquals((String) attribute, "shard-1");
+ Assert.assertEquals(attribute, "shard-1");
}
//now let us get from MBeanServer what is the transaction count.
Object attribute = mbeanServer.getAttribute(testMBeanName,
"CommittedTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 3L);
+ Assert.assertEquals(attribute, 3L);
}
Assert.assertEquals(shardStats.getLastCommittedTransactionTime(),
sdf.format(new Date(0L)));
long millis = System.currentTimeMillis();
- shardStats.setLastCommittedTransactionTime(new Date(millis));
+ shardStats.setLastCommittedTransactionTime(millis);
//now let us get from MBeanServer what is the transaction count.
Object attribute = mbeanServer.getAttribute(testMBeanName,
"LastCommittedTransactionTime");
- Assert.assertEquals((String) attribute, sdf.format(new Date(millis)));
- Assert.assertNotEquals((String) attribute,
+ Assert.assertEquals(attribute, sdf.format(new Date(millis)));
+ Assert.assertNotEquals(attribute,
sdf.format(new Date(millis - 1)));
}
//now let us get from MBeanServer what is the transaction count.
Object attribute =
mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 2L);
+ Assert.assertEquals(attribute, 2L);
}
@Test
//now let us get from MBeanServer what is the transaction count.
Object attribute =
mbeanServer.getAttribute(testMBeanName, "AbortTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 2L);
+ Assert.assertEquals(attribute, 2L);
}
@Test
//now let us get from MBeanServer what is the transaction count.
Object attribute =
mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 2L);
+ Assert.assertEquals(attribute, 2L);
}
@Test
//now let us get from MBeanServer what is the transaction count.
Object attribute = mbeanServer.getAttribute(testMBeanName,
"CommittedTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 3L);
+ Assert.assertEquals(attribute, 3L);
//let us increment FailedReadTransactions count and then check
shardStats.incrementFailedReadTransactionsCount();
//now let us get from MBeanServer what is the transaction count.
attribute =
mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 2L);
+ Assert.assertEquals(attribute, 2L);
//here we will reset the counters and check the above ones are 0 after reset
//now let us get from MBeanServer what is the transaction count.
attribute = mbeanServer.getAttribute(testMBeanName,
"CommittedTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 0L);
+ Assert.assertEquals(attribute, 0L);
attribute =
mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long) 0L);
+ Assert.assertEquals(attribute, 0L);
}
package org.opendaylight.controller.cluster.datastore.modification;
import com.google.common.base.Optional;
-import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+
public class MutableCompositeModificationTest extends AbstractModificationTest {
- @Test
- public void testApply() throws Exception {
+ @Test
+ public void testApply() throws Exception {
+
+ MutableCompositeModification compositeModification = new MutableCompositeModification();
+ compositeModification.addModification(new WriteModification(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()));
+
+ DOMStoreReadWriteTransaction transaction = store.newReadWriteTransaction();
+ compositeModification.apply(transaction);
+ commitTransaction(transaction);
+
+ Optional<NormalizedNode<?, ?>> data = readData(TestModel.TEST_PATH);
- MutableCompositeModification compositeModification = new MutableCompositeModification();
- compositeModification.addModification(new WriteModification(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()));
+ assertNotNull(data.get());
+ assertEquals(TestModel.TEST_QNAME, data.get().getNodeType());
+ }
- DOMStoreReadWriteTransaction transaction = store.newReadWriteTransaction();
- compositeModification.apply(transaction);
- commitTransaction(transaction);
+ @Test
+ public void testEverySerializedCompositeModificationObjectMustBeDifferent(){
+ MutableCompositeModification compositeModification = new MutableCompositeModification();
+ compositeModification.addModification(new WriteModification(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()));
- Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
+ assertNotEquals(compositeModification.toSerializable(), compositeModification.toSerializable());
- Assert.assertNotNull(data.get());
- Assert.assertEquals(TestModel.TEST_QNAME, data.get().getNodeType());
- }
+ }
}
<Export-package></Export-package>
<Private-Package></Private-Package>
<Import-Package>!org.iq80.*;!*snappy;!org.jboss.*;!com.jcraft.*;!org.fusesource.*;!*jetty*;!sun.security.*;*</Import-Package>
+ <!--
<Embed-Dependency>
sal-clustering-commons;
sal-akka-raft;
*uncommons*;
</Embed-Dependency>
<Embed-Transitive>true</Embed-Transitive>
+ -->
</instructions>
</configuration>
</plugin>
import akka.actor.ActorSystem;
import akka.osgi.BundleDelegatingClassLoader;
-import com.typesafe.config.ConfigFactory;
+import org.opendaylight.controller.remote.rpc.utils.AkkaConfigurationReader;
import org.osgi.framework.BundleContext;
public class ActorSystemFactory {
- private static volatile ActorSystem actorSystem = null;
+
+ public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-rpc";
+ public static final String CONFIGURATION_NAME = "odl-cluster-rpc";
+
+ private static volatile ActorSystem actorSystem = null;
public static final ActorSystem getInstance(){
return actorSystem;
*
* @param bundleContext
*/
- public static final void createInstance(final BundleContext bundleContext) {
+ public static final void createInstance(final BundleContext bundleContext, AkkaConfigurationReader akkaConfigurationReader) {
if(actorSystem == null) {
// Create an OSGi bundle classloader for actor system
BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
synchronized (ActorSystemFactory.class) {
// Double check
if (actorSystem == null) {
- ActorSystem system = ActorSystem.create("opendaylight-cluster-rpc",
- ConfigFactory.load().getConfig("odl-cluster-rpc"), classLoader);
+ ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
+ akkaConfigurationReader.read().getConfig(CONFIGURATION_NAME), classLoader);
actorSystem = system;
}
}
throw new IllegalStateException("Actor system should be created only once. Use getInstance method to access existing actor system");
}
}
+
}
package org.opendaylight.controller.remote.rpc;
+import static akka.pattern.Patterns.ask;
import akka.actor.ActorRef;
-import com.google.common.util.concurrent.Futures;
+import akka.dispatch.OnComplete;
+import akka.util.Timeout;
+
import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.remote.rpc.messages.ErrorResponse;
+import com.google.common.util.concurrent.SettableFuture;
+
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import scala.concurrent.ExecutionContext;
+
import java.util.Collections;
import java.util.Set;
-public class RemoteRpcImplementation implements RpcImplementation,
- RoutedRpcDefaultImplementation {
- private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcImplementation.class);
- private ActorRef rpcBroker;
- private SchemaContext schemaContext;
-
- public RemoteRpcImplementation(ActorRef rpcBroker, SchemaContext schemaContext) {
- this.rpcBroker = rpcBroker;
- this.schemaContext = schemaContext;
- }
-
- @Override
- public ListenableFuture<RpcResult<CompositeNode>> invokeRpc(QName rpc, YangInstanceIdentifier identifier, CompositeNode input) {
- InvokeRpc rpcMsg = new InvokeRpc(rpc, identifier, input);
-
- return executeMsg(rpcMsg);
- }
-
- @Override
- public Set<QName> getSupportedRpcs() {
- // TODO : check if we need to get this from routing registry
- return Collections.emptySet();
- }
-
- @Override
- public ListenableFuture<RpcResult<CompositeNode>> invokeRpc(QName rpc, CompositeNode input) {
- InvokeRpc rpcMsg = new InvokeRpc(rpc, null, input);
- return executeMsg(rpcMsg);
- }
-
- private ListenableFuture<RpcResult<CompositeNode>> executeMsg(Object rpcMsg) {
- ListenableFuture<RpcResult<CompositeNode>> listenableFuture = null;
-
- try {
- Object response = ActorUtil.executeOperation(rpcBroker, rpcMsg, ActorUtil.ASK_DURATION, ActorUtil.AWAIT_DURATION);
- if(response instanceof RpcResponse) {
-
- RpcResponse rpcResponse = (RpcResponse) response;
- CompositeNode result = XmlUtils.xmlToCompositeNode(rpcResponse.getResultCompositeNode());
- listenableFuture = Futures.immediateFuture(RpcResultBuilder.success(result).build());
-
- } else if(response instanceof ErrorResponse) {
-
- ErrorResponse errorResponse = (ErrorResponse) response;
- Exception e = errorResponse.getException();
- final RpcResultBuilder<CompositeNode> failed = RpcResultBuilder.failed();
- failed.withError(null, null, e.getMessage(), null, null, e.getCause());
- listenableFuture = Futures.immediateFuture(failed.build());
-
- }
- } catch (Exception e) {
- LOG.error("Error occurred while invoking RPC actor {}", e);
-
- final RpcResultBuilder<CompositeNode> failed = RpcResultBuilder.failed();
- failed.withError(null, null, e.getMessage(), null, null, e.getCause());
- listenableFuture = Futures.immediateFuture(failed.build());
+public class RemoteRpcImplementation implements RpcImplementation, RoutedRpcDefaultImplementation {
+ private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcImplementation.class);
+ private final ActorRef rpcBroker;
+ private final SchemaContext schemaContext;
+
+ public RemoteRpcImplementation(ActorRef rpcBroker, SchemaContext schemaContext) {
+ this.rpcBroker = rpcBroker;
+ this.schemaContext = schemaContext;
+ }
+
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> invokeRpc(QName rpc,
+ YangInstanceIdentifier identifier, CompositeNode input) {
+ InvokeRpc rpcMsg = new InvokeRpc(rpc, identifier, input);
+
+ return executeMsg(rpcMsg);
+ }
+
+ @Override
+ public Set<QName> getSupportedRpcs() {
+ // TODO : check if we need to get this from routing registry
+ return Collections.emptySet();
+ }
+
+ @Override
+ public ListenableFuture<RpcResult<CompositeNode>> invokeRpc(QName rpc, CompositeNode input) {
+ InvokeRpc rpcMsg = new InvokeRpc(rpc, null, input);
+ return executeMsg(rpcMsg);
}
- return listenableFuture;
- }
+ private ListenableFuture<RpcResult<CompositeNode>> executeMsg(InvokeRpc rpcMsg) {
+
+ final SettableFuture<RpcResult<CompositeNode>> listenableFuture = SettableFuture.create();
+
+ scala.concurrent.Future<Object> future = ask(rpcBroker, rpcMsg,
+ new Timeout(ActorUtil.ASK_DURATION));
+
+ OnComplete<Object> onComplete = new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object reply) throws Throwable {
+ if(failure != null) {
+ LOG.error("InvokeRpc failed", failure);
+
+ RpcResult<CompositeNode> rpcResult;
+ if(failure instanceof RpcErrorsException) {
+ rpcResult = RpcResultBuilder.<CompositeNode>failed().withRpcErrors(
+ ((RpcErrorsException)failure).getRpcErrors()).build();
+ } else {
+ rpcResult = RpcResultBuilder.<CompositeNode>failed().withError(
+ ErrorType.RPC, failure.getMessage(), failure).build();
+ }
+
+ listenableFuture.set(rpcResult);
+ return;
+ }
+
+ RpcResponse rpcReply = (RpcResponse)reply;
+ CompositeNode result = XmlUtils.xmlToCompositeNode(rpcReply.getResultCompositeNode());
+ listenableFuture.set(RpcResultBuilder.success(result).build());
+ }
+ };
+
+ future.onComplete(onComplete, ExecutionContext.Implicits$.MODULE$.global());
+
+ return listenableFuture;
+ }
}
package org.opendaylight.controller.remote.rpc;
+import org.opendaylight.controller.remote.rpc.utils.DefaultAkkaConfigurationReader;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
import org.osgi.framework.BundleContext;
public class RemoteRpcProviderFactory {
public static RemoteRpcProvider createInstance(final Broker broker, final BundleContext bundleContext){
- ActorSystemFactory.createInstance(bundleContext);
+ ActorSystemFactory.createInstance(bundleContext, new DefaultAkkaConfigurationReader());
RemoteRpcProvider rpcProvider =
new RemoteRpcProvider(ActorSystemFactory.getInstance(), (RpcProvisionRegistry) broker);
broker.registerProvider(rpcProvider);
package org.opendaylight.controller.remote.rpc;
+import static akka.pattern.Patterns.ask;
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.japi.Pair;
-import org.opendaylight.controller.remote.rpc.messages.ErrorResponse;
+import akka.util.Timeout;
+
import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
import org.opendaylight.controller.sal.core.api.Broker;
+import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+
+import java.util.Arrays;
+import java.util.Collection;
import java.util.List;
import java.util.concurrent.Future;
public class RpcBroker extends AbstractUntypedActor {
- private static final Logger LOG = LoggerFactory.getLogger(RpcBroker.class);
- private final Broker.ProviderSession brokerSession;
- private final ActorRef rpcRegistry;
- private SchemaContext schemaContext;
-
- private RpcBroker(Broker.ProviderSession brokerSession, ActorRef rpcRegistry, SchemaContext schemaContext){
- this.brokerSession = brokerSession;
- this.rpcRegistry = rpcRegistry;
- this.schemaContext = schemaContext;
- }
-
- public static Props props(final Broker.ProviderSession brokerSession, final ActorRef rpcRegistry, final SchemaContext schemaContext){
- return Props.create(new Creator<RpcBroker>(){
-
- @Override
- public RpcBroker create() throws Exception {
- return new RpcBroker(brokerSession, rpcRegistry, schemaContext);
- }
- });
- }
- @Override
- protected void handleReceive(Object message) throws Exception {
- if(message instanceof InvokeRpc) {
- invokeRemoteRpc((InvokeRpc) message);
- } else if(message instanceof ExecuteRpc) {
- executeRpc((ExecuteRpc) message);
+ private static final Logger LOG = LoggerFactory.getLogger(RpcBroker.class);
+ private final Broker.ProviderSession brokerSession;
+ private final ActorRef rpcRegistry;
+ private final SchemaContext schemaContext;
+
+ private RpcBroker(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
+ SchemaContext schemaContext) {
+ this.brokerSession = brokerSession;
+ this.rpcRegistry = rpcRegistry;
+ this.schemaContext = schemaContext;
}
- }
-
- private void invokeRemoteRpc(InvokeRpc msg) {
- // Look up the remote actor to execute rpc
- LOG.debug("Looking up the remote actor for route {}", msg);
- try {
- // Find router
- RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, msg.getRpc(), msg.getIdentifier());
- RpcRegistry.Messages.FindRouters rpcMsg = new RpcRegistry.Messages.FindRouters(routeId);
- RpcRegistry.Messages.FindRoutersReply rpcReply =
- (RpcRegistry.Messages.FindRoutersReply) ActorUtil.executeOperation(rpcRegistry, rpcMsg, ActorUtil.LOCAL_ASK_DURATION, ActorUtil.LOCAL_AWAIT_DURATION);
-
- List<Pair<ActorRef, Long>> actorRefList = rpcReply.getRouterWithUpdateTime();
-
- if(actorRefList == null || actorRefList.isEmpty()) {
- LOG.debug("No remote actor found for rpc {{}}.", msg.getRpc());
-
- getSender().tell(new ErrorResponse(
- new IllegalStateException("No remote actor found for rpc execution of : " + msg.getRpc())), self());
- } else {
- RoutingLogic logic = new LatestEntryRoutingLogic(actorRefList);
- ExecuteRpc executeMsg = new ExecuteRpc(XmlUtils.inputCompositeNodeToXml(msg.getInput(), schemaContext), msg.getRpc());
- Object operationRes = ActorUtil.executeOperation(logic.select(),
- executeMsg, ActorUtil.REMOTE_ASK_DURATION, ActorUtil.REMOTE_AWAIT_DURATION);
+ public static Props props(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
+ SchemaContext schemaContext) {
+ return Props.create(new RpcBrokerCreator(brokerSession, rpcRegistry, schemaContext));
+ }
- getSender().tell(operationRes, self());
- }
- } catch (Exception e) {
- LOG.error("invokeRemoteRpc: {}", e);
- getSender().tell(new ErrorResponse(e), self());
+ @Override
+ protected void handleReceive(Object message) throws Exception {
+ if(message instanceof InvokeRpc) {
+ invokeRemoteRpc((InvokeRpc) message);
+ } else if(message instanceof ExecuteRpc) {
+ executeRpc((ExecuteRpc) message);
+ }
}
- }
+ private void invokeRemoteRpc(final InvokeRpc msg) {
+ LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
+
+ RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(
+ null, msg.getRpc(), msg.getIdentifier());
+ RpcRegistry.Messages.FindRouters findMsg = new RpcRegistry.Messages.FindRouters(routeId);
+
+ scala.concurrent.Future<Object> future = ask(rpcRegistry, findMsg,
+ new Timeout(ActorUtil.LOCAL_ASK_DURATION));
+ final ActorRef sender = getSender();
+ final ActorRef self = self();
- private void executeRpc(ExecuteRpc msg) {
- LOG.debug("Executing rpc for rpc {}", msg.getRpc());
- try {
- Future<RpcResult<CompositeNode>> rpc = brokerSession.rpc(msg.getRpc(),
- XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(), schemaContext));
- RpcResult<CompositeNode> rpcResult = rpc != null ? rpc.get():null;
- CompositeNode result = rpcResult != null ? rpcResult.getResult() : null;
- getSender().tell(new RpcResponse(XmlUtils.outputCompositeNodeToXml(result, schemaContext)), self());
- } catch (Exception e) {
- LOG.error("executeRpc: {}", e);
- getSender().tell(new ErrorResponse(e), self());
+ OnComplete<Object> onComplete = new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object reply) throws Throwable {
+ if(failure != null) {
+ LOG.error("FindRouters failed", failure);
+ sender.tell(new akka.actor.Status.Failure(failure), self);
+ return;
+ }
+
+ RpcRegistry.Messages.FindRoutersReply findReply =
+ (RpcRegistry.Messages.FindRoutersReply)reply;
+
+ List<Pair<ActorRef, Long>> actorRefList = findReply.getRouterWithUpdateTime();
+
+ if(actorRefList == null || actorRefList.isEmpty()) {
+ String message = String.format(
+ "No remote implementation found for rpc %s", msg.getRpc());
+ sender.tell(new akka.actor.Status.Failure(new RpcErrorsException(
+ message, Arrays.asList(RpcResultBuilder.newError(ErrorType.RPC,
+ "operation-not-supported", message)))), self);
+ return;
+ }
+
+ finishInvokeRpc(actorRefList, msg, sender, self);
+ }
+ };
+
+ future.onComplete(onComplete, getContext().dispatcher());
}
- }
+ protected void finishInvokeRpc(final List<Pair<ActorRef, Long>> actorRefList,
+ final InvokeRpc msg, final ActorRef sender, final ActorRef self) {
+
+ RoutingLogic logic = new LatestEntryRoutingLogic(actorRefList);
+
+ ExecuteRpc executeMsg = new ExecuteRpc(XmlUtils.inputCompositeNodeToXml(msg.getInput(),
+ schemaContext), msg.getRpc());
+
+ scala.concurrent.Future<Object> future = ask(logic.select(), executeMsg,
+ new Timeout(ActorUtil.REMOTE_ASK_DURATION));
+
+ OnComplete<Object> onComplete = new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object reply) throws Throwable {
+ if(failure != null) {
+ LOG.error("ExecuteRpc failed", failure);
+ sender.tell(new akka.actor.Status.Failure(failure), self);
+ return;
+ }
+
+ sender.tell(reply, self);
+ }
+ };
+
+ future.onComplete(onComplete, getContext().dispatcher());
+ }
+
+ private void executeRpc(final ExecuteRpc msg) {
+ LOG.debug("Executing rpc {}", msg.getRpc());
+
+ Future<RpcResult<CompositeNode>> future = brokerSession.rpc(msg.getRpc(),
+ XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(),
+ schemaContext));
+
+ ListenableFuture<RpcResult<CompositeNode>> listenableFuture =
+ JdkFutureAdapters.listenInPoolThread(future);
+
+ final ActorRef sender = getSender();
+ final ActorRef self = self();
+
+ Futures.addCallback(listenableFuture, new FutureCallback<RpcResult<CompositeNode>>() {
+ @Override
+ public void onSuccess(RpcResult<CompositeNode> result) {
+ if(result.isSuccessful()) {
+ sender.tell(new RpcResponse(XmlUtils.outputCompositeNodeToXml(result.getResult(),
+ schemaContext)), self);
+ } else {
+ String message = String.format("Execution of RPC %s failed", msg.getRpc());
+ Collection<RpcError> errors = result.getErrors();
+ if(errors == null || errors.size() == 0) {
+ errors = Arrays.asList(RpcResultBuilder.newError(ErrorType.RPC,
+ null, message));
+ }
+
+ sender.tell(new akka.actor.Status.Failure(new RpcErrorsException(
+ message, errors)), self);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("executeRpc for {} failed: {}", msg.getRpc(), t);
+ sender.tell(new akka.actor.Status.Failure(t), self);
+ }
+ });
+ }
+
+ private static class RpcBrokerCreator implements Creator<RpcBroker> {
+ private static final long serialVersionUID = 1L;
+
+ final Broker.ProviderSession brokerSession;
+ final ActorRef rpcRegistry;
+ final SchemaContext schemaContext;
+
+ RpcBrokerCreator(ProviderSession brokerSession, ActorRef rpcRegistry,
+ SchemaContext schemaContext) {
+ this.brokerSession = brokerSession;
+ this.rpcRegistry = rpcRegistry;
+ this.schemaContext = schemaContext;
+ }
+
+ @Override
+ public RpcBroker create() throws Exception {
+ return new RpcBroker(brokerSession, rpcRegistry, schemaContext);
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+/**
+ * An Exception for transferring RpcErrors.
+ *
+ * @author Thomas Pantelis
+ */
+public class RpcErrorsException extends Exception {
+
+ private static final long serialVersionUID = 1L;
+
+ private static class RpcErrorData implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ final ErrorSeverity severity;
+ final ErrorType errorType;
+ final String tag;
+ final String applicationTag;
+ final String message;
+ final String info;
+ final Throwable cause;
+
+ RpcErrorData(ErrorSeverity severity, ErrorType errorType, String tag,
+ String applicationTag, String message, String info, Throwable cause) {
+ this.severity = severity;
+ this.errorType = errorType;
+ this.tag = tag;
+ this.applicationTag = applicationTag;
+ this.message = message;
+ this.info = info;
+ this.cause = cause;
+ }
+ }
+
+ private final List<RpcErrorData> rpcErrorDataList = new ArrayList<>();
+
+ public RpcErrorsException(String message, Iterable<RpcError> rpcErrors) {
+ super(message);
+
+ for(RpcError rpcError: rpcErrors) {
+ rpcErrorDataList.add(new RpcErrorData(rpcError.getSeverity(), rpcError.getErrorType(),
+ rpcError.getTag(), rpcError.getApplicationTag(), rpcError.getMessage(),
+ rpcError.getInfo(), rpcError.getCause()));
+ }
+ }
+
+ public Collection<RpcError> getRpcErrors() {
+ Collection<RpcError> rpcErrors = new ArrayList<>();
+ for(RpcErrorData ed: rpcErrorDataList) {
+ RpcError rpcError = ed.severity == ErrorSeverity.ERROR ?
+ RpcResultBuilder.newError(ed.errorType, ed.tag, ed.message, ed.applicationTag,
+ ed.info, ed.cause) :
+ RpcResultBuilder.newWarning(ed.errorType, ed.tag, ed.message, ed.applicationTag,
+ ed.info, ed.cause);
+ rpcErrors.add(rpcError);
+ }
+
+ return rpcErrors;
+ }
+}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.remote.rpc.messages;
-
-import com.google.common.base.Preconditions;
-
-import java.io.Serializable;
-
-public class ErrorResponse implements Serializable {
-
- private final Exception exception;
-
- public ErrorResponse(final Exception e) {
- Preconditions.checkNotNull(e, "Exception should be present for error message");
- this.exception = e;
- }
-
- public Exception getException() {
- return exception;
- }
-}
*/
package org.opendaylight.controller.remote.rpc.utils;
-import akka.actor.ActorRef;
-import akka.util.Timeout;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
import java.util.concurrent.TimeUnit;
-import static akka.pattern.Patterns.ask;
-
public class ActorUtil {
public static final FiniteDuration LOCAL_ASK_DURATION = Duration.create(2, TimeUnit.SECONDS);
public static final FiniteDuration REMOTE_ASK_DURATION = Duration.create(15, TimeUnit.SECONDS);
public static final FiniteDuration ASK_DURATION = Duration.create(17, TimeUnit.SECONDS);
- public static final FiniteDuration LOCAL_AWAIT_DURATION = Duration.create(2, TimeUnit.SECONDS);
- public static final FiniteDuration REMOTE_AWAIT_DURATION = Duration.create(15, TimeUnit.SECONDS);
- public static final FiniteDuration AWAIT_DURATION = Duration.create(17, TimeUnit.SECONDS);
public static final FiniteDuration GOSSIP_TICK_INTERVAL = Duration.create(500, TimeUnit.MILLISECONDS);
public static final String MAILBOX = "bounded-mailbox";
-
-
- /**
- * Executes an operation on a local actor and wait for it's response
- *
- * @param actor
- * @param message
- * @param askDuration
- * @param awaitDuration
- * @return The response of the operation
- */
- public static Object executeOperation(ActorRef actor, Object message,
- FiniteDuration askDuration, FiniteDuration awaitDuration) throws Exception {
- Future<Object> future =
- ask(actor, message, new Timeout(askDuration));
-
- return Await.result(future, awaitDuration);
- }
-
-
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.utils;
+
+import com.typesafe.config.Config;
+
+public interface AkkaConfigurationReader {
+ Config read();
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.utils;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+import java.io.File;
+
+public class DefaultAkkaConfigurationReader implements AkkaConfigurationReader {
+ public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+
+ @Override public Config read() {
+ File defaultConfigFile = new File(AKKA_CONF_PATH);
+ Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
+ return ConfigFactory.parseFile(defaultConfigFile);
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.mockito.Mockito;
+import org.opendaylight.controller.sal.core.api.Broker;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
+import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.testkit.JavaTestKit;
+
+import com.google.common.collect.ImmutableList;
+import com.typesafe.config.ConfigFactory;
+
+/**
+ * Base class for RPC tests.
+ *
+ * @author Thomas Pantelis
+ */
+public class AbstractRpcTest {
+ static final String TEST_REV = "2014-08-28";
+ static final String TEST_NS = "urn:test";
+ static final URI TEST_URI = URI.create(TEST_NS);
+ static final QName TEST_RPC = QName.create(TEST_NS, TEST_REV, "test-rpc");
+ static final QName TEST_RPC_INPUT = QName.create(TEST_NS, TEST_REV, "input");
+ static final QName TEST_RPC_INPUT_DATA = QName.create(TEST_NS, TEST_REV, "input-data");
+ static final QName TEST_RPC_OUTPUT = QName.create(TEST_NS, TEST_REV, "output");
+ static final QName TEST_RPC_OUTPUT_DATA = new QName(TEST_URI, "output-data");
+
+ static ActorSystem node1;
+ static ActorSystem node2;
+
+ protected ActorRef rpcBroker1;
+ protected JavaTestKit probeReg1;
+ protected ActorRef rpcBroker2;
+ protected JavaTestKit probeReg2;
+ protected Broker.ProviderSession brokerSession;
+ protected SchemaContext schemaContext;
+
+ @BeforeClass
+ public static void setup() throws InterruptedException {
+ node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
+ node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
+ }
+
+ @AfterClass
+ public static void teardown() {
+ JavaTestKit.shutdownActorSystem(node1);
+ JavaTestKit.shutdownActorSystem(node2);
+ node1 = null;
+ node2 = null;
+ }
+
+ @Before
+ public void setUp() {
+ schemaContext = new YangParserImpl().parseFiles(Arrays.asList(
+ new File(RpcBrokerTest.class.getResource("/test-rpc.yang").getPath())));
+
+ brokerSession = Mockito.mock(Broker.ProviderSession.class);
+ probeReg1 = new JavaTestKit(node1);
+ rpcBroker1 = node1.actorOf(RpcBroker.props(brokerSession, probeReg1.getRef(), schemaContext));
+ probeReg2 = new JavaTestKit(node2);
+ rpcBroker2 = node2.actorOf(RpcBroker.props(brokerSession, probeReg2.getRef(), schemaContext));
+
+ }
+
+ static void assertRpcErrorEquals(RpcError rpcError, ErrorSeverity severity,
+ ErrorType errorType, String tag, String message, String applicationTag, String info,
+ String causeMsg) {
+ assertEquals("getSeverity", severity, rpcError.getSeverity());
+ assertEquals("getErrorType", errorType, rpcError.getErrorType());
+ assertEquals("getTag", tag, rpcError.getTag());
+ assertTrue("getMessage contains " + message, rpcError.getMessage().contains(message));
+ assertEquals("getApplicationTag", applicationTag, rpcError.getApplicationTag());
+ assertEquals("getInfo", info, rpcError.getInfo());
+
+ if(causeMsg == null) {
+ assertNull("Unexpected cause " + rpcError.getCause(), rpcError.getCause());
+ } else {
+ assertEquals("Cause message", causeMsg, rpcError.getCause().getMessage());
+ }
+ }
+
+ static void assertCompositeNodeEquals(CompositeNode exp, CompositeNode actual) {
+ assertEquals("NodeType getNamespace", exp.getNodeType().getNamespace(),
+ actual.getNodeType().getNamespace());
+ assertEquals("NodeType getLocalName", exp.getNodeType().getLocalName(),
+ actual.getNodeType().getLocalName());
+ for(Node<?> child: exp.getValue()) {
+ List<Node<?>> c = actual.get(child.getNodeType());
+ assertNotNull("Missing expected child " + child.getNodeType(), c);
+ if(child instanceof CompositeNode) {
+ assertCompositeNodeEquals((CompositeNode) child, (CompositeNode)c.get(0));
+ } else {
+ assertEquals("Value for Node " + child.getNodeType(), child.getValue(),
+ c.get(0).getValue());
+ }
+ }
+ }
+
+ static CompositeNode makeRPCInput(String data) {
+ CompositeNodeBuilder<ImmutableCompositeNode> builder = ImmutableCompositeNode.builder()
+ .setQName(TEST_RPC_INPUT).addLeaf(TEST_RPC_INPUT_DATA, data);
+ return ImmutableCompositeNode.create(
+ TEST_RPC, ImmutableList.<Node<?>>of(builder.toInstance()));
+ }
+
+ static CompositeNode makeRPCOutput(String data) {
+ CompositeNodeBuilder<ImmutableCompositeNode> builder = ImmutableCompositeNode.builder()
+ .setQName(TEST_RPC_OUTPUT).addLeaf(TEST_RPC_OUTPUT_DATA, data);
+ return ImmutableCompositeNode.create(
+ TEST_RPC, ImmutableList.<Node<?>>of(builder.toInstance()));
+ }
+
+ static void assertFailedRpcResult(RpcResult<CompositeNode> rpcResult, ErrorSeverity severity,
+ ErrorType errorType, String tag, String message, String applicationTag, String info,
+ String causeMsg) {
+
+ assertNotNull("RpcResult was null", rpcResult);
+ assertEquals("isSuccessful", false, rpcResult.isSuccessful());
+ Collection<RpcError> rpcErrors = rpcResult.getErrors();
+ assertEquals("RpcErrors count", 1, rpcErrors.size());
+ assertRpcErrorEquals(rpcErrors.iterator().next(), severity, errorType, tag, message,
+ applicationTag, info, causeMsg);
+ }
+
+ static void assertSuccessfulRpcResult(RpcResult<CompositeNode> rpcResult,
+ CompositeNode expOutput) {
+
+ assertNotNull("RpcResult was null", rpcResult);
+ assertEquals("isSuccessful", true, rpcResult.isSuccessful());
+ assertCompositeNodeEquals(expOutput, rpcResult.getResult());
+ }
+
+ static class TestException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ static final String MESSAGE = "mock error";
+
+ TestException() {
+ super(MESSAGE);
+ }
+ }
+}
import akka.actor.ActorSystem;
+import com.typesafe.config.ConfigFactory;
import junit.framework.Assert;
import org.junit.After;
import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.utils.AkkaConfigurationReader;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
public void testActorSystemCreation(){
BundleContext context = mock(BundleContext.class);
when(context.getBundle()).thenReturn(mock(Bundle.class));
- ActorSystemFactory.createInstance(context);
+
+ AkkaConfigurationReader reader = mock(AkkaConfigurationReader.class);
+ when(reader.read()).thenReturn(ConfigFactory.load());
+
+ ActorSystemFactory.createInstance(context, reader);
system = ActorSystemFactory.getInstance();
Assert.assertNotNull(system);
// Check illegal state exception
try {
- ActorSystemFactory.createInstance(context);
+ ActorSystemFactory.createInstance(context, reader);
fail("Illegal State exception should be thrown, while creating actor system second time");
} catch (IllegalStateException e) {
}
system.shutdown();
}
}
-
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc;
+
+import static org.junit.Assert.assertEquals;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
+import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
+import org.opendaylight.controller.xml.codec.XmlUtils;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+import akka.testkit.JavaTestKit;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Unit tests for RemoteRpcImplementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class RemoteRpcImplementationTest extends AbstractRpcTest {
+
+ @Test
+ public void testInvokeRpc() throws Exception {
+ final AtomicReference<AssertionError> assertError = new AtomicReference<>();
+ try {
+ RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
+ probeReg1.getRef(), schemaContext);
+
+ final CompositeNode input = makeRPCInput("foo");
+ final CompositeNode output = makeRPCOutput("bar");
+ final AtomicReference<InvokeRpc> invokeRpcMsg = setupInvokeRpcReply(assertError, output);
+
+ ListenableFuture<RpcResult<CompositeNode>> future = rpcImpl.invokeRpc(TEST_RPC, input);
+
+ RpcResult<CompositeNode> rpcResult = future.get(5, TimeUnit.SECONDS);
+
+ assertSuccessfulRpcResult(rpcResult, (CompositeNode)output.getValue().get(0));
+
+ assertEquals("getRpc", TEST_RPC, invokeRpcMsg.get().getRpc());
+ assertEquals("getInput", input, invokeRpcMsg.get().getInput());
+ } finally {
+ if(assertError.get() != null) {
+ throw assertError.get();
+ }
+ }
+ }
+
+ @Test
+ public void testInvokeRpcWithIdentifier() throws Exception {
+ final AtomicReference<AssertionError> assertError = new AtomicReference<>();
+ try {
+ RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
+ probeReg1.getRef(), schemaContext);
+
+ QName instanceQName = new QName(new URI("ns"), "instance");
+ YangInstanceIdentifier identifier = YangInstanceIdentifier.of(instanceQName);
+
+ CompositeNode input = makeRPCInput("foo");
+ CompositeNode output = makeRPCOutput("bar");
+ final AtomicReference<InvokeRpc> invokeRpcMsg = setupInvokeRpcReply(assertError, output);
+
+ ListenableFuture<RpcResult<CompositeNode>> future = rpcImpl.invokeRpc(
+ TEST_RPC, identifier, input);
+
+ RpcResult<CompositeNode> rpcResult = future.get(5, TimeUnit.SECONDS);
+
+ assertSuccessfulRpcResult(rpcResult, (CompositeNode)output.getValue().get(0));
+
+ assertEquals("getRpc", TEST_RPC, invokeRpcMsg.get().getRpc());
+ assertEquals("getInput", input, invokeRpcMsg.get().getInput());
+ assertEquals("getRoute", identifier, invokeRpcMsg.get().getIdentifier());
+ } finally {
+ if(assertError.get() != null) {
+ throw assertError.get();
+ }
+ }
+ }
+
+ @Test
+ public void testInvokeRpcWithRpcErrorsException() throws Exception {
+ final AtomicReference<AssertionError> assertError = new AtomicReference<>();
+ try {
+ RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
+ probeReg1.getRef(), schemaContext);
+
+ final CompositeNode input = makeRPCInput("foo");
+
+ setupInvokeRpcErrorReply(assertError, new RpcErrorsException(
+ "mock", Arrays.asList(RpcResultBuilder.newError(ErrorType.RPC, "tag",
+ "error", "appTag", "info", null))));
+
+ ListenableFuture<RpcResult<CompositeNode>> future = rpcImpl.invokeRpc(TEST_RPC, input);
+
+ RpcResult<CompositeNode> rpcResult = future.get(5, TimeUnit.SECONDS);
+
+ assertFailedRpcResult(rpcResult, ErrorSeverity.ERROR, ErrorType.RPC, "tag",
+ "error", "appTag", "info", null);
+ } finally {
+ if(assertError.get() != null) {
+ throw assertError.get();
+ }
+ }
+ }
+
+ @Test
+ public void testInvokeRpcWithOtherException() throws Exception {
+ final AtomicReference<AssertionError> assertError = new AtomicReference<>();
+ try {
+ RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
+ probeReg1.getRef(), schemaContext);
+
+ final CompositeNode input = makeRPCInput("foo");
+
+ setupInvokeRpcErrorReply(assertError, new TestException());
+
+ ListenableFuture<RpcResult<CompositeNode>> future = rpcImpl.invokeRpc(TEST_RPC, input);
+
+ RpcResult<CompositeNode> rpcResult = future.get(5, TimeUnit.SECONDS);
+
+ assertFailedRpcResult(rpcResult, ErrorSeverity.ERROR, ErrorType.RPC, "operation-failed",
+ TestException.MESSAGE, null, null, TestException.MESSAGE);
+ } finally {
+ if(assertError.get() != null) {
+ throw assertError.get();
+ }
+ }
+ }
+
+ private AtomicReference<InvokeRpc> setupInvokeRpcReply(
+ final AtomicReference<AssertionError> assertError, final CompositeNode output) {
+ return setupInvokeRpcReply(assertError, output, null);
+ }
+
+ private AtomicReference<InvokeRpc> setupInvokeRpcErrorReply(
+ final AtomicReference<AssertionError> assertError, final Exception error) {
+ return setupInvokeRpcReply(assertError, null, error);
+ }
+
+ private AtomicReference<InvokeRpc> setupInvokeRpcReply(
+ final AtomicReference<AssertionError> assertError, final CompositeNode output,
+ final Exception error) {
+ final AtomicReference<InvokeRpc> invokeRpcMsg = new AtomicReference<>();
+
+ new Thread() {
+ @Override
+ public void run() {
+ try {
+ invokeRpcMsg.set(probeReg1.expectMsgClass(
+ JavaTestKit.duration("5 seconds"), InvokeRpc.class));
+
+ if(output != null) {
+ probeReg1.reply(new RpcResponse(XmlUtils.outputCompositeNodeToXml(
+ output, schemaContext)));
+ } else {
+ probeReg1.reply(new akka.actor.Status.Failure(error));
+ }
+
+ } catch(AssertionError e) {
+ assertError.set(e);
+ }
+ }
+
+ }.start();
+
+ return invokeRpcMsg;
+ }
+}
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
import akka.japi.Pair;
import akka.testkit.JavaTestKit;
+
+import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
-import com.typesafe.config.ConfigFactory;
-import junit.framework.Assert;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import static org.junit.Assert.assertEquals;
import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.remote.rpc.messages.ErrorResponse;
+import org.mockito.ArgumentCaptor;
+import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
-import org.opendaylight.controller.sal.common.util.Rpcs;
-import org.opendaylight.controller.sal.core.api.Broker;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import org.opendaylight.controller.sal.connector.api.RpcRouter.RouteIdentifier;
+import org.opendaylight.controller.xml.codec.XmlUtils;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.ModifyAction;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import java.net.URI;
import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Collection;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
-import java.util.concurrent.Future;
-import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.any;
+
+public class RpcBrokerTest extends AbstractRpcTest {
+
+ @Test
+ public void testInvokeRpcWithNoRemoteActor() throws Exception {
+ new JavaTestKit(node1) {{
+ CompositeNode input = makeRPCInput("foo");
+
+ InvokeRpc invokeMsg = new InvokeRpc(TEST_RPC, null, input);
+ rpcBroker1.tell(invokeMsg, getRef());
+
+ probeReg1.expectMsgClass(duration("5 seconds"), RpcRegistry.Messages.FindRouters.class);
+ probeReg1.reply(new RpcRegistry.Messages.FindRoutersReply(
+ Collections.<Pair<ActorRef, Long>>emptyList()));
+
+ akka.actor.Status.Failure failure = expectMsgClass(duration("5 seconds"),
+ akka.actor.Status.Failure.class);
+
+ assertEquals("failure.cause()", RpcErrorsException.class, failure.cause().getClass());
+ }};
+ }
+
+
+ /**
+ * This test method invokes and executes the remote rpc
+ */
+ //@Test
+ public void testInvokeRpc() throws URISyntaxException {
+ new JavaTestKit(node1) {{
+ QName instanceQName = new QName(new URI("ns"), "instance");
+
+ CompositeNode invokeRpcResult = makeRPCOutput("bar");
+ RpcResult<CompositeNode> rpcResult =
+ RpcResultBuilder.<CompositeNode>success(invokeRpcResult).build();
+ ArgumentCaptor<CompositeNode> inputCaptor = new ArgumentCaptor<>();
+ when(brokerSession.rpc(eq(TEST_RPC), inputCaptor.capture()))
+ .thenReturn(Futures.immediateFuture(rpcResult));
+
+ // invoke rpc
+ CompositeNode input = makeRPCInput("foo");
+ YangInstanceIdentifier instanceID = YangInstanceIdentifier.of(instanceQName);
+ InvokeRpc invokeMsg = new InvokeRpc(TEST_RPC, instanceID, input);
+ rpcBroker1.tell(invokeMsg, getRef());
+
+ FindRouters findRouters = probeReg1.expectMsgClass(RpcRegistry.Messages.FindRouters.class);
+ RouteIdentifier<?, ?, ?> routeIdentifier = findRouters.getRouteIdentifier();
+ assertEquals("getType", TEST_RPC, routeIdentifier.getType());
+ assertEquals("getRoute", instanceID, routeIdentifier.getRoute());
+
+ probeReg1.reply(new RpcRegistry.Messages.FindRoutersReply(
+ Arrays.asList(new Pair<ActorRef, Long>(rpcBroker2, 200L))));
+
+ RpcResponse rpcResponse = expectMsgClass(duration("5 seconds"), RpcResponse.class);
+ assertCompositeNodeEquals((CompositeNode)invokeRpcResult.getValue().get(0),
+ XmlUtils.xmlToCompositeNode(rpcResponse.getResultCompositeNode()));
+ assertCompositeNodeEquals(input, inputCaptor.getValue());
+ }};
+ }
+
+ @Test
+ public void testInvokeRpcWithNoOutput() {
+ new JavaTestKit(node1) {{
+
+ RpcResult<CompositeNode> rpcResult = RpcResultBuilder.<CompositeNode>success().build();
+ when(brokerSession.rpc(eq(TEST_RPC), any(CompositeNode.class)))
+ .thenReturn(Futures.immediateFuture(rpcResult));
+
+ InvokeRpc invokeMsg = new InvokeRpc(TEST_RPC, null, makeRPCInput("foo"));
+ rpcBroker1.tell(invokeMsg, getRef());
+
+ probeReg1.expectMsgClass(RpcRegistry.Messages.FindRouters.class);
+ probeReg1.reply(new RpcRegistry.Messages.FindRoutersReply(
+ Arrays.asList(new Pair<ActorRef, Long>(rpcBroker2, 200L))));
+
+ RpcResponse rpcResponse = expectMsgClass(duration("5 seconds"), RpcResponse.class);
+
+ assertEquals("getResultCompositeNode", "", rpcResponse.getResultCompositeNode());
+ }};
+ }
+
+ @Test
+ public void testInvokeRpcWithExecuteFailure() {
+ new JavaTestKit(node1) {{
+
+ RpcResult<CompositeNode> rpcResult = RpcResultBuilder.<CompositeNode>failed()
+ .withError(ErrorType.RPC, "tag", "error", "appTag", "info",
+ new Exception("mock"))
+ .build();
+ when(brokerSession.rpc(eq(TEST_RPC), any(CompositeNode.class)))
+ .thenReturn(Futures.immediateFuture(rpcResult));
+
+ InvokeRpc invokeMsg = new InvokeRpc(TEST_RPC, null, makeRPCInput("foo"));
+ rpcBroker1.tell(invokeMsg, getRef());
+
+ probeReg1.expectMsgClass(RpcRegistry.Messages.FindRouters.class);
+ probeReg1.reply(new RpcRegistry.Messages.FindRoutersReply(
+ Arrays.asList(new Pair<ActorRef, Long>(rpcBroker2, 200L))));
+
+ akka.actor.Status.Failure failure = expectMsgClass(duration("5 seconds"),
+ akka.actor.Status.Failure.class);
+
+ assertEquals("failure.cause()", RpcErrorsException.class, failure.cause().getClass());
+
+ RpcErrorsException errorsEx = (RpcErrorsException)failure.cause();
+ List<RpcError> rpcErrors = Lists.newArrayList(errorsEx.getRpcErrors());
+ assertEquals("RpcErrors count", 1, rpcErrors.size());
+ assertRpcErrorEquals(rpcErrors.get(0), ErrorSeverity.ERROR, ErrorType.RPC, "tag",
+ "error", "appTag", "info", "mock");
+ }};
+ }
+
+ @Test
+ public void testInvokeRpcWithFindRoutersFailure() {
+ new JavaTestKit(node1) {{
+
+ InvokeRpc invokeMsg = new InvokeRpc(TEST_RPC, null, makeRPCInput("foo"));
+ rpcBroker1.tell(invokeMsg, getRef());
+
+ probeReg1.expectMsgClass(RpcRegistry.Messages.FindRouters.class);
+ probeReg1.reply(new akka.actor.Status.Failure(new TestException()));
+
+ akka.actor.Status.Failure failure = expectMsgClass(duration("5 seconds"),
+ akka.actor.Status.Failure.class);
+
+ assertEquals("failure.cause()", TestException.class, failure.cause().getClass());
+ }};
+ }
+
+ @Test
+ public void testExecuteRpc() {
+ new JavaTestKit(node1) {{
+
+ String xml = "<input xmlns=\"urn:test\"><input-data>foo</input-data></input>";
+
+ CompositeNode invokeRpcResult = makeRPCOutput("bar");
+ RpcResult<CompositeNode> rpcResult =
+ RpcResultBuilder.<CompositeNode>success(invokeRpcResult).build();
+ ArgumentCaptor<CompositeNode> inputCaptor = new ArgumentCaptor<>();
+ when(brokerSession.rpc(eq(TEST_RPC), inputCaptor.capture()))
+ .thenReturn(Futures.immediateFuture(rpcResult));
+
+ ExecuteRpc executeMsg = new ExecuteRpc(xml, TEST_RPC);
+
+ rpcBroker1.tell(executeMsg, getRef());
+
+ RpcResponse rpcResponse = expectMsgClass(duration("5 seconds"), RpcResponse.class);
+
+ assertCompositeNodeEquals((CompositeNode)invokeRpcResult.getValue().get(0),
+ XmlUtils.xmlToCompositeNode(rpcResponse.getResultCompositeNode()));
+ }};
+ }
+
+ @Test
+ public void testExecuteRpcFailureWithRpcErrors() {
+ new JavaTestKit(node1) {{
+
+ String xml = "<input xmlns=\"urn:test\"><input-data>foo</input-data></input>";
+
+ RpcResult<CompositeNode> rpcResult = RpcResultBuilder.<CompositeNode>failed()
+ .withError(ErrorType.RPC, "tag1", "error", "appTag1", "info1",
+ new Exception("mock"))
+ .withWarning(ErrorType.PROTOCOL, "tag2", "warning", "appTag2", "info2", null)
+ .build();
+ when(brokerSession.rpc(eq(TEST_RPC), any(CompositeNode.class)))
+ .thenReturn(Futures.immediateFuture(rpcResult));
+
+ ExecuteRpc executeMsg = new ExecuteRpc(xml, TEST_RPC);
+
+ rpcBroker1.tell(executeMsg, getRef());
+
+ akka.actor.Status.Failure failure = expectMsgClass(duration("5 seconds"),
+ akka.actor.Status.Failure.class);
+
+ assertEquals("failure.cause()", RpcErrorsException.class, failure.cause().getClass());
+
+ RpcErrorsException errorsEx = (RpcErrorsException)failure.cause();
+ List<RpcError> rpcErrors = Lists.newArrayList(errorsEx.getRpcErrors());
+ assertEquals("RpcErrors count", 2, rpcErrors.size());
+ assertRpcErrorEquals(rpcErrors.get(0), ErrorSeverity.ERROR, ErrorType.RPC, "tag1",
+ "error", "appTag1", "info1", "mock");
+ assertRpcErrorEquals(rpcErrors.get(1), ErrorSeverity.WARNING, ErrorType.PROTOCOL, "tag2",
+ "warning", "appTag2", "info2", null);
+ }};
+ }
+
+ @Test
+ public void testExecuteRpcFailureWithNoRpcErrors() {
+ new JavaTestKit(node1) {{
+
+ String xml = "<input xmlns=\"urn:test\"><input-data>foo</input-data></input>";
+
+ RpcResult<CompositeNode> rpcResult = RpcResultBuilder.<CompositeNode>failed().build();
+ when(brokerSession.rpc(eq(TEST_RPC), any(CompositeNode.class)))
+ .thenReturn(Futures.immediateFuture(rpcResult));
+
+ ExecuteRpc executeMsg = new ExecuteRpc(xml, TEST_RPC);
+
+ rpcBroker1.tell(executeMsg, getRef());
+
+ akka.actor.Status.Failure failure = expectMsgClass(duration("5 seconds"),
+ akka.actor.Status.Failure.class);
+
+ assertEquals("failure.cause()", RpcErrorsException.class, failure.cause().getClass());
+
+ RpcErrorsException errorsEx = (RpcErrorsException)failure.cause();
+ List<RpcError> rpcErrors = Lists.newArrayList(errorsEx.getRpcErrors());
+ assertEquals("RpcErrors count", 1, rpcErrors.size());
+ assertRpcErrorEquals(rpcErrors.get(0), ErrorSeverity.ERROR, ErrorType.RPC,
+ "operation-failed", "failed", null, null, null);
+ }};
+ }
+
+ @Test
+ public void testExecuteRpcFailureWithException() {
+ new JavaTestKit(node1) {{
+
+ String xml = "<input xmlns=\"urn:test\"><input-data>foo</input-data></input>";
+
+ when(brokerSession.rpc(eq(TEST_RPC), any(CompositeNode.class)))
+ .thenReturn(Futures.<RpcResult<CompositeNode>>immediateFailedFuture(
+ new TestException()));
+
+ ExecuteRpc executeMsg = new ExecuteRpc(xml, TEST_RPC);
+
+ rpcBroker1.tell(executeMsg, getRef());
+
+ akka.actor.Status.Failure failure = expectMsgClass(duration("5 seconds"),
+ akka.actor.Status.Failure.class);
-public class RpcBrokerTest {
-
- static ActorSystem node1;
- static ActorSystem node2;
- private ActorRef rpcBroker1;
- private JavaTestKit probeReg1;
- private ActorRef rpcBroker2;
- private JavaTestKit probeReg2;
- private Broker.ProviderSession brokerSession;
-
-
- @BeforeClass
- public static void setup() throws InterruptedException {
- node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
- node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
- }
-
- @AfterClass
- public static void teardown() {
- JavaTestKit.shutdownActorSystem(node1);
- JavaTestKit.shutdownActorSystem(node2);
- node1 = null;
- node2 = null;
- }
-
- @Before
- public void createActor() {
- brokerSession = Mockito.mock(Broker.ProviderSession.class);
- SchemaContext schemaContext = mock(SchemaContext.class);
- probeReg1 = new JavaTestKit(node1);
- rpcBroker1 = node1.actorOf(RpcBroker.props(brokerSession, probeReg1.getRef(), schemaContext));
- probeReg2 = new JavaTestKit(node2);
- rpcBroker2 = node2.actorOf(RpcBroker.props(brokerSession, probeReg2.getRef(), schemaContext));
-
- }
- @Test
- public void testInvokeRpcError() throws Exception {
- new JavaTestKit(node1) {{
- QName rpc = new QName(new URI("noactor1"), "noactor1");
- CompositeNode input = new ImmutableCompositeNode(QName.create("ns", "2013-12-09", "no child"), new ArrayList<Node<?>>(), ModifyAction.REPLACE);
-
-
- InvokeRpc invokeMsg = new InvokeRpc(rpc, null, input);
- rpcBroker1.tell(invokeMsg, getRef());
- probeReg1.expectMsgClass(RpcRegistry.Messages.FindRouters.class);
- probeReg1.reply(new RpcRegistry.Messages.FindRoutersReply(new ArrayList<Pair<ActorRef, Long>>()));
-
- Boolean getMsg = new ExpectMsg<Boolean>("ErrorResponse") {
- protected Boolean match(Object in) {
- if (in instanceof ErrorResponse) {
- ErrorResponse reply = (ErrorResponse)in;
- return reply.getException().getMessage().contains("No remote actor found for rpc execution of :");
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- Assert.assertTrue(getMsg);
-
- }};
- }
-
-
- /**
- * This test method invokes and executes the remote rpc
- */
-
- @Test
- public void testInvokeRpc() throws URISyntaxException {
- new JavaTestKit(node1) {{
- QName rpc = new QName(new URI("noactor1"), "noactor1");
- // invoke rpc
- CompositeNode input = new ImmutableCompositeNode(QName.create("ns", "2013-12-09", "child1"), new ArrayList<Node<?>>(), ModifyAction.REPLACE);
- InvokeRpc invokeMsg = new InvokeRpc(rpc, null, input);
- rpcBroker1.tell(invokeMsg, getRef());
-
- probeReg1.expectMsgClass(RpcRegistry.Messages.FindRouters.class);
- List<Pair<ActorRef, Long>> routerList = new ArrayList<Pair<ActorRef, Long>>();
-
- routerList.add(new Pair<ActorRef, Long>(rpcBroker2, 200L));
-
- probeReg1.reply(new RpcRegistry.Messages.FindRoutersReply(routerList));
-
- CompositeNode invokeRpcResult = mock(CompositeNode.class);
- Collection<RpcError> errors = new ArrayList<>();
- RpcResult<CompositeNode> result = Rpcs.getRpcResult(true, invokeRpcResult, errors);
- Future<RpcResult<CompositeNode>> rpcResult = Futures.immediateFuture(result);
- when(brokerSession.rpc(rpc, input)).thenReturn(rpcResult);
-
- //verify response msg
- Boolean getMsg = new ExpectMsg<Boolean>("RpcResponse") {
- protected Boolean match(Object in) {
- if (in instanceof RpcResponse) {
- return true;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- Assert.assertTrue(getMsg);
- }};
- }
+ assertEquals("failure.cause()", TestException.class, failure.cause().getClass());
+ }};
+ }
}
--- /dev/null
+module test-rpc-service {
+ yang-version 1;
+ namespace "urn:test";
+ prefix "rpc";
+
+ revision "2014-08-28" {
+ description
+ "Initial revision";
+ }
+
+ rpc test-rpc {
+ input {
+ leaf input-data {
+ type string;
+ }
+ }
+
+ output {
+ leaf output-data {
+ type string;
+ }
+ }
+ }
+}
\ No newline at end of file
public class RestConnectorModule extends org.opendaylight.controller.config.yang.md.sal.rest.connector.AbstractRestConnectorModule {
+ private static RestConnectorRuntimeRegistration runtimeRegistration;
+
public RestConnectorModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
// Register it with the Broker
getDomBrokerDependency().registerProvider(instance);
+ if(runtimeRegistration != null){
+ runtimeRegistration.close();
+ }
- getRootRuntimeBeanRegistratorWrapper().register(instance);
+ runtimeRegistration =
+ getRootRuntimeBeanRegistratorWrapper().register(instance);
return instance;
}
*/
package org.opendaylight.controller.sal.restconf.impl;
-import java.math.BigInteger;
-import java.util.Collection;
-import java.util.Collections;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Config;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Get;
import org.opendaylight.controller.config.yang.md.sal.rest.connector.Operational;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-public class RestconfProviderImpl implements Provider, AutoCloseable, RestConnector, RestConnectorRuntimeMXBean {
+import java.math.BigInteger;
+import java.util.Collection;
+import java.util.Collections;
- public final static String NOT_INITALIZED_MSG = "Restconf is not initialized yet. Please try again later";
+public class RestconfProviderImpl implements Provider, AutoCloseable, RestConnector, RestConnectorRuntimeMXBean {
private final StatisticsRestconfServiceWrapper stats = StatisticsRestconfServiceWrapper.getInstance();
private ListenerRegistration<SchemaContextListener> listenerRegistration;
private PortNumber port;
+ private Thread webSocketServerThread;
+
public void setWebsocketPort(PortNumber port) {
this.port = port;
}
- private Thread webSocketServerThread;
-
@Override
public void onSessionInitiated(ProviderSession session) {
final DOMDataBroker domDataBroker = session.getService(DOMDataBroker.class);
@Override
public void close() {
+
if (listenerRegistration != null) {
listenerRegistration.close();
}
+
+ WebSocketServer.destroyInstance();
webSocketServerThread.interrupt();
}
public class WebSocketServer implements Runnable {
private static final Logger logger = LoggerFactory.getLogger(WebSocketServer.class);
- public static final String WEBSOCKET_SERVER_CONFIG_PROPERTY = "restconf.websocket.port";
public static final int DEFAULT_PORT = 8181;
private EventLoopGroup bossGroup;
private EventLoopGroup workerGroup;
- private static WebSocketServer singleton = null;
+ private static WebSocketServer instance = null;
private int port = DEFAULT_PORT;
private WebSocketServer(int port) {
* @return instance of {@link WebSocketServer}
*/
public static WebSocketServer createInstance(int port) {
- if (singleton != null) {
- throw new IllegalStateException("createInstance() has already been called");
- }
- if (port < 1024) {
- throw new IllegalArgumentException("Privileged port (below 1024) is not allowed");
- }
- singleton = new WebSocketServer(port);
- return singleton;
+ Preconditions.checkState(instance == null, "createInstance() has already been called");
+ Preconditions.checkArgument(port > 1024, "Privileged port (below 1024) is not allowed");
+
+ instance = new WebSocketServer(port);
+ return instance;
}
/**
* @return instance of {@link WebSocketServer}
*/
public static WebSocketServer getInstance() {
- Preconditions.checkNotNull(singleton, "createInstance() must be called prior to getInstance()");
- return singleton;
+ Preconditions.checkNotNull(instance, "createInstance() must be called prior to getInstance()");
+ return instance;
}
/**
* Destroy this already created instance
*/
public static void destroyInstance() {
- if (singleton == null) {
- throw new IllegalStateException("createInstance() must be called prior to destroyInstance()");
- }
- getInstance().stop();
+ Preconditions.checkState(instance != null, "createInstance() must be called prior to destroyInstance()");
+
+ instance.stop();
+ instance = null;
}
@Override
Notificator.removeAllListeners();
if (bossGroup != null) {
bossGroup.shutdownGracefully();
+ bossGroup = null;
}
if (workerGroup != null) {
workerGroup.shutdownGracefully();
+ workerGroup = null;
}
}